第一步,settings.py添加
# Enable the MongoDB pipeline. NOTE: in the original collapsed snippet the
# inline '#' commented out BOTH entries, leaving ITEM_PIPELINES an empty dict,
# so no pipeline ever ran. The Weibo pipeline entry is restored as active here.
# The value (300) is the run order: lower numbers run first (range 0-1000).
ITEM_PIPELINES = {
    # 'scrapy_runklist.pipelines.ScrapyRunklistPipeline': 300,
    'scrapy_runklist.pipelines.ScrapyWeiBoPipeline': 300,
}

# MongoDB configuration
MONGO_HOST = "127.0.0.1"  # host IP
MONGO_PORT = 27017  # port
MONGO_DB = "ranklist"  # database name
MONGO_COLL_WEIBO = "weibo"  # collection name
# MONGO_USER = "simple"  # username (uncomment if auth is required)
# MONGO_PSW = "test"  # password (uncomment if auth is required)
第二步,items.py添加
class WeiboItem(scrapy.Item):id = scrapy.Field()word = scrapy.Field()url = scrapy.Field()
第三步,spider.py添加
def parse(self, response):"""此处省略处理响应内容的代码, 直接 yield item 对象""" weiboItem = WeiboItem()weiboItem["id"] = idweiboItem["word"] = wordweiboItem["url"] = urlyield weiboItem
第四步,pipelines.py添加
import pymongo
from scrapy.utils.project import get_project_settings
settings = get_project_settings()class ScrapyWeiBoPipeline:def __init__(self):# 链接数据库client = pymongo.MongoClient(host=settings['MONGO_HOST'], port=settings['MONGO_PORT'])self.db = client[settings['MONGO_DB']] # 获得数据库的句柄self.coll = self.db[settings['MONGO_COLL_WEIBO']] # 获得collection的句柄# 数据库登录需要帐号密码的话# self.db.authenticate(settings['MONGO_USER'], settings['MONGO_PSW'])def process_item(self, item, spider):print("pipline item ==== ", item)postItem = dict(item) # 把item转化成字典形式self.coll.insert(postItem) # 向数据库插入一条记录return item # 会在控制台输出原item数据,可以选择不写