Advanced Crawling: Completing the Framework's Functionality
Passing multiple middlewares in the project
Why multiple middlewares are needed
Different middlewares can apply different processing to the request or response objects. By putting each piece of functionality into its own middleware, the logic stays clearer.
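The pattern the engine will use is simply to loop over a list of middleware objects, passing the request (or response) through each one in turn: whatever one middleware returns is what the next one receives. Below is a small, self-contained sketch of that chaining idea; FakeRequest, UserAgentMiddleware and LoggingMiddleware are illustrative names only, not part of the project code.

    # Self-contained sketch of chaining a list of middlewares (illustrative only)
    class FakeRequest(object):
        def __init__(self, url):
            self.url = url
            self.headers = {}

    class UserAgentMiddleware(object):
        def process_request(self, request):
            # add a default User-Agent header if none is set
            request.headers.setdefault("User-Agent", "scrapy_plus/0.1")
            return request

    class LoggingMiddleware(object):
        def process_request(self, request):
            # report which request is about to be downloaded
            print("about to download:", request.url)
            return request

    middlewares = [UserAgentMiddleware(), LoggingMiddleware()]
    req = FakeRequest("http://www.example.com")
    for mid in middlewares:
        # each middleware receives the (possibly modified) request returned by the previous one
        req = mid.process_request(req)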
Create the middleware files in the project folder
spider_middlewares.py in the project folder:
class TestSpiderMiddleware1(object):

    def process_request(self, request):
        '''Process the request, e.g. add a default User-Agent header'''
        print("TestSpiderMiddleware1: process_request")
        return request

    def process_response(self, response):
        '''Process the response object'''
        print("TestSpiderMiddleware1: process_response")
        return response


class TestSpiderMiddleware2(object):

    def process_request(self, request):
        '''Process the request, e.g. add a default User-Agent header'''
        print("TestSpiderMiddleware2: process_request")
        return request

    def process_response(self, response):
        '''Process the response object'''
        print("TestSpiderMiddleware2: process_response")
        return response
downloader_middlewares.py in the project folder:
class TestDownloaderMiddleware1(object):

    def process_request(self, request):
        '''Process the request, e.g. add a default User-Agent header'''
        print("TestDownloaderMiddleware1: process_request")
        return request

    def process_response(self, response):
        '''Process the response object'''
        print("TestDownloaderMiddleware1: process_response")
        return response


class TestDownloaderMiddleware2(object):

    def process_request(self, request):
        '''Process the request, e.g. add a default User-Agent header'''
        print("TestDownloaderMiddleware2: process_request")
        return request

    def process_response(self, response):
        '''Process the response object'''
        print("TestDownloaderMiddleware2: process_response")
        return response
Modify main.py in the project folder
Pass multiple middlewares into the engine
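A sketch of what main.py could look like after this change. The Engine import path comes from the engine.py shown below; the BaiduSpider/DoubanSpider spiders, their name attributes, and the BaiduPipeline/DoubanPipeline pipelines are assumed to exist from earlier sections of the tutorial, so adjust the imports to whatever your project actually defines.

    # project_dir/main.py -- sketch; spider and pipeline names are assumptions
    from scrapy_plus.core.engine import Engine

    from spiders.baidu import BaiduSpider            # assumed example spider
    from spiders.douban import DoubanSpider          # assumed example spider
    from pipelines import BaiduPipeline, DoubanPipeline   # assumed example pipelines
    from spider_middlewares import TestSpiderMiddleware1, TestSpiderMiddleware2
    from downloader_middlewares import TestDownloaderMiddleware1, TestDownloaderMiddleware2

    if __name__ == '__main__':
        # spiders are keyed by name, matching how the engine looks them up
        spiders = {
            BaiduSpider.name: BaiduSpider(),
            DoubanSpider.name: DoubanSpider(),
        }
        pipelines = [BaiduPipeline(), DoubanPipeline()]
        # list order is the order in which the middlewares are applied
        spider_mids = [TestSpiderMiddleware1(), TestSpiderMiddleware2()]
        downloader_mids = [TestDownloaderMiddleware1(), TestDownloaderMiddleware2()]

        engine = Engine(spiders,
                        pipelines=pipelines,
                        spider_mids=spider_mids,
                        downloader_mids=downloader_mids)
        engine.start()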
Accordingly, modify engine.py
so that it uses multiple middlewares:
# scrapy_plus/core/engine.py
.....
class Engine(object):
    '''Encapsulates the engine module'''

    def __init__(self, spiders, pipelines=[], spider_mids=[], downloader_mids=[]):
        '''
        Instantiate the other components; the engine implements its functionality
        by calling the components' methods
        '''
        # self.spider = Spider()
        self.spiders = spiders
        self.downloader = Downloader()
        self.pipelines = pipelines
        self.scheduler = Scheduler()
        self.spider_mids = spider_mids
        self.downloader_mids = downloader_mids
        self.total_request_nums = 0
        self.total_response_nums = 0

    def start(self):
        '''
        Entry point for starting the engine
        :return:
        '''
        start_time = datetime.now()
        logger.info("Spider started: {}".format(start_time))
        self._start_engine()
        end_time = datetime.now()
        logger.info("Spider finished: {}".format(end_time))
        logger.info("Total running time: {} seconds".format((end_time - start_time).total_seconds()))
        logger.info("Total number of requests: {}".format(self.total_request_nums))
        logger.info("Total number of responses: {}".format(self.total_response_nums))

    def _start_request(self):
        for spider_name, spider in self.spiders.items():
            for start_request in spider.start_requests():
                # 1. Pass the start request through the spider middlewares
                for spider_mid in self.spider_mids:
                    start_request = spider_mid.process_request(start_request)
                start_request.spider_name = spider_name
                # 2. Call the scheduler's add_request method to add the request object to the scheduler
                self.scheduler.add_request(start_request)
                # increment the request counter
                self.total_request_nums += 1

    def _execute_request_response_item(self):
        # 3. Call the scheduler's get_request method to get a request object
        request = self.scheduler.get_request()
        if request is None:  # if no request object was obtained, return directly
            return
        # pass the request object through the downloader middlewares' process_request
        for downloader_mid in self.downloader_mids:
            request = downloader_mid.process_request(request)
        # 4. Call the downloader's get_response method to get the response
        response = self.downloader.get_response(request)
        response.meta = request.meta
        # pass the response object through the downloader middlewares' process_response
        for downloader_mid in self.downloader_mids:
            response = downloader_mid.process_response(response)
        # pass the response object through the spider middlewares' process_response
        for spider_mid in self.spider_mids:
            response = spider_mid.process_response(response)
        # locate the parse method of the spider that issued the request
        spider = self.spiders[request.spider_name]
        parse = getattr(spider, request.parse)
        # 5. Call the spider's parse method to handle the response
        for result in parse(response):
            # 6. Check the result type: if it is a Request, call the scheduler's add_request method again
            if isinstance(result, Request):
                # a request produced by the parse function also goes through the spider middlewares' process_request
                for spider_mid in self.spider_mids:
                    result = spider_mid.process_request(result)
                result.spider_name = request.spider_name
                self.scheduler.add_request(result)
                self.total_request_nums += 1
            # 7. Otherwise, call the pipelines' process_item method to handle the result
            else:
                for pipeline in self.pipelines:
                    result = pipeline.process_item(result, spider)
        # one response has now been fully processed
        self.total_response_nums += 1

    def _start_engine(self):
        '''
        Implements the engine's actual workflow
        :return:
        '''
        self._start_request()
        while True:
            time.sleep(0.001)
            self._execute_request_response_item()
            if self.total_response_nums >= self.total_request_nums:
                break
    ......
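With the two test middlewares of each kind registered, every request is passed through them in list order, so for a single request the prints should appear roughly in the following order (interleaved with the spider's and pipeline's own output), as can be read directly from the engine code above:

    TestSpiderMiddleware1: process_request
    TestSpiderMiddleware2: process_request
    TestDownloaderMiddleware1: process_request
    TestDownloaderMiddleware2: process_request
    TestDownloaderMiddleware1: process_response
    TestDownloaderMiddleware2: process_response
    TestSpiderMiddleware1: process_response
    TestSpiderMiddleware2: process_response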