1. Persistence
Drawbacks of the naive approach (opening and writing a file inside the spider callback):
- No hook to open a connection when the spider starts and close it when the spider finishes.
- No clear division of labor between crawling and storage.
The pipeline/Item mechanism addresses both:
a. First, write the pipeline class
    class XXXPipeline(object):
        def process_item(self, item, spider):
            return item
b. Write the Item class
    class XdbItem(scrapy.Item):
        href = scrapy.Field()
        title = scrapy.Field()
c. Register it in settings.py
    ITEM_PIPELINES = {
        'xdb.pipelines.XdbPipeline': 300,
    }
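Multiple pipelines can be enabled at once; the number is a priority and items flow through the pipelines from the lowest value to the highest (customarily 0-1000). A minimal sketch, where DbPipeline is a hypothetical second pipeline:
    # settings.py -- items pass through enabled pipelines in ascending priority order
    ITEM_PIPELINES = {
        'xdb.pipelines.FilePipeline': 300,   # runs first
        'xdb.pipelines.DbPipeline': 400,     # hypothetical second pipeline, runs after FilePipeline
    }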
d. In the spider, yield an Item object; every yield triggers one call to process_item (see the sketch below).
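A minimal sketch of step d, assuming the XdbItem class above lives in xdb/items.py and using a hypothetical spider name:
    import scrapy
    from xdb.items import XdbItem  # assumption: the Item class is defined in xdb/items.py

    class XdbSpider(scrapy.Spider):
        name = 'xdb'
        start_urls = ['https://dig.chouti.com/']

        def parse(self, response):
            for a in response.xpath('//a[@href]'):
                # each yield hands one item to every enabled pipeline's process_item
                yield XdbItem(
                    href=a.xpath('./@href').extract_first(),
                    title=a.xpath('./text()').extract_first(),
                )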
Writing the pipeline:
    from scrapy.exceptions import DropItem

    class FilePipeline(object):
        def __init__(self, path):
            self.f = None
            self.path = path

        @classmethod
        def from_crawler(cls, crawler):
            """
            Called at startup to create the pipeline object.
            :param crawler:
            :return:
            """
            print('File.from_crawler')
            path = crawler.settings.get('HREF_FILE_PATH')
            return cls(path)

        def open_spider(self, spider):
            """
            Called when the spider starts.
            :param spider:
            :return:
            """
            print('File.open_spider')
            self.f = open(self.path, 'a+')

        def process_item(self, item, spider):
            # f = open('xx.log', 'a+')
            # f.write(item['href'] + '\n')
            # f.close()
            print('File', item['href'])
            self.f.write(item['href'] + '\n')
            # return item  # pass the item on to the next pipeline's process_item
            raise DropItem()  # later pipelines' process_item will not run for this item

        def close_spider(self, spider):
            """
            Called when the spider closes.
            :param spider:
            :return:
            """
            print('File.close_spider')
            self.f.close()
Note: pipelines are shared by all spiders in the project; to customize behavior for a particular spider, branch on the spider argument yourself, as shown in the sketch below.
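A minimal sketch of per-spider handling inside process_item, assuming only the 'chouti' spider should write to the file:
    def process_item(self, item, spider):
        # the same pipeline instance serves every spider in the project; branch on its name
        if spider.name == 'chouti':
            self.f.write(item['href'] + '\n')
        return item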
2. Dedup rules
a. Write the filter class
    from scrapy.dupefilters import BaseDupeFilter
    from scrapy.utils.request import request_fingerprint

    class XdbDupeFilter(BaseDupeFilter):
        def __init__(self):
            self.visited_fd = set()

        @classmethod
        def from_settings(cls, settings):
            return cls()

        def request_seen(self, request):
            # compute a fingerprint for the request and check whether it was seen before
            fd = request_fingerprint(request=request)
            if fd in self.visited_fd:
                return True
            self.visited_fd.add(fd)
            return False

        def open(self):  # can return a deferred
            print('open')

        def close(self, reason):  # can return a deferred
            print('close')

        # def log(self, request, spider):  # log that a request has been filtered
        #     print('log')
b. Register it in settings.py
    # override the default dedup filter
    # DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'  # the default
    DUPEFILTER_CLASS = 'xdb.dupefilters.XdbDupeFilter'
c. Using it in a spider:
    import scrapy
    from scrapy.http import Request

    class ChoutiSpider(scrapy.Spider):
        name = 'chouti'
        allowed_domains = ['chouti.com']
        start_urls = ['https://dig.chouti.com/']

        def parse(self, response):
            print(response.request.url)
            # item_list = response.xpath('//div[@id="content-list"]/div[@class="item"]')
            # for item in item_list:
            #     text = item.xpath('.//a/text()').extract_first()
            #     href = item.xpath('.//a/@href').extract_first()

            page_list = response.xpath('//div[@id="dig_lcpage"]//a/@href').extract()
            for page in page_list:
                page = "https://dig.chouti.com" + page
                # yield Request(url=page, callback=self.parse, dont_filter=False)  # deduped, e.g. https://dig.chouti.com/all/hot/recent/2
                yield Request(url=page, callback=self.parse, dont_filter=True)  # dont_filter=True bypasses the dedup filter
Notes:
- Implement the dedup logic correctly in request_seen.
- The filter only applies when dont_filter=False (the default); dont_filter=True bypasses it.
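request_fingerprint hashes the request method, the canonicalized URL, and the body, so two URLs that differ only in query-parameter order yield the same fingerprint. A quick sketch to check this (assuming Scrapy is installed):
    from scrapy.http import Request
    from scrapy.utils.request import request_fingerprint

    r1 = Request(url='https://dig.chouti.com/all/hot/recent/2?a=1&b=2')
    r2 = Request(url='https://dig.chouti.com/all/hot/recent/2?b=2&a=1')
    # canonicalization sorts query parameters, so both requests are considered duplicates
    print(request_fingerprint(r1) == request_fingerprint(r2))  # True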
3. Depth
In settings.py:
    # limit crawl depth; deeper requests are dropped by DepthMiddleware
    DEPTH_LIMIT = 3
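The depth is tracked by the built-in DepthMiddleware in request.meta['depth'] (start URLs are depth 0). A minimal sketch for observing it inside a callback:
    def parse(self, response):
        # depth of the page this response came from; 0 for the start URLs
        print(response.url, response.meta.get('depth', 0))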
4. Cookies
Method 1: carry them manually
- Attach the stored cookies to the request:
    Request(
        url='https://dig.chouti.com/login',
        method='POST',
        body="phone=8613121758648&password=woshiniba&oneMonth=1",  # or body=urlencode({...})
        cookies=self.cookie_dict,
        headers={
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
        },
        callback=self.check_login
    )
- Parse them out of the response:
    from scrapy.http.cookies import CookieJar

    cookie_dict = {}
    cookie_jar = CookieJar()
    cookie_jar.extract_cookies(response, response.request)
    # walk the CookieJar's internal structure and flatten the cookies into a dict
    for k, v in cookie_jar._cookies.items():
        for i, j in v.items():
            for m, n in j.items():
                cookie_dict[m] = n.value
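A minimal sketch of the follow-up step, assuming check_login is a method of the same spider, self.cookie_dict was filled as above, and Request is imported from scrapy.http:
    def check_login(self, response):
        # attach the captured cookies to any later request so the logged-in session is kept
        yield Request(
            url='https://dig.chouti.com/',
            cookies=self.cookie_dict,
            callback=self.parse,
        )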
Method 2: meta (pass the 'cookiejar' key in request meta and let the built-in CookiesMiddleware keep the session), as sketched below.
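A minimal sketch of the meta approach, relying on Scrapy's built-in CookiesMiddleware (COOKIES_ENABLED must stay on); the login body reuses the values from Method 1:
    import scrapy
    from scrapy.http import Request

    class ChoutiSpider(scrapy.Spider):
        name = 'chouti'
        start_urls = ['https://dig.chouti.com/']

        def start_requests(self):
            # 'cookiejar' selects a cookie session; CookiesMiddleware stores and resends
            # cookies for every request carrying the same value
            yield Request(
                url='https://dig.chouti.com/',
                meta={'cookiejar': True},
                callback=self.login,
            )

        def login(self, response):
            yield Request(
                url='https://dig.chouti.com/login',
                method='POST',
                body="phone=8613121758648&password=woshiniba&oneMonth=1",
                headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                meta={'cookiejar': True},  # keep using the same cookie session
                callback=self.check_login,
            )

        def check_login(self, response):
            print(response.text)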