  • python16_day37 [Crawlers, part 2]

    I. Asynchronous, non-blocking I/O

       1. A hand-rolled asynchronous, non-blocking requester

    import socket
    import select

    class Request(object):
        def __init__(self, sock, func, url):
            self.sock = sock
            self.func = func
            self.url = url

        def fileno(self):
            # select() accepts any object that exposes a fileno() method
            return self.sock.fileno()

    def async_request(url_list):

        input_list = []
        conn_list = []

        for url in url_list:
            client = socket.socket()
            client.setblocking(False)
            # Initiate the connection without blocking; the "operation in progress"
            # error raised by a non-blocking connect is expected and can be ignored.
            try:
                client.connect((url[0], 80))
            except BlockingIOError:
                pass

            obj = Request(client, url[1], url[0])

            input_list.append(obj)
            conn_list.append(obj)

        while True:
            # Poll the sockets for state changes: [request_obj, request_obj, ...]
            # wlist: connections that have completed and are ready to send
            # rlist: sockets that have response data waiting to be read
            rlist, wlist, elist = select.select(input_list, conn_list, [], 0.05)
            for request_obj in wlist:
                # Connection established: send a minimal HTTP request
                request_obj.sock.sendall(
                    "GET / HTTP/1.0\r\nHost: {0}\r\n\r\n".format(request_obj.url).encode('utf-8'))
                conn_list.remove(request_obj)

            for request_obj in rlist:
                data = request_obj.sock.recv(8096)
                request_obj.func(data)
                request_obj.sock.close()
                input_list.remove(request_obj)

            if not input_list:
                break

      2. Using the custom module above (plus the Twisted equivalent)

    import s2  # the custom module above, saved as s2.py

    def callback1(data):
        print('Baidu responded:', data)

    def callback2(data):
        print('Bing responded:', data)

    url_list = [
        ['www.baidu.com', callback1],
        ['www.bing.com', callback2]
    ]
    s2.async_request(url_list)

    # #################################  twisted #################################
    from twisted.web.client import getPage, defer
    from twisted.internet import reactor


    def all_done(arg):
        reactor.stop()


    def callback1(contents):
        print(contents)

    def callback2(contents):
        print(contents)

    deferred_list = []

    url_list = [
        ('http://www.bing.com', callback1),
        ('http://www.baidu.com', callback2)
    ]
    for url in url_list:
        deferred = getPage(bytes(url[0], encoding='utf8'))
        deferred.addCallback(url[1])
        deferred_list.append(deferred)

    dlist = defer.DeferredList(deferred_list)
    dlist.addBoth(all_done)
    reactor.run()  # start the event loop; without it nothing is downloaded

    II. Scrapy

      

      1. Installation

    pip3 install scrapy

      2. Basic commands

    1. scrapy startproject p1                # create a new project named p1
    2. scrapy genspider chouti chouti.com    # generate a spider named "chouti" for the chouti.com domain
    3. scrapy list                           # list the spiders in the current project
    4. scrapy crawl chouti                   # run the "chouti" spider
    

      3. Project files

    scrapy.cfg   Top-level project configuration (the crawler-specific settings live in settings.py)
    items.py     Data-storage templates for structured data, similar to Django models
    pipelines    Item-processing behaviour, e.g. persisting the structured data
    settings.py  Configuration: crawl depth, concurrency, download delay, etc.
    spiders      Spider directory: create files here and write the crawl rules
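
      For orientation, `scrapy startproject p1` followed by `scrapy genspider chouti chouti.com` leaves a tree roughly like the one below (details vary slightly between Scrapy versions; newer releases also generate a middlewares.py):

    p1/
        scrapy.cfg
        p1/
            __init__.py
            items.py
            pipelines.py
            settings.py
            spiders/
                __init__.py
                chouti.py        # created by scrapy genspider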
    

      4. A first spider

    import scrapy
    from scrapy.selector import HtmlXPathSelector
    from scrapy.http.request import Request


    class DigSpider(scrapy.Spider):
        # Spider name; the crawl command is started with this name
        name = "dig"

        # Allowed domains
        allowed_domains = ["chouti.com"]

        # Start URLs
        start_urls = [
            'http://dig.chouti.com/',
        ]

        has_request_set = {}

        def parse(self, response):
            print(response.url)

            hxs = HtmlXPathSelector(response)
            page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
            for page in page_list:
                page_url = 'http://dig.chouti.com%s' % page
                key = self.md5(page_url)
                if key in self.has_request_set:
                    pass
                else:
                    self.has_request_set[key] = page_url
                    obj = Request(url=page_url, method='GET', callback=self.parse)
                    yield obj

        @staticmethod
        def md5(val):
            import hashlib
            ha = hashlib.md5()
            ha.update(bytes(val, encoding='utf-8'))
            key = ha.hexdigest()
            return key

      Run it: scrapy crawl dig --nolog

      5. Selectors

    from scrapy.selector import Selector, HtmlXPathSelector
    from scrapy.http import HtmlResponse
    html = """<!DOCTYPE html>
    <html>
        <head lang="en">
            <meta charset="UTF-8">
            <title></title>
        </head>
        <body>
            <ul>
                <li class="item-"><a id='i1' href="link.html">first item</a></li>
                <li class="item-0"><a id='i2' href="llink.html">first item</a></li>
                <li class="item-1"><a href="llink2.html">second item<span>vv</span></a></li>
            </ul>
            <div><a href="llink2.html">second item</a></div>
        </body>
    </html>
    """
    response = HtmlResponse(url='http://example.com', body=html, encoding='utf-8')
    # hxs = HtmlXPathSelector(response)
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[2]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[@id]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[@id="i1"]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[@href="link.html"][@id="i1"]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[contains(@href, "link")]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[starts-with(@href, "link")]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]')
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/text()').extract()
    # print(hxs)
    # hxs = Selector(response=response).xpath('//a[re:test(@id, "i\d+")]/@href').extract()
    # print(hxs)
    # hxs = Selector(response=response).xpath('/html/body/ul/li/a/@href').extract()
    # print(hxs)
    # hxs = Selector(response=response).xpath('//body/ul/li/a/@href').extract_first()
    # print(hxs)

    # ul_list = Selector(response=response).xpath('//body/ul/li')
    # for item in ul_list:
    #     v = item.xpath('./a/span')
    #     # or
    #     # v = item.xpath('a/span')
    #     # or
    #     # v = item.xpath('*/a/span')
    #     print(v)
    import scrapy
    from scrapy.selector import HtmlXPathSelector
    from scrapy.http.request import Request
    from scrapy.http.cookies import CookieJar
    from scrapy import FormRequest


    class ChouTiSpider(scrapy.Spider):
        # Spider name; the crawl command is started with this name
        name = "chouti"
        # Allowed domains
        allowed_domains = ["chouti.com"]

        cookie_dict = {}
        has_request_set = {}

        def start_requests(self):
            url = 'http://dig.chouti.com/'
            # return [Request(url=url, callback=self.login)]
            yield Request(url=url, callback=self.login)

        def login(self, response):
            # Collect the session cookies issued with the first response
            cookie_jar = CookieJar()
            cookie_jar.extract_cookies(response, response.request)
            for k, v in cookie_jar._cookies.items():
                for i, j in v.items():
                    for m, n in j.items():
                        self.cookie_dict[m] = n.value

            req = Request(
                url='http://dig.chouti.com/login',
                method='POST',
                headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'},
                body='phone=8615131255089&password=pppppppp&oneMonth=1',
                cookies=self.cookie_dict,
                callback=self.check_login
            )
            yield req

        def check_login(self, response):
            req = Request(
                url='http://dig.chouti.com/',
                method='GET',
                callback=self.show,
                cookies=self.cookie_dict,
                dont_filter=True
            )
            yield req

        def show(self, response):
            # print(response)
            hxs = HtmlXPathSelector(response)
            news_list = hxs.select('//div[@id="content-list"]/div[@class="item"]')
            for new in news_list:
                # temp = new.xpath('div/div[@class="part2"]/@share-linkid').extract()
                link_id = new.xpath('*/div[@class="part2"]/@share-linkid').extract_first()
                yield Request(
                    url='http://dig.chouti.com/link/vote?linksId=%s' % (link_id,),
                    method='POST',
                    cookies=self.cookie_dict,
                    callback=self.do_favor
                )

            page_list = hxs.select('//div[@id="dig_lcpage"]//a[re:test(@href, "/all/hot/recent/\d+")]/@href').extract()
            for page in page_list:

                page_url = 'http://dig.chouti.com%s' % page
                import hashlib
                hash = hashlib.md5()
                hash.update(bytes(page_url, encoding='utf-8'))
                key = hash.hexdigest()
                if key in self.has_request_set:
                    pass
                else:
                    self.has_request_set[key] = page_url
                    yield Request(
                        url=page_url,
                        method='GET',
                        callback=self.show
                    )

        def do_favor(self, response):
            print(response.text)
    Auto-login to Chouti and upvote every post

       6. Structured data (items and pipelines)

    The spider (e.g. spiders/xiahuar.py) yields items defined in items.py:

    import scrapy


    class XiaoHuarItem(scrapy.Item):
        name = scrapy.Field()
        school = scrapy.Field()
        url = scrapy.Field()
    items.py
    import json
    import os
    import requests


    class JsonPipeline(object):
        def __init__(self):
            self.file = open('xiaohua.txt', 'w')

        def process_item(self, item, spider):
            v = json.dumps(dict(item), ensure_ascii=False)
            self.file.write(v)
            self.file.write('\n')
            self.file.flush()
            return item


    class FilePipeline(object):
        def __init__(self):
            if not os.path.exists('imgs'):
                os.makedirs('imgs')

        def process_item(self, item, spider):
            response = requests.get(item['url'], stream=True)
            file_name = '%s_%s.jpg' % (item['name'], item['school'])
            with open(os.path.join('imgs', file_name), mode='wb') as f:
                f.write(response.content)
            return item
    pipelines.py
    ITEM_PIPELINES = {
       'spider1.pipelines.JsonPipeline': 100,
       'spider1.pipelines.FilePipeline': 300,
    }
    # The integer after each entry determines the running order: items pass through the
    # pipelines from the lowest number to the highest. Values are conventionally kept in 0-1000.
    settings.py

      7. Custom pipelines

    from scrapy.exceptions import DropItem

    class CustomPipeline(object):
        def __init__(self, v):
            self.value = v

        def process_item(self, item, spider):
            # Do the actual work / persistence here.

            # Returning the item hands it on to the next pipeline.
            return item

            # To discard the item so no later pipeline sees it:
            # raise DropItem()


        @classmethod
        def from_crawler(cls, crawler):
            """
            Called at start-up to create the pipeline object.
            :param crawler:
            :return:
            """
            val = crawler.settings.getint('MMMM')
            return cls(val)

        def open_spider(self, spider):
            """
            Called when the spider starts.
            :param spider:
            :return:
            """
            print('000000')

        def close_spider(self, spider):
            """
            Called when the spider closes.
            :param spider:
            :return:
            """
            print('111111')

    Custom pipeline

      8. Middleware

    class SpiderMiddleware(object):

        def process_spider_input(self, response, spider):
            """
            Called after the download finishes, before the response is handed to parse()
            :param response:
            :param spider:
            :return:
            """
            pass

        def process_spider_output(self, response, result, spider):
            """
            Called with whatever the spider returns
            :param response:
            :param result:
            :param spider:
            :return: must return an iterable of Request or Item objects
            """
            return result

        def process_spider_exception(self, response, exception, spider):
            """
            Called on exceptions
            :param response:
            :param exception:
            :param spider:
            :return: None to pass the exception on to the next middleware; an iterable of
                     Response or Item objects to hand over to the scheduler or pipelines
            """
            return None


        def process_start_requests(self, start_requests, spider):
            """
            Called when the spider starts
            :param start_requests:
            :param spider:
            :return: an iterable of Request objects
            """
            return start_requests

    Spider middleware
    class DownMiddleware1(object):
        def process_request(self, request, spider):
            """
            Called for every request about to be downloaded, through all downloader middlewares
            :param request:
            :param spider:
            :return:
                None: continue to the next middleware and download as usual
                Response object: stop calling process_request and start calling process_response
                Request object: stop the middleware chain and send the Request back to the scheduler
                raise IgnoreRequest: stop calling process_request and start calling process_exception
            """
            pass


        def process_response(self, request, response, spider):
            """
            Called once the download has finished, before the response reaches the spider
            :param request:
            :param response:
            :param spider:
            :return:
                Response object: passed on to the other middlewares' process_response
                Request object: stop the middleware chain; the request is rescheduled for download
                raise IgnoreRequest: Request.errback is called
            """
            print('response1')
            return response

        def process_exception(self, request, exception, spider):
            """
            Called when the download handler or a process_request() (downloader middleware) raises an exception
            :param request:
            :param exception:
            :param spider:
            :return:
                None: pass the exception on to the next middleware
                Response object: stop calling any further process_exception methods
                Request object: stop the middleware chain; the request is rescheduled for download
            """
            return None

    Downloader middleware

      9. Custom commands

      A. Create a directory (any name) at the same level as spiders/, and create crawlall.py inside it:

    from scrapy.commands import ScrapyCommand
    from scrapy.utils.project import get_project_settings


    class Command(ScrapyCommand):

        requires_project = True

        def syntax(self):
            return '[options]'

        def short_desc(self):
            return 'Runs all of the spiders'

        def run(self, args, opts):
            spider_list = self.crawler_process.spiders.list()
            for name in spider_list:
                self.crawler_process.crawl(name, **opts.__dict__)
            self.crawler_process.start()
    crawlall.py

      B. In settings.py add: COMMANDS_MODULE = '<project name>.<directory name>'

      C. Run: scrapy crawlall
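
      For example, if the project is called p1 and the directory from step A is called commands (both names are only illustrative), the setting would be:

    # settings.py
    COMMANDS_MODULE = 'p1.commands'

      After that, `scrapy crawlall` launches every spider that `scrapy list` reports.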

     

      10. Avoiding duplicate requests

        By default Scrapy de-duplicates requests with scrapy.dupefilter.RFPDupeFilter; the related settings are listed below (a custom-filter sketch follows them):

    DUPEFILTER_CLASS = 'scrapy.dupefilter.RFPDupeFilter'
    DUPEFILTER_DEBUG = False
    JOBDIR = "directory where the seen-requests log is stored, e.g. /root/"  # the final path becomes /root/requests.seen
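
      To plug in your own de-duplication logic, point DUPEFILTER_CLASS at a class implementing the dupe-filter interface; the settings file in section 11 references such a class as step8_king.duplication.RepeatUrl. A minimal sketch, assuming the scrapy.dupefilters.BaseDupeFilter interface (older releases expose it as scrapy.dupefilter):

    from scrapy.dupefilters import BaseDupeFilter
    from scrapy.utils.request import request_fingerprint


    class RepeatUrl(BaseDupeFilter):
        def __init__(self):
            # In-memory set of request fingerprints; swap in Redis or a file
            # to share state across distributed crawlers
            self.visited = set()

        @classmethod
        def from_settings(cls, settings):
            return cls()

        def request_seen(self, request):
            # Return True to drop the request, False to let it through to the scheduler
            fp = request_fingerprint(request)
            if fp in self.visited:
                return True
            self.visited.add(fp)
            return False

        def open(self):
            # Called when the spider starts
            pass

        def close(self, reason):
            # Called when the spider closes
            pass

        def log(self, request, spider):
            # Called for every request that was filtered out
            pass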
    

      11. Other settings

    # -*- coding: utf-8 -*-

    # Scrapy settings for step8_king project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     http://doc.scrapy.org/en/latest/topics/settings.html
    #     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

    # 1. Bot name
    BOT_NAME = 'step8_king'

    # 2. Spider module paths
    SPIDER_MODULES = ['step8_king.spiders']
    NEWSPIDER_MODULE = 'step8_king.spiders'

    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    # 3. Client User-Agent header
    # USER_AGENT = 'step8_king (+http://www.yourdomain.com)'

    # Obey robots.txt rules
    # 4. Whether to obey robots.txt
    # ROBOTSTXT_OBEY = False

    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    # 5. Maximum concurrent requests
    # CONCURRENT_REQUESTS = 4

    # Configure a delay for requests for the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # 6. Download delay in seconds
    # DOWNLOAD_DELAY = 2


    # The download delay setting will honor only one of:
    # 7. Concurrent requests per domain; the download delay is also applied per domain
    # CONCURRENT_REQUESTS_PER_DOMAIN = 2
    # Concurrent requests per IP; if set, CONCURRENT_REQUESTS_PER_DOMAIN is ignored
    # and the download delay is applied per IP instead
    # CONCURRENT_REQUESTS_PER_IP = 3

    # Disable cookies (enabled by default)
    # 8. Whether cookies are enabled (handled via a cookiejar)
    # COOKIES_ENABLED = True
    # COOKIES_DEBUG = True

    # Disable Telnet Console (enabled by default)
    # 9. The Telnet console lets you inspect and control the running crawler;
    #    connect with `telnet <ip> <port>` and issue commands
    # TELNETCONSOLE_ENABLED = True
    # TELNETCONSOLE_HOST = '127.0.0.1'
    # TELNETCONSOLE_PORT = [6023,]

    # 10. Default request headers
    # Override the default request headers:
    # DEFAULT_REQUEST_HEADERS = {
    #     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #     'Accept-Language': 'en',
    # }


    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    # 11. Item pipelines that process the scraped items
    # ITEM_PIPELINES = {
    #    'step8_king.pipelines.JsonPipeline': 700,
    #    'step8_king.pipelines.FilePipeline': 500,
    # }


    # 12. Custom extensions, invoked via signals
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    # EXTENSIONS = {
    #     # 'step8_king.extensions.MyExtension': 500,
    # }


    # 13. Maximum crawl depth; the current depth is available via request.meta; 0 means unlimited
    # DEPTH_LIMIT = 3

    # 14. Crawl order: 0 = depth-first (LIFO, the default); 1 = breadth-first (FIFO)

    # Last in, first out: depth-first
    # DEPTH_PRIORITY = 0
    # SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleLifoDiskQueue'
    # SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.LifoMemoryQueue'
    # First in, first out: breadth-first

    # DEPTH_PRIORITY = 1
    # SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'
    # SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'

    # 15. Scheduler queue
    # SCHEDULER = 'scrapy.core.scheduler.Scheduler'
    # from scrapy.core.scheduler import Scheduler


    # 16. URL de-duplication
    # DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'


    # Enable and configure the AutoThrottle extension (disabled by default)
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html

    """
    17. The AutoThrottle algorithm
        from scrapy.contrib.throttle import AutoThrottle
        Auto-throttling settings:
        1. Minimum delay: DOWNLOAD_DELAY
        2. Maximum delay: AUTOTHROTTLE_MAX_DELAY
        3. Initial download delay: AUTOTHROTTLE_START_DELAY
        4. When a request finishes, its connection latency is measured, i.e. the time
           from opening the connection to receiving the response headers
        5. Target concurrency used in the calculation: AUTOTHROTTLE_TARGET_CONCURRENCY
        target_delay = latency / self.target_concurrency
        new_delay = (slot.delay + target_delay) / 2.0  # slot.delay is the previous delay
        new_delay = max(target_delay, new_delay)
        new_delay = min(max(self.mindelay, new_delay), self.maxdelay)
        slot.delay = new_delay
    """

    # Enable AutoThrottle
    # AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    # AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    # AUTOTHROTTLE_MAX_DELAY = 10
    # The average number of requests Scrapy should be sending in parallel to each remote server
    # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0

    # Enable showing throttling stats for every response received:
    # AUTOTHROTTLE_DEBUG = True

    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings


    """
    18. Enabling the HTTP cache
        Caches requests/responses that have already been sent, so they can be reused later.

        from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware
        from scrapy.extensions.httpcache import DummyPolicy
        from scrapy.extensions.httpcache import FilesystemCacheStorage
    """
    # Enable the HTTP cache
    # HTTPCACHE_ENABLED = True

    # Caching policy: cache every request; repeated requests are served straight from the cache
    # HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
    # Caching policy: cache according to HTTP response headers such as Cache-Control and Last-Modified
    # HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"

    # Cache expiration time
    # HTTPCACHE_EXPIRATION_SECS = 0

    # Cache directory
    # HTTPCACHE_DIR = 'httpcache'

    # HTTP status codes to exclude from the cache
    # HTTPCACHE_IGNORE_HTTP_CODES = []

    # Cache storage backend
    # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

    """
    19. Proxies; configured through environment variables
        from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware

        Option 1: use the default middleware
            os.environ
            {
                http_proxy:http://root:woshiniba@192.168.11.11:9999/
                https_proxy:http://192.168.11.11:9999/
            }
        Option 2: use a custom downloader middleware

        def to_bytes(text, encoding=None, errors='strict'):
            if isinstance(text, bytes):
                return text
            if not isinstance(text, six.string_types):
                raise TypeError('to_bytes must receive a unicode, str or bytes '
                                'object, got %s' % type(text).__name__)
            if encoding is None:
                encoding = 'utf-8'
            return text.encode(encoding, errors)

        class ProxyMiddleware(object):
            def process_request(self, request, spider):
                PROXIES = [
                    {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                    {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                    {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                    {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                    {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                    {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
                ]
                proxy = random.choice(PROXIES)
                if proxy['user_pass'] is not None:
                    request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])
                    encoded_user_pass = base64.encodestring(to_bytes(proxy['user_pass']))
                    request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                    print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
                else:
                    print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                    request.meta['proxy'] = to_bytes("http://%s" % proxy['ip_port'])

        DOWNLOADER_MIDDLEWARES = {
           'step8_king.middlewares.ProxyMiddleware': 500,
        }

    """

    """
    20. HTTPS access
        There are two cases:
        1. The target site uses a certificate from a trusted CA (supported by default)
            DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
            DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"

        2. The target site uses a custom (e.g. self-signed) certificate
            DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
            DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"

            # https.py
            from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
            from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)

            class MySSLFactory(ScrapyClientContextFactory):
                def getCertificateOptions(self):
                    from OpenSSL import crypto
                    v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                    v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                    return CertificateOptions(
                        privateKey=v1,  # pKey object
                        certificate=v2,  # X509 object
                        verify=False,
                        method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                    )
        Other notes:
            Related classes
                scrapy.core.downloader.handlers.http.HttpDownloadHandler
                scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
                scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
            Related settings
                DOWNLOADER_HTTPCLIENTFACTORY
                DOWNLOADER_CLIENTCONTEXTFACTORY

    """


    """
    21. Spider middleware
        class SpiderMiddleware(object):

            def process_spider_input(self, response, spider):
                '''
                Called after the download finishes, before the response is handed to parse()
                :param response:
                :param spider:
                :return:
                '''
                pass

            def process_spider_output(self, response, result, spider):
                '''
                Called with whatever the spider returns
                :param response:
                :param result:
                :param spider:
                :return: must return an iterable of Request or Item objects
                '''
                return result

            def process_spider_exception(self, response, exception, spider):
                '''
                Called on exceptions
                :param response:
                :param exception:
                :param spider:
                :return: None to pass the exception on to the next middleware; an iterable of
                         Response or Item objects to hand over to the scheduler or pipelines
                '''
                return None


            def process_start_requests(self, start_requests, spider):
                '''
                Called when the spider starts
                :param start_requests:
                :param spider:
                :return: an iterable of Request objects
                '''
                return start_requests

        Built-in spider middlewares:
            'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
            'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
            'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
            'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
            'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,

    """
    # from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    SPIDER_MIDDLEWARES = {
       # 'step8_king.middlewares.SpiderMiddleware': 543,
    }


    """
    22. Downloader middleware
        class DownMiddleware1(object):
            def process_request(self, request, spider):
                '''
                Called for every request about to be downloaded, through all downloader middlewares
                :param request:
                :param spider:
                :return:
                    None: continue to the next middleware and download as usual
                    Response object: stop calling process_request and start calling process_response
                    Request object: stop the middleware chain and send the Request back to the scheduler
                    raise IgnoreRequest: stop calling process_request and start calling process_exception
                '''
                pass


            def process_response(self, request, response, spider):
                '''
                Called once the download has finished, before the response reaches the spider
                :param request:
                :param response:
                :param spider:
                :return:
                    Response object: passed on to the other middlewares' process_response
                    Request object: stop the middleware chain; the request is rescheduled for download
                    raise IgnoreRequest: Request.errback is called
                '''
                print('response1')
                return response

            def process_exception(self, request, exception, spider):
                '''
                Called when the download handler or a process_request() (downloader middleware) raises an exception
                :param request:
                :param exception:
                :param spider:
                :return:
                    None: pass the exception on to the next middleware
                    Response object: stop calling any further process_exception methods
                    Request object: stop the middleware chain; the request is rescheduled for download
                '''
                return None


        Default downloader middlewares
        {
            'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
            'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
            'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
            'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
            'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
            'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
            'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
            'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
            'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
            'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
            'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
            'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
            'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
            'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
        }

    """
    # from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    # DOWNLOADER_MIDDLEWARES = {
    #    'step8_king.middlewares.DownMiddleware1': 100,
    #    'step8_king.middlewares.DownMiddleware2': 500,
    # }

    settings.py

      

  • Original post: https://www.cnblogs.com/weibiao/p/7715986.html