  • Scrapy basic components, part 7: the Scrapy scheduler, scheduler middleware, and custom schedulers

    I. The Scheduler

    Configuration

    SCHEDULER = 'scrapy.core.scheduler.Scheduler'  # the Scheduler class in scrapy/core/scheduler.py
    # Set SCHEDULER to a different dotted path to use a custom scheduler.
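
    For reference, this is roughly the interface Scrapy expects from whatever class SCHEDULER points to. The sketch below is a minimal, hypothetical example (the module path myproject.scheduler and the class name MyScheduler are made up; it keeps a plain FIFO queue in memory and does no de-duplication):

    # myproject/scheduler.py -- hypothetical minimal scheduler sketch
    from collections import deque

    class MyScheduler:
        def __init__(self, stats=None):
            self.queue = deque()          # plain in-memory FIFO, no dupefilter
            self.stats = stats

        @classmethod
        def from_crawler(cls, crawler):
            # Scrapy builds the scheduler through this factory method
            return cls(stats=crawler.stats)

        def open(self, spider):
            self.spider = spider          # called when the spider opens

        def close(self, reason):
            pass                          # called when the spider closes

        def enqueue_request(self, request):
            self.queue.append(request)    # return False here to drop a request
            return True

        def next_request(self):
            return self.queue.popleft() if self.queue else None

        def has_pending_requests(self):
            return len(self.queue) > 0

        def __len__(self):
            return len(self.queue)

    With that file in place, SCHEDULER = 'myproject.scheduler.MyScheduler' in settings.py would make Scrapy use it instead of the default scheduler.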

    Source code

    import os
    import json
    import logging
    import warnings
    from os.path import join, exists
    
    from queuelib import PriorityQueue
    
    from scrapy.utils.misc import load_object, create_instance
    from scrapy.utils.job import job_dir
    from scrapy.utils.deprecate import ScrapyDeprecationWarning
    
    
    logger = logging.getLogger(__name__)
    
    
    class Scheduler:
        """
        Scrapy Scheduler. It allows to enqueue requests and then get
        a next request to download. Scheduler is also handling duplication
        filtering, via dupefilter.
        Prioritization and queueing is not performed by the Scheduler.
        User sets ``priority`` field for each Request, and a PriorityQueue
        (defined by :setting:`SCHEDULER_PRIORITY_QUEUE`) uses these priorities
        to dequeue requests in a desired order.
        Scheduler uses two PriorityQueue instances, configured to work in-memory
        and on-disk (optional). When on-disk queue is present, it is used by
        default, and an in-memory queue is used as a fallback for cases where
        a disk queue can't handle a request (can't serialize it).
        :setting:`SCHEDULER_MEMORY_QUEUE` and
        :setting:`SCHEDULER_DISK_QUEUE` allow to specify lower-level queue classes
        which PriorityQueue instances would be instantiated with, to keep requests
        on disk and in memory respectively.
        Overall, Scheduler is an object which holds several PriorityQueue instances
        (in-memory and on-disk) and implements fallback logic for them.
        Also, it handles dupefilters.
        """
        def __init__(self, dupefilter, jobdir=None, dqclass=None, mqclass=None,
                     logunser=False, stats=None, pqclass=None, crawler=None):
            self.df = dupefilter
            self.dqdir = self._dqdir(jobdir)
            self.pqclass = pqclass  # priority queue class
            self.dqclass = dqclass  # disk queue class
            self.mqclass = mqclass  # memory queue class
            self.logunser = logunser
            self.stats = stats
            self.crawler = crawler
        # The Scheduler instance is created here, via Scheduler.from_crawler
        @classmethod
        def from_crawler(cls, crawler):
            settings = crawler.settings
            dupefilter_cls = load_object(settings['DUPEFILTER_CLASS'])
            dupefilter = create_instance(dupefilter_cls, settings, crawler)
            pqclass = load_object(settings['SCHEDULER_PRIORITY_QUEUE'])
            if pqclass is PriorityQueue:
                warnings.warn("SCHEDULER_PRIORITY_QUEUE='queuelib.PriorityQueue'"
                              " is no longer supported because of API changes; "
                              "please use 'scrapy.pqueues.ScrapyPriorityQueue'",
                              ScrapyDeprecationWarning)
                from scrapy.pqueues import ScrapyPriorityQueue
                pqclass = ScrapyPriorityQueue

            dqclass = load_object(settings['SCHEDULER_DISK_QUEUE'])
            mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
            logunser = settings.getbool('SCHEDULER_DEBUG')
            return cls(dupefilter, jobdir=job_dir(settings), logunser=logunser,
                       stats=crawler.stats, pqclass=pqclass, dqclass=dqclass,
                       mqclass=mqclass, crawler=crawler)

    # The dupefilter is DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'; it computes a fingerprint for each request and uses it for de-duplication.
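
    As a quick illustration of how that fingerprinting behaves (request_fingerprint lives in scrapy.utils.request in the Scrapy versions this source corresponds to; the URLs are made-up examples):

    from scrapy.http import Request
    from scrapy.utils.request import request_fingerprint

    r1 = Request('http://example.com/page?id=1&ref=a')
    r2 = Request('http://example.com/page?ref=a&id=1')   # same URL, query params reordered

    # The fingerprint is a hash over the method, the canonicalized URL and the body,
    # so these two requests count as duplicates for RFPDupeFilter.
    print(request_fingerprint(r1) == request_fingerprint(r2))  # True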

    
    

    # There are also three queue classes: pqclass is a priority queue, dqclass a disk queue, and mqclass an in-memory queue.

    
    

    # The two methods to focus on are enqueue_request and next_request: one puts a request into the queue, the other takes the next request out for processing.



        def has_pending_requests(self):
            return len(self) > 0

        def open(self, spider):
            self.spider = spider
            self.mqs = self._mq()
            self.dqs = self._dq() if self.dqdir else None
            return self.df.open()

        def close(self, reason):
            if self.dqs:
                state = self.dqs.close()
                self._write_dqs_state(self.dqdir, state)
            return self.df.close(reason)
        # enqueue_request pushes to the disk queue first; only if there is no disk queue or the push fails does it fall back to the memory queue
        def enqueue_request(self, request):
            if not request.dont_filter and self.df.request_seen(request):
                self.df.log(request, self.spider)
                return False
            dqok = self._dqpush(request)
            if dqok:
                self.stats.inc_value('scheduler/enqueued/disk', spider=self.spider)
            else:
                self._mqpush(request)
                self.stats.inc_value('scheduler/enqueued/memory', spider=self.spider)
            self.stats.inc_value('scheduler/enqueued', spider=self.spider)
            return True
        # next_request pops from the memory queue first, then falls back to the disk queue
        def next_request(self):
            request = self.mqs.pop()
            if request:
                self.stats.inc_value('scheduler/dequeued/memory', spider=self.spider)
            else:
                request = self._dqpop()
                if request:
                    self.stats.inc_value('scheduler/dequeued/disk', spider=self.spider)
            if request:
                self.stats.inc_value('scheduler/dequeued', spider=self.spider)
            return request

        def __len__(self):
            return len(self.dqs) + len(self.mqs) if self.dqs else len(self.mqs)

        def _dqpush(self, request):
            if self.dqs is None:
                return
            try:
                self.dqs.push(request)
            except ValueError as e:  # non serializable request
                if self.logunser:
                    msg = ("Unable to serialize request: %(request)s - reason:"
                           " %(reason)s - no more unserializable requests will be"
                           " logged (stats being collected)")
                    logger.warning(msg, {'request': request, 'reason': e},
                                   exc_info=True, extra={'spider': self.spider})
                    self.logunser = False
                self.stats.inc_value('scheduler/unserializable',
                                     spider=self.spider)
                return
            else:
                return True

        def _mqpush(self, request):
            self.mqs.push(request)

        def _dqpop(self):
            if self.dqs:
                return self.dqs.pop()

        def _mq(self):
            """ Create a new priority queue instance, with in-memory storage """
            return create_instance(self.pqclass,
                                   settings=None,
                                   crawler=self.crawler,
                                   downstream_queue_cls=self.mqclass,
                                   key='')

        def _dq(self):
            """ Create a new priority queue instance, with disk storage """
            state = self._read_dqs_state(self.dqdir)
            q = create_instance(self.pqclass,
                                settings=None,
                                crawler=self.crawler,
                                downstream_queue_cls=self.dqclass,
                                key=self.dqdir,
                                startprios=state)
            if q:
                logger.info("Resuming crawl (%(queuesize)d requests scheduled)",
                            {'queuesize': len(q)}, extra={'spider': self.spider})
            return q

        def _dqdir(self, jobdir):
            """ Return a folder name to keep disk queue state at """
            if jobdir:
                dqdir = join(jobdir, 'requests.queue')
                if not exists(dqdir):
                    os.makedirs(dqdir)
                return dqdir
        # One more point: when the crawl is stopped and restarted, the unfinished in-memory queue can be persisted to disk and resumed later by running: scrapy crawl somespider -s JOBDIR=crawls/somespider-1
        def _read_dqs_state(self, dqdir):
            path = join(dqdir, 'active.json')
            if not exists(path):
                return ()
            with open(path) as f:
                return json.load(f)

        def _write_dqs_state(self, dqdir, state):
            with open(join(dqdir, 'active.json'), 'w') as f:
                json.dump(state, f)
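
    To illustrate the persistence mentioned above: when the crawl is run with -s JOBDIR=crawls/somespider-1 and later stopped, the pending requests are serialized under <JOBDIR>/requests.queue/ and the queue state goes into active.json, which _read_dqs_state() loads on the next start. A small sketch of inspecting that state (the jobdir path is just the example from the comment above, and the exact contents of active.json depend on the priorities that were queued):

    import json
    from os.path import join, exists

    jobdir = 'crawls/somespider-1'                       # the JOBDIR passed on the command line
    state_file = join(jobdir, 'requests.queue', 'active.json')

    # active.json records which priority sub-queues were active on close,
    # so _dq() can recreate them via startprios=state when the crawl resumes.
    if exists(state_file):
        with open(state_file) as f:
            print(json.load(f))                          # e.g. a list of priorities such as [0]
    else:
        print('no persisted disk-queue state yet')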

    II. Scheduler Middleware

    Early Scrapy releases (around version 0.9) shipped a scrapy.contrib.schedulermiddleware.SchedulerMiddleware class that could be used here; it is no longer present in later versions.

    III. Custom Scheduler

    Configuration in settings.py:
    '''When scrapy-redis is configured as below, the scheduler in use is the one shipped with scrapy-redis'''
    ########## Connection settings ##########
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379
    # REDIS_PARAMS = {'password': 'xxxx'}    # Redis connection parameters; default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
    REDIS_ENCODING = "utf-8"
    
    # REDIS_URL = 'redis://user:pass@hostname:6379'  # connection URL (takes priority over the settings above)
    ########## Scheduler ##########
    # from scrapy_pro1.scheduler_test import Self_Scheduler
    # SCHEDULER = 'scrapy_pro1.scheduler_test.Self_Scheduler'  ## a custom scheduler of your own can be plugged in here
    
    SCHEDULER = 'scrapy_redis.scheduler.Scheduler'  # the scheduler shipped with scrapy-redis
    ## i.e. the scheduler class provided by scrapy-redis is used instead of the default one
    SCHEDULER_QUEUE_KEY = '%(spider)s:requests'  # Redis key under which the scheduler stores each spider's requests
    # every spider gets its own queue and history
    '''
    Redis ends up holding the records of every spider, e.g. for a spider named "chouti":
    chouti:requests (each entry wraps a url and a callback) -> the pending requests
    Redis cannot store a Request object directly, so the request data is serialized
    (pickle.dumps) into a string before it is saved under that key; reading the data
    back goes through the matching Redis commands, e.g. conn.smembers('chouti:dupefilter')
    for the fingerprint set.
    '''
    SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"  # serializer for the data saved to Redis; pickle by default
    ## the Request object is serialized before being stored
    SCHEDULER_PERSIST = False  # keep the scheduler queue and dupefilter records on close? True = keep, False = clear
    ## i.e. whether the data survives after the spider closes
    SCHEDULER_FLUSH_ON_START = True  # clear the scheduler queue and dupefilter records on start? True = clear, False = keep
    ## flush (or not) when the spider starts
    # SCHEDULER_IDLE_BEFORE_CLOSE = 10  # how long to block when the scheduler is asked for a request and the queue is empty
    # i.e. the maximum wait when there is no data
    SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'  # Redis key for the de-duplication records, e.g. chouti:dupefilter
    ## each spider's seen-request fingerprints live under this key
    SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'  # the class implementing the de-duplication rules
    START_URLS_KEY = '%(name)s:start_urls'
    ## the key the spider reads its start URLs from
    REDIS_START_URLS_AS_SET = False  # start URLs are stored in a list (False) or a set (True)
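
    To see these settings in action, here is a minimal sketch of a scrapy-redis spider (the spider name "chouti" follows the example keys above, the URL is illustrative, and scrapy_redis must be installed):

    # spiders/chouti.py -- minimal scrapy-redis spider sketch
    from scrapy_redis.spiders import RedisSpider

    class ChoutiSpider(RedisSpider):
        name = 'chouti'
        # matches START_URLS_KEY = '%(name)s:start_urls' above
        redis_key = 'chouti:start_urls'

        def parse(self, response):
            yield {'url': response.url, 'title': response.css('title::text').get()}

    The crawl is then seeded by pushing a URL into that key, e.g. lpush chouti:start_urls https://dig.chouti.com/ in redis-cli (a list rather than a set, because REDIS_START_URLS_AS_SET = False); every worker process running this spider pulls its requests from the shared Redis queue.
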
    Source code of the scrapy-redis scheduler:
    from scrapy_redis.scheduler import Scheduler
    import importlib
    import six  # used for type checks, e.g. six.string_types
    
    from scrapy.utils.misc import load_object
    
    from . import connection, defaults
    
    
    # TODO: add SCRAPY_JOB support.
    class Scheduler(object):
        """Redis-based scheduler
    
        Settings
        --------
        SCHEDULER_PERSIST : bool (default: False)
            Whether to persist or clear redis queue.
        SCHEDULER_FLUSH_ON_START : bool (default: False)
            Whether to flush redis queue on start.
        SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
            How many seconds to wait before closing if no message is received.
        SCHEDULER_QUEUE_KEY : str
            Scheduler redis key.
        SCHEDULER_QUEUE_CLASS : str
            Scheduler queue class.
        SCHEDULER_DUPEFILTER_KEY : str
            Scheduler dupefilter redis key.
        SCHEDULER_DUPEFILTER_CLASS : str
            Scheduler dupefilter class.
        SCHEDULER_SERIALIZER : str
            Scheduler serializer.
    
        """
    
        def __init__(self, server,
                     persist=False,
                     flush_on_start=False,
                     queue_key=defaults.SCHEDULER_QUEUE_KEY,
                     queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
                     dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
                     dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
                     idle_before_close=0,
                     serializer=None):
            """Initialize scheduler.
    
            Parameters
            ----------
            server : Redis
                The redis server instance.
            persist : bool
                Whether to flush requests when closing. Default is False.
            flush_on_start : bool
                Whether to flush requests on start. Default is False.
            queue_key : str
                Requests queue key.
            queue_cls : str
                Importable path to the queue class.
            dupefilter_key : str
                Duplicates filter key.
            dupefilter_cls : str
                Importable path to the dupefilter class.
            idle_before_close : int
                Timeout before giving up.
    
            """
            if idle_before_close < 0:
                raise TypeError("idle_before_close cannot be negative")
    
            self.server = server
            self.persist = persist
            self.flush_on_start = flush_on_start
            self.queue_key = queue_key
            self.queue_cls = queue_cls
            self.dupefilter_cls = dupefilter_cls
            self.dupefilter_key = dupefilter_key
            self.idle_before_close = idle_before_close
            self.serializer = serializer
            self.stats = None
    
        def __len__(self):
            return len(self.queue)
    
        @classmethod
        def from_settings(cls, settings):  # settings holds the project configuration passed in
            kwargs = {
                'persist': settings.getbool('SCHEDULER_PERSIST'),
                'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
                'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
            }
    
            # If these values are missing, it means we want to use the defaults.
            optional = {
                # TODO: Use custom prefixes for this settings to note that are
                # specific to scrapy-redis.
                'queue_key': 'SCHEDULER_QUEUE_KEY',
                'queue_cls': 'SCHEDULER_QUEUE_CLASS',
                'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',
                # We use the default setting name to keep compatibility.
                'dupefilter_cls': 'DUPEFILTER_CLASS',
                'serializer': 'SCHEDULER_SERIALIZER',
            }
            ## read the optional settings above: look up each name in settings and take its value
            for name, setting_name in optional.items():
                val = settings.get(setting_name)  # the value you configured in settings, if any
                if val:
                    kwargs[name] = val
    
            # Support serializer as a path to a module.
            if isinstance(kwargs.get('serializer'), six.string_types):
                kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
    
            server = connection.from_settings(settings)  # build the Redis connection from the connection settings you configured
            # Ensure the connection is working.
            server.ping()
    
            return cls(server=server, **kwargs)  # instantiate the scheduler (cls is this class); the crawl proper starts from here
    
        @classmethod
        def from_crawler(cls, crawler):  # when the scrapy-redis scheduler runs, the configuration arrives via crawler.settings
            instance = cls.from_settings(crawler.settings)  # crawler.settings is a Settings object, e.g. <scrapy.settings.Settings object at 0x00000265B2E41940>
            '''its values can be read with e.g. crawler.settings.get("host")'''
            # FIXME: for now, stats are only supported from this constructor
            instance.stats = crawler.stats
            return instance
    
        def open(self, spider):
            self.spider = spider
    
            try:
                self.queue = load_object(self.queue_cls)(
                    server=self.server,
                    spider=spider,
                    key=self.queue_key % {'spider': spider.name},
                    serializer=self.serializer,
                )
            except TypeError as e:
                raise ValueError("Failed to instantiate queue class '%s': %s",
                                 self.queue_cls, e)
    
            try:
                self.df = load_object(self.dupefilter_cls)(
                    server=self.server,
                    key=self.dupefilter_key % {'spider': spider.name},
                    debug=spider.settings.getbool('DUPEFILTER_DEBUG'),
                )
            except TypeError as e:
                raise ValueError("Failed to instantiate dupefilter class '%s': %s",
                                 self.dupefilter_cls, e)
    
            if self.flush_on_start:
                self.flush()
            # notice if there are requests already in the queue to resume the crawl
            if len(self.queue):
                spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))
    
        def close(self, reason):
            if not self.persist:
                self.flush()
    
        def flush(self):
            self.df.clear()
            self.queue.clear()
    
    
        ## the methods below do the actual crawling work; everything above only reads configuration
        def enqueue_request(self, request):
            if not request.dont_filter and self.df.request_seen(request):
                # dont_filter on the Request is honoured first; otherwise the
                # dupefilter checks whether this request has been seen before
                self.df.log(request, self.spider)
                return False
            # an already-seen request is not scheduled again (False was returned above)
            if self.stats:
                # stats collection enabled: count this enqueue
                self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
            # an unseen request is pushed into the Redis queue for the downloader to pick up
            self.queue.push(request)
            return True  # the request was accepted by the scheduler
    
        def next_request(self):
            block_pop_timeout = self.idle_before_close
            request = self.queue.pop(block_pop_timeout)
            if request and self.stats:
                self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
            return request
    
        def has_pending_requests(self):
            return len(self) > 0
    Inside the scrapy-redis scheduler:
    The scheduler object is instantiated when the crawl is started, e.g.: scrapy crawl baidu --nolog
    from_crawler is executed first:
    @classmethod
    def from_crawler(cls, crawler):  # when the scrapy-redis scheduler runs, the configuration arrives via crawler.settings
        instance = cls.from_settings(crawler.settings)  # crawler.settings is a Settings object, e.g. <scrapy.settings.Settings object at 0x00000265B2E41940>
        '''its values can be read with e.g. crawler.settings.get("host")'''
        # FIXME: for now, stats are only supported from this constructor
        instance.stats = crawler.stats
        return instance  # from_settings was called above with the settings as its argument
    Next, from_settings runs (it receives the settings, i.e. the configuration):
    Its job is to read the configuration:


    @classmethod
    def from_settings(cls, settings):  # settings holds the project configuration passed in
        kwargs = {
            'persist': settings.getbool('SCHEDULER_PERSIST'),
            'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
            'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
        }
    
        # If these values are missing, it means we want to use the defaults.
        optional = {
            # TODO: Use custom prefixes for this settings to note that are
            # specific to scrapy-redis.
            'queue_key': 'SCHEDULER_QUEUE_KEY',
            'queue_cls': 'SCHEDULER_QUEUE_CLASS',
            'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',
            # We use the default setting name to keep compatibility.
            'dupefilter_cls': 'DUPEFILTER_CLASS',
            'serializer': 'SCHEDULER_SERIALIZER',
        }
        ## read the optional settings above: look up each name in settings and take its value
        for name, setting_name in optional.items():
            val = settings.get(setting_name)  # the value you configured in settings, if any
            if val:
                kwargs[name] = val
    '''
    val = settings.get(setting_name) pulls the matching value out of the settings;
    the keys looped over come from the optional dict above, and their values are
    the setting names to look up in settings.
    kwargs[name] = val then stores the result.
    '''
        # Support serializer as a path to a module.
        ## serialization: the data stored under the spider's Redis keys is serialized with this module
        if isinstance(kwargs.get('serializer'), six.string_types):
            kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
    ## next, the connection settings are read from settings in order to connect to Redis; in settings they look like:
    '''
    REDIS_HOST = '127.0.0.1'
    REDIS_PORT = 6379
    # REDIS_PARAMS = {'password': 'xxxx'}    # Redis connection parameters; default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
    REDIS_ENCODING = "utf-8"

    # REDIS_URL = 'redis://user:pass@hostname:6379'  # connection URL (takes priority over the settings above)
    '''
        server = connection.from_settings(settings)  # read the connection settings you configured and connect to Redis
    
        # Ensure the connection is working.
        server.ping()  # verifies that the Redis connection works
        return cls(server=server, **kwargs)
    ## cls is this class; instantiating it here produces the scheduler object the crawl will use
    Connecting to Redis: connection.from_settings

    from_settings = get_redis_from_settings
    def get_redis_from_settings(settings):
    
    
        params = defaults.REDIS_PARAMS.copy()
        ## start from the default connection parameters:
    '''
    REDIS_PARAMS = {
        'socket_timeout': 30,
        'socket_connect_timeout': 30,
        'retry_on_timeout': True,
        'encoding': REDIS_ENCODING,
    }
    '''
        params.update(settings.getdict('REDIS_PARAMS'))  # merge in the REDIS_PARAMS you configured in settings
        ## i.e. the values from settings extend and override the defaults
        # XXX: Deprecate REDIS_* settings.
        for source, dest in SETTINGS_PARAMS_MAP.items():
            val = settings.get(source)  # source is the setting name (e.g. REDIS_HOST); if it is not configured, the default stays in place
            '''
            this loop walks the keys of SETTINGS_PARAMS_MAP and, for each one,
            fetches the corresponding value from settings
            '''
            if val:
                params[dest] = val
    
        # Allow ``redis_cls`` to be a path to a class.
        if isinstance(params.get('redis_cls'), six.string_types):
            params['redis_cls'] = load_object(params['redis_cls'])
    
        return get_redis(**params)
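
    A short usage sketch of the function above (assuming a Redis server is reachable on localhost; the setting values mirror the ones shown earlier):

    from scrapy.settings import Settings
    from scrapy_redis import connection

    settings = Settings({
        'REDIS_HOST': '127.0.0.1',
        'REDIS_PORT': 6379,
        # 'REDIS_URL': 'redis://user:pass@hostname:6379',  # would take priority if set
        'REDIS_PARAMS': {'socket_timeout': 30},
    })

    server = connection.from_settings(settings)   # the same call the scheduler makes
    server.ping()                                  # raises if Redis is unreachable
    print(server)                                  # a redis client instance

    Because SETTINGS_PARAMS_MAP maps REDIS_URL onto the url parameter, a configured REDIS_URL wins over REDIS_HOST/REDIS_PORT, which is what the "takes priority" comment in the settings refers to.
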
    The getdict method:

    def getdict(self, name, default=None):
       
        value = self.get(name, default or {})
        if isinstance(value, six.string_types):
            value = json.loads(value)
        return dict(value)
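
    A quick usage sketch of getdict (the setting names are just examples): it accepts either a real dict or a JSON string, which is what you get when a setting is passed on the command line with -s.

    from scrapy.settings import Settings

    s = Settings({'REDIS_PARAMS': '{"socket_timeout": 30}'})   # JSON-string form
    print(s.getdict('REDIS_PARAMS'))     # {'socket_timeout': 30}
    print(s.getdict('MISSING_SETTING'))  # {} -- falls back to the default
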
    Once the scheduler object has been instantiated, the crawl itself starts:
    ## the methods below do the actual crawling work; everything above only read the configuration
    def enqueue_request(self, request):
        if not request.dont_filter and self.df.request_seen(request):
            # dont_filter on the Request is honoured first; otherwise the
            # dupefilter checks whether this request has been seen before
            self.df.log(request, self.spider)
            return False
        # an already-seen request is not scheduled again (False was returned above)
        if self.stats:
            # stats collection enabled: count this enqueue
            self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
        # an unseen request is pushed into the queue for the downloader to pick up;
        # whether that queue is FIFO, LIFO or priority-based depends on SCHEDULER_QUEUE_CLASS in settings
        self.queue.push(request)
        return True  # the request was accepted by the scheduler


    The downloader side gets its next request from the queue:

    def next_request(self):
        block_pop_timeout = self.idle_before_close
        request = self.queue.pop(block_pop_timeout)  # each pop returns the next Request object taken from Redis
        if request and self.stats:
            self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
        return request
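
    Which queue class self.queue actually is depends on SCHEDULER_QUEUE_CLASS; scrapy-redis ships FIFO, LIFO and priority variants built on Redis lists and sorted sets. As a rough sketch (raw redis-py calls, not the scrapy_redis classes themselves) of what the FIFO variant does at the Redis level, using the key format from SCHEDULER_QUEUE_KEY:

    import pickle
    import redis

    r = redis.StrictRedis(host='127.0.0.1', port=6379)
    key = 'chouti:requests'                      # '%(spider)s:requests' for a spider named 'chouti'

    # push: the request is reduced to a dict and serialized (pickle by default),
    # because a live Request object with its bound callback cannot be stored as-is
    data = pickle.dumps({'url': 'https://dig.chouti.com/', 'callback': 'parse'})
    r.lpush(key, data)

    # pop: next_request() can block for up to SCHEDULER_IDLE_BEFORE_CLOSE seconds
    item = r.brpop(key, timeout=10)
    if item:
        print(pickle.loads(item[1]))             # the request dict, rebuilt into a Request by the queue class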

    Reference:

    https://cuiqingcai.com/6058.html



