  • Installing scrapy and scrapy_redis

    Install the SQLite development headers first; Python's sqlite3 module, which scrapy needs, is built against them:

    yum install sqlite-devel

    Python 3.5

    Download the source tarball and build and install it yourself:

    ./configure

    make

    make install
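
    To confirm the new build actually linked against sqlite-devel (a quick check, not in the original post), make sure the sqlite3 module imports:

    import sqlite3
    print(sqlite3.sqlite_version)  # version of the SQLite library Python was built against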

    Python 3.5 bundles pip; upgrade it to the latest version:

    pip3 install --upgrade pip

    Install the MySQL module for Python 3:

    pip3 install pymysql
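
    A minimal connectivity check (a sketch; the credentials and database name below are hypothetical, assuming a local MySQL server):

    import pymysql

    # Hypothetical credentials -- substitute your own.
    conn = pymysql.connect(host='localhost', user='root',
                           password='secret', db='test', charset='utf8mb4')
    try:
        with conn.cursor() as cur:
            cur.execute('SELECT VERSION()')
            print(cur.fetchone())  # prints the server version tuple
    finally:
        conn.close()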

    Install Twisted, the event-driven networking framework scrapy runs on:

    wget https://pypi.python.org/packages/6b/23/8dbe86fc83215015e221fbd861a545c6ec5c9e9cd7514af114d1f64084ab/Twisted-16.4.1.tar.bz2#md5=c6d09bdd681f538369659111f079c29d

    Unpack it:

    tar -jxvf Twisted-16.4.1.tar.bz2

    Enter the directory:

    cd Twisted-16.4.1

    Install it:

    python3 setup.py install
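
    A quick sanity check that the install is importable (not from the original post):

    import twisted
    print(twisted.version)  # e.g. [Twisted, version 16.4.1]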

    Install scrapy:

    pip3 install scrapy
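
    A minimal throwaway spider to confirm the install works (a sketch; the file, spider, and field names here are hypothetical):

    # hello_spider.py -- hypothetical smoke test, not from the original post
    import scrapy

    class HelloSpider(scrapy.Spider):
        name = 'hello'
        start_urls = ['http://example.com']

        def parse(self, response):
            # extract_first() matches the scrapy 1.x API of this era
            yield {'title': response.xpath('//title/text()').extract_first()}

    Run it with scrapy runspider hello_spider.py -o out.json and check out.json for the page title.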

    Install redis:

     yum install redis
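
    After installing, start the server (e.g. service redis start) and verify connectivity from Python; this assumes the redis-py client is present (scrapy-redis pulls it in as a dependency):

    import redis

    # Hypothetical local server on the default port.
    r = redis.StrictRedis(host='localhost', port=6379)
    print(r.ping())  # True if the server is reachable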

    Install scrapy-redis:

    git clone https://github.com/rolando/scrapy-redis.git

    cd scrapy-redis/

    python3 setup.py install

    scrapy-redis has a bug caused by the bytes/str difference between Python 2 and Python 3; the temporary fix from GitHub patches the following two files:

    # utils.py
    import six
    
    
    def bytes_to_str(s, encoding='utf-8'):
        """Returns a str if a bytes object is given."""
    
        if six.PY3 and isinstance(s, bytes):
            return s.decode(encoding)
    
        return s
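
    For illustration, on Python 3 this turns the raw bytes that redis returns back into text (a hypothetical usage, not from the original post):

    print(bytes_to_str(b'http://example.com'))  # -> 'http://example.com'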
    

      

    # spiders.py
    import six

    from scrapy import signals
    from scrapy.exceptions import DontCloseSpider
    from scrapy.spiders import Spider, CrawlSpider

    from . import connection
    from .utils import bytes_to_str

    # Default batch size matches default concurrent requests setting.
    DEFAULT_START_URLS_BATCH_SIZE = 16
    DEFAULT_START_URLS_KEY = '%(name)s:start_urls'


    class RedisMixin(object):
        """Mixin class to implement reading urls from a redis queue."""
        # Per spider redis key, default to DEFAULT_KEY.
        redis_key = None
        # Fetch this amount of start urls when idle. Default to DEFAULT_BATCH_SIZE.
        redis_batch_size = None
        redis_encoding = 'utf-8'
        # Redis client instance.
        server = None

        def start_requests(self):
            """Returns a batch of start requests from redis."""
            return self.next_requests()

        def setup_redis(self, crawler=None):
            """Setup redis connection and idle signal.

            This should be called after the spider has set its crawler object.
            """
            if self.server is not None:
                return

            if crawler is None:
                # We allow optional crawler argument to keep backwards
                # compatibility.
                # XXX: Raise a deprecation warning.
                crawler = getattr(self, 'crawler', None)

            if crawler is None:
                raise ValueError("crawler is required")

            settings = crawler.settings

            if self.redis_key is None:
                self.redis_key = settings.get(
                    'REDIS_START_URLS_KEY', DEFAULT_START_URLS_KEY,
                )

            self.redis_key = self.redis_key % {'name': self.name}

            if not self.redis_key.strip():
                raise ValueError("redis_key must not be empty")

            if self.redis_batch_size is None:
                self.redis_batch_size = settings.getint(
                    'REDIS_START_URLS_BATCH_SIZE', DEFAULT_START_URLS_BATCH_SIZE,
                )

            try:
                self.redis_batch_size = int(self.redis_batch_size)
            except (TypeError, ValueError):
                raise ValueError("redis_batch_size must be an integer")

            self.logger.info("Reading start URLs from redis key '%(redis_key)s' "
                             "(batch size: %(redis_batch_size)s)", self.__dict__)

            self.server = connection.from_settings(crawler.settings)
            # The idle signal is called when the spider has no requests left,
            # that's when we will schedule new requests from the redis queue.
            crawler.signals.connect(self.spider_idle, signal=signals.spider_idle)

        def next_requests(self):
            """Returns a request to be scheduled or none."""
            use_set = self.settings.getbool('REDIS_START_URLS_AS_SET')
            fetch_one = self.server.spop if use_set else self.server.lpop
            # XXX: Do we need to use a timeout here?
            found = 0
            while found < self.redis_batch_size:
                data = fetch_one(self.redis_key)
                if not data:
                    # Queue empty.
                    break
                req = self.make_request_from_data(data)
                if req:
                    yield req
                    found += 1
                else:
                    self.logger.debug("Request not made from data: %r", data)

            if found:
                self.logger.debug("Read %s requests from '%s'", found, self.redis_key)

        def make_request_from_data(self, data):
            # By default, data is an URL.
            if not isinstance(data, six.string_types):
                # XXX: Shall we log and continue?
                self.logger.error("Wrong type for data: %s" % type(data))
                url = bytes_to_str(data, self.redis_encoding)
            else:
                url = data
            # FIXME: This is a naive guard against using a wrong redis_key where
            # data are not string URLs.
            if '://' not in url:
                # XXX: Shall this be an exception?
                self.logger.error("Missing scheme in URL: '%s'", url)

            return self.make_requests_from_url(url)

        def schedule_next_requests(self):
            """Schedules a request if available."""
            for req in self.next_requests():
                self.crawler.engine.crawl(req, spider=self)

        def spider_idle(self):
            """Schedules a request if available, otherwise waits."""
            # XXX: Handle a sentinel to close the spider.
            self.schedule_next_requests()
            raise DontCloseSpider


    class RedisSpider(RedisMixin, Spider):
        """Spider that reads urls from redis queue when idle."""

        @classmethod
        def from_crawler(cls, crawler, *args, **kwargs):
            obj = super(RedisSpider, cls).from_crawler(crawler, *args, **kwargs)
            obj.setup_redis(crawler)
            return obj


    class RedisCrawlSpider(RedisMixin, CrawlSpider):
        """Spider that reads urls from redis queue when idle."""

        @classmethod
        def from_crawler(cls, crawler, *args, **kwargs):
            obj = super(RedisCrawlSpider, cls).from_crawler(crawler, *args, **kwargs)
            obj.setup_redis(crawler)
            return obj
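
    With the patch in place, a spider inherits from RedisSpider and reads its start URLs from a redis list instead of start_urls. A minimal sketch (the spider and key names are hypothetical, not from the original post):

    # myspider.py -- hypothetical example
    from scrapy_redis.spiders import RedisSpider

    class MySpider(RedisSpider):
        name = 'myspider'
        # The redis list this spider pops start URLs from.
        redis_key = 'myspider:start_urls'

        def parse(self, response):
            yield {'url': response.url,
                   'title': response.xpath('//title/text()').extract_first()}

    Enable the scrapy-redis scheduler and dupefilter in the project's settings.py (setting names per the scrapy-redis README):

    SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
    DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'

    Then seed the queue with redis-cli lpush myspider:start_urls http://example.com and start the spider; it sits idle and pulls new URLs as they are pushed.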

      

    A reminder for my silly self >_<~
  • Original post: https://www.cnblogs.com/ephuizi/p/5909958.html