  • Crawling Huya and storing the results in MySQL and Redis

    huya.py

    # -*- coding: utf-8 -*-
    import scrapy
    from huyaPro1.items import Huyapro1Item


    class HuyaSpider(scrapy.Spider):
        name = 'huya'
        # allowed_domains = ['www.xxx.com']
        start_urls = ['https://www.huya.com/g/xingxiu']

        def parse(self, response):
            # each <li> in the live list is one streaming room
            li_list = response.xpath('//*[@id="js-live-list"]/li')
            for li in li_list:
                # extract_first() returns the bare string; extract() would return a list
                title = li.xpath('./a[2]/text()').extract_first()
                author = li.xpath('./span/span[1]/i/text()').extract_first()
                hot = li.xpath('./span/span[2]/i[2]/text()').extract_first()
                item = Huyapro1Item()
                item['title'] = title
                item['author'] = author
                item['hot'] = hot
                yield item
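
    The spider is run from the project root with the standard Scrapy command:

        scrapy crawl huya

    Each yielded item is handed to the pipelines configured in settings.py below.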

    items.py

    # -*- coding: utf-8 -*-

    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/items.html

    import scrapy


    class Huyapro1Item(scrapy.Item):
        # define the fields for your item here like:
        title = scrapy.Field()
        author = scrapy.Field()
        hot = scrapy.Field()

    middlewares.py

    # -*- coding: utf-8 -*-

    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html

    from scrapy import signals


    class Huyapro1SpiderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.

        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s

        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.

            # Should return None or raise an exception.
            return None

        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.

            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i

        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.

            # Should return either None or an iterable of Request, dict
            # or Item objects.
            pass

        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn't have a response associated.

            # Must return only requests (not items).
            for r in start_requests:
                yield r

        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)


    class Huyapro1DownloaderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the downloader middleware does not modify the
        # passed objects.

        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s

        def process_request(self, request, spider):
            # Called for each request that goes through the downloader
            # middleware.

            # Must either:
            # - return None: continue processing this request
            # - or return a Response object
            # - or return a Request object
            # - or raise IgnoreRequest: process_exception() methods of
            #   installed downloader middleware will be called
            return None

        def process_response(self, request, response, spider):
            # Called with the response returned from the downloader.

            # Must either:
            # - return a Response object
            # - return a Request object
            # - or raise IgnoreRequest
            return response

        def process_exception(self, request, exception, spider):
            # Called when a download handler or a process_request()
            # (from other downloader middleware) raises an exception.

            # Must either:
            # - return None: continue processing this exception
            # - return a Response object: stops process_exception() chain
            # - return a Request object: stops process_exception() chain
            pass

        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
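
    Note: middlewares.py is the stock template that scrapy startproject generates; nothing in it was modified, and neither middleware is enabled in settings.py (the SPIDER_MIDDLEWARES and DOWNLOADER_MIDDLEWARES entries there stay commented out), so it has no effect on this crawl.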

    pipelines.py

    # -*- coding: utf-8 -*-

    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    import json

    import pymysql
    from redis import Redis


    class Huyapro1Pipeline(object):
        fp = None

        def open_spider(self, spider):
            self.fp = open('huyazhibo.txt', 'w', encoding='utf-8')

        def process_item(self, item, spider):
            # one record per line: title:author:hot
            self.fp.write(item['title'] + ':' + item['author'] + ':' + item['hot'] + '\n')
            print(item['title'], 'written successfully!!!')
            return item  # pass the item on to the next pipeline

        def close_spider(self, spider):
            self.fp.close()


    class mysqlPipeLine(object):
        conn = None
        cursor = None

        def open_spider(self, spider):
            self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                                        password='123', db='Spider', charset='utf8')
            print(self.conn)

        def process_item(self, item, spider):
            # parameterized query: the driver handles quoting and escaping
            sql = 'insert into huya values (%s, %s, %s)'
            self.cursor = self.conn.cursor()
            try:
                self.cursor.execute(sql, (item['title'], item['author'], item['hot']))
                self.conn.commit()
            except Exception as e:
                print(e)
                self.conn.rollback()
            return item

        def close_spider(self, spider):
            self.cursor.close()
            self.conn.close()


    class RedisPipeLine(object):
        conn = None

        def open_spider(self, spider):
            self.conn = Redis(host='127.0.0.1', port=6379)

        def process_item(self, item, spider):
            # redis-py only accepts bytes/str/numbers, so serialize the Item first
            self.conn.lpush('huyalist', json.dumps(dict(item)))
            return item
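
    The MySQL pipeline assumes a Spider database containing a three-column huya table. The post never shows the schema, so here is a minimal one-off setup sketch with assumed column types (hot is stored as a string because the page renders values such as "128.3万"):

        import pymysql

        conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='123', charset='utf8')
        cursor = conn.cursor()
        cursor.execute('CREATE DATABASE IF NOT EXISTS Spider')
        cursor.execute('CREATE TABLE IF NOT EXISTS Spider.huya ('
                       'title VARCHAR(255), author VARCHAR(255), hot VARCHAR(32))')
        conn.commit()
        cursor.close()
        conn.close()

    To spot-check the Redis side after a crawl, a short read-back sketch (it assumes the json.dumps serialization used in RedisPipeLine above):

        import json
        from redis import Redis

        conn = Redis(host='127.0.0.1', port=6379)
        print(conn.llen('huyalist'))               # number of records pushed
        for raw in conn.lrange('huyalist', 0, 4):  # first five entries (bytes)
            print(json.loads(raw))                 # {'title': ..., 'author': ..., 'hot': ...}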

    settings.py

    # -*- coding: utf-8 -*-

    # Scrapy settings for huyaPro1 project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    # https://docs.scrapy.org/en/latest/topics/settings.html
    # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html

    BOT_NAME = 'huyaPro1'

    SPIDER_MODULES = ['huyaPro1.spiders']
    NEWSPIDER_MODULE = 'huyaPro1.spiders'

    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    LOG_LEVEL = 'ERROR'
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False

    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    # CONCURRENT_REQUESTS = 32

    # Configure a delay for requests for the same website (default: 0)
    # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    # CONCURRENT_REQUESTS_PER_DOMAIN = 16
    # CONCURRENT_REQUESTS_PER_IP = 16

    # Disable cookies (enabled by default)
    # COOKIES_ENABLED = False

    # Disable Telnet Console (enabled by default)
    # TELNETCONSOLE_ENABLED = False

    # Override the default request headers:
    # DEFAULT_REQUEST_HEADERS = {
    #     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #     'Accept-Language': 'en',
    # }

    # Enable or disable spider middlewares
    # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    # SPIDER_MIDDLEWARES = {
    #     'huyaPro1.middlewares.Huyapro1SpiderMiddleware': 543,
    # }

    # Enable or disable downloader middlewares
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    # DOWNLOADER_MIDDLEWARES = {
    #     'huyaPro1.middlewares.Huyapro1DownloaderMiddleware': 543,
    # }

    # Enable or disable extensions
    # See https://docs.scrapy.org/en/latest/topics/extensions.html
    # EXTENSIONS = {
    #     'scrapy.extensions.telnet.TelnetConsole': None,
    # }

    # Configure item pipelines
    # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
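    # Note: lower numbers run first. The text-file pipeline (300) sees each item
    # before MySQL (301) and Redis (302), and every process_item() must return
    # the item so that the next pipeline in the chain receives it.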
    ITEM_PIPELINES = {
        'huyaPro1.pipelines.Huyapro1Pipeline': 300,
        'huyaPro1.pipelines.mysqlPipeLine': 301,
        'huyaPro1.pipelines.RedisPipeLine': 302,
    }

    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
    # AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    # AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    # AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    # AUTOTHROTTLE_DEBUG = False

    # Enable and configure HTTP caching (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    # HTTPCACHE_ENABLED = True
    # HTTPCACHE_EXPIRATION_SECS = 0
    # HTTPCACHE_DIR = 'httpcache'
    # HTTPCACHE_IGNORE_HTTP_CODES = []
    # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
  • Original post: https://www.cnblogs.com/zhang-da/p/12432129.html