  • Scrapy Project



    1.tencentSpider.py

    # -*- coding: utf-8 -*-
    import scrapy
    from Tencent.items import TencentItem
    # Spider class
    class TencentspiderSpider(scrapy.Spider):
        name = 'tencentSpider'  # spider name
        allowed_domains = ['tencent.com']  # domains the spider may crawl
    
        # Starting URL: the listing is paginated via the start parameter
        offset = 0
        url = 'https://hr.tencent.com/position.php?&start='

        start_urls = [url + str(offset)]  # first URL the spider requests
    
    
        def parse(self, response):
            # Each job posting is a table row with class 'odd' or 'even'
            rows = response.xpath("//tr[@class='odd']|//tr[@class='even']")
            for each in rows:
                # Create a fresh item per row so yielded items stay independent
                item = TencentItem()
                # Relative XPaths (./td[...]) read from the current row; the
                # original absolute //tr[...] paths always matched the first
                # row of the document, repeating it for every item
                item['zhiwei'] = each.xpath("./td[@class='l square']/a/text()").extract_first()
                item['lianjie'] = each.xpath("./td[@class='l square']/a/@href").extract_first()
                item['leibie'] = each.xpath("./td[2]/text()").extract_first()
                item['renshu'] = each.xpath("./td[3]/text()").extract_first()
                item['didian'] = each.xpath("./td[4]/text()").extract_first()
                item['shijian'] = each.xpath("./td[5]/text()").extract_first()
                yield item
    
            if self.offset < 2840:
                self.offset += 10
                # After each page, bump offset by 10, build the next page's
                # URL, and send a new request handled by self.parse
                yield scrapy.Request(self.url + str(self.offset), callback=self.parse)
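
    Before running the full crawl, the row selectors can be sanity-checked interactively in the Scrapy shell; a minimal sketch, assuming the listing page is still reachable at this URL:

    # In a terminal: scrapy shell "https://hr.tencent.com/position.php?&start=0"
    # Then at the shell prompt:
    rows = response.xpath("//tr[@class='odd']|//tr[@class='even']")
    print(len(rows))  # number of postings on this page
    print(rows[0].xpath("./td[@class='l square']/a/text()").extract_first())  # first job title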

    2.items.py

    # -*- coding: utf-8 -*-
    
    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://doc.scrapy.org/en/latest/topics/items.html
    
    import scrapy
    
    
    class TencentItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        zhiwei = scrapy.Field()   # job title (职位)
        lianjie = scrapy.Field()  # link to the posting (链接)
        leibie = scrapy.Field()   # job category (类别)
        renshu = scrapy.Field()   # number of openings (人数)
        didian = scrapy.Field()   # location (地点)
        shijian = scrapy.Field()  # publish date (时间)
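
    For reference, scrapy.Item instances behave like dicts restricted to the declared fields; assigning an undeclared key raises KeyError. A quick sketch:

    item = TencentItem()
    item['zhiwei'] = 'Backend Engineer'
    print(item['zhiwei'])      # 'Backend Engineer'
    # item['salary'] = '10k'   # KeyError: TencentItem does not support field: salary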

    3.main.py

    from scrapy import cmdline
    # Launch the spider programmatically (handy when running from an IDE)
    cmdline.execute("scrapy crawl tencentSpider".split())
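
    Scrapy's built-in feed exports offer an alternative to a custom pipeline; the -o flag is part of the standard CLI, so a variant of main.py could write the items straight to JSON:

    from scrapy import cmdline
    # Let Scrapy's feed export serialize the items; no pipeline needed
    cmdline.execute("scrapy crawl tencentSpider -o tencent.json".split())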

    4.middlewares.py

    # -*- coding: utf-8 -*-
    
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    
    
    class TencentSpiderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.
    
            # Should return None or raise an exception.
            return None
    
        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.
    
            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i
    
        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.
    
            # Should return either None or an iterable of Response, dict
            # or Item objects.
            pass
    
        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn’t have a response associated.
    
            # Must return only requests (not items).
            for r in start_requests:
                yield r
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    
    
    class TencentDownloaderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the downloader middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_request(self, request, spider):
            # Called for each request that goes through the downloader
            # middleware.
    
            # Must either:
            # - return None: continue processing this request
            # - or return a Response object
            # - or return a Request object
            # - or raise IgnoreRequest: process_exception() methods of
            #   installed downloader middleware will be called
            return None
    
        def process_response(self, request, response, spider):
            # Called with the response returned from the downloader.
    
            # Must either:
            # - return a Response object
            # - return a Request object
            # - or raise IgnoreRequest
            return response
    
        def process_exception(self, request, exception, spider):
            # Called when a download handler or a process_request()
            # (from other downloader middleware) raises an exception.
    
            # Must either:
            # - return None: continue processing this exception
            # - return a Response object: stops process_exception() chain
            # - return a Request object: stops process_exception() chain
            pass
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
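
    Both classes above are the untouched Scrapy scaffold and pass everything through unchanged. To illustrate the process_request hook, here is a minimal sketch of a downloader middleware that stamps each outgoing request with a custom header (the class and header names are made up for the example, and it would still need an entry in DOWNLOADER_MIDDLEWARES to take effect):

    class CustomHeaderMiddleware(object):
        """Example only: add a fixed header to every request."""

        def process_request(self, request, spider):
            request.headers.setdefault('X-Example-Header', 'tencent-spider')
            return None  # None lets Scrapy continue processing the request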

    5.pipelines.py

    # -*- coding: utf-8 -*-

    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

    from openpyxl import Workbook

    class TencentPipeline(object):
        wb = Workbook()
        ws = wb.active
        # Header row: job title, link, category, headcount, location, date
        ws.append(['职位', '链接', '类型', '人数', '地点', '时间'])
    
        def process_item(self, item, spider):
            # One spreadsheet row per item
            line = [item['zhiwei'], item['lianjie'], item['leibie'],
                    item['renshu'], item['didian'], item['shijian']]
            self.ws.append(line)
            # Saving after every item is slow, but keeps the file usable mid-crawl
            self.wb.save('tencentSpider.xlsx')
            return item
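
    A stray "Tencent.json" note in the original pipeline header suggests a JSON export was also planned; a minimal sketch of a second pipeline that writes one JSON object per line (the filename and class name are assumptions), which would need its own entry in ITEM_PIPELINES to run:

    import json

    class TencentJsonPipeline(object):
        """Sketch: write each item as one JSON line to Tencent.json."""

        def open_spider(self, spider):
            self.f = open('Tencent.json', 'w', encoding='utf-8')

        def process_item(self, item, spider):
            # ensure_ascii=False keeps the Chinese field values readable
            self.f.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
            return item

        def close_spider(self, spider):
            self.f.close()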

    6.settings.py

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for Tencent project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://doc.scrapy.org/en/latest/topics/settings.html
    #     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'Tencent'
    
    SPIDER_MODULES = ['Tencent.spiders']
    NEWSPIDER_MODULE = 'Tencent.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    
    # Obey robots.txt rules
    #ROBOTSTXT_OBEY = True
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'Tencent.middlewares.TencentSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'Tencent.middlewares.TencentDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See https://doc.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
        'Tencent.pipelines.TencentPipeline': 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
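
    With ROBOTSTXT_OBEY and DOWNLOAD_DELAY both left commented out, the spider crawls as fast as it can. If politeness matters, a small settings sketch (values are illustrative, not from the original project):

    ROBOTSTXT_OBEY = True                 # honor robots.txt
    DOWNLOAD_DELAY = 1.0                  # seconds between requests to the site
    CONCURRENT_REQUESTS_PER_DOMAIN = 8    # cap parallelism per domain
    AUTOTHROTTLE_ENABLED = True           # adapt the delay to server latency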