  • Scraping a company's job postings and their detail pages with Python's Scrapy

    1. Define the fields to scrape in items.py

    # -*- coding: utf-8 -*-
    
    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://doc.scrapy.org/en/latest/topics/items.html
    
    import scrapy
    class GosuncnItem(scrapy.Item):
        """
        Fields scraped by the spider.
        """
        # define the fields for your item here like:
        # name = scrapy.Field()
        platform = scrapy.Field()
        position = scrapy.Field()
        num = scrapy.Field()
        time = scrapy.Field()
        url = scrapy.Field()
        content = scrapy.Field()
        responsible = scrapy.Field()
        page = scrapy.Field()
        pass
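
    A GosuncnItem behaves like a dict restricted to the declared fields, which is how the spider fills it later. A minimal sketch of how the item is used (the values are made-up placeholders, not real data):

    from gosuncn.items import GosuncnItem

    item = GosuncnItem()
    item["position"] = "Test Engineer"   # placeholder value
    item["num"] = "2"
    print(dict(item))                    # {'position': 'Test Engineer', 'num': '2'}
    # item["salary"] = "10k"             # would raise KeyError: 'salary' is not a declared field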

    2. Configure settings.py

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for gosuncn project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://doc.scrapy.org/en/latest/topics/settings.html
    #     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'gosuncn'
    
    SPIDER_MODULES = ['gosuncn.spiders']
    NEWSPIDER_MODULE = 'gosuncn.spiders'
    
    LOG_LEVEL = "WARNING"  # only log warnings and above
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'gosuncn (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = True
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'gosuncn.middlewares.GosuncnSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'gosuncn.middlewares.GosuncnDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See https://doc.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       'gosuncn.pipelines.GosuncnPipeline': 300,
    }
    LOG_FILE = "./log.log"  # write logs to this file (LOG_LEVEL is already set above)
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
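
    The number assigned to each class in ITEM_PIPELINES is a priority from 0 to 1000; items flow through lower-numbered pipelines first. As a sketch, enabling the GosuncnPipeline1 class that is left commented out in pipelines.py would look like this (the 400 priority is my own choice):

    ITEM_PIPELINES = {
        'gosuncn.pipelines.GosuncnPipeline': 300,     # runs first
        # 'gosuncn.pipelines.GosuncnPipeline1': 400,  # would run second if uncommented here and in pipelines.py
    }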

    3. Scrape the company's job listings. Note: the fields declared in items.py must match the fields assigned in this spider, otherwise Scrapy raises an error.

    # -*- coding: utf-8 -*-
    import scrapy
    import logging
    from gosuncn.items import GosuncnItem
    logger = logging.getLogger(__name__)
    # module-level logger for this spider
    class GaoxinxingSpider(scrapy.Spider):
        name = 'gaoxinxing'
        allowed_domains = ['gosuncn.zhiye.com']
        start_urls = ['http://gosuncn.zhiye.com/Social']
        next_page_num = 1
        def parse(self, response):
            tr_list = response.xpath("//table[@class='jobsTable']/tr")[1:]
            #print(tr_list)
            for tr in tr_list:
                item = GosuncnItem()
                item["position"]=tr.xpath(".//td[1]/a/text()").extract_first()
                item["url"] = "http://gosuncn.zhiye.com"+tr.xpath(".//td[1]/a/@href").extract_first()
                item["platform"] = tr.xpath(".//td[3]/text()").extract_first()
                item["num"] = tr.xpath(".//td[4]/text()").extract_first()
                item["time"] = tr.xpath(".//td[6]/text()").extract_first()
                item["page"]= self.next_page_num
                #print(item)
                #logger.warning(item)  # log the item
                #yield item
            ################ crawl the detail page ######################
                yield scrapy.Request(
                    item["url"],
                    callback=self.url_parse,  # pass the method itself; adding () would call it here
                    meta={"item": item}  # hand the item over to url_parse()
                )
    
            ############## pagination: crawl the next page ###############################
            next_page_url = response.xpath("//div[@class='pager2']//a[@class='next']/@href").extract_first()
            print(next_page_url)
            self.next_page_num = self.next_page_num+1
            if self.next_page_num<5:
                next_url = "http://gosuncn.zhiye.com/social/?PageIndex=" + str(self.next_page_num)
                #print(next_url)
                yield scrapy.Request(
                    next_url,
                    callback=self.parse  # pass the method itself; adding () would call it here
                )
    
        def url_parse(self, response):
            """
            Parse the job detail page.
            :param response:
            :return:
            """
            item = response.meta["item"]
            item["content"] = response.xpath("//div[@class='xiangqingcontain']/ul[@class='xiangqinglist clearfix']/li[6]/text()").extract()
            #item["responsible"] = response.xpath("//div[@class='xiangqingcontain']/div[@class='xiangqingtext']/p[2]/text()").extract()
            logger.warning(item)  # log the item
            #print(item)
            yield item
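
    The spider can be run from the project root with `scrapy crawl gaoxinxing` (add `-o jobs.json` to export the yielded items). It can also be driven from a plain Python script; the sketch below assumes the spider lives in gosuncn/spiders/gaoxinxing.py:

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings
    from gosuncn.spiders.gaoxinxing import GaoxinxingSpider  # module path assumed

    process = CrawlerProcess(get_project_settings())  # picks up settings.py
    process.crawl(GaoxinxingSpider)
    process.start()  # blocks until the crawl finishes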

    4. Process the items handed over from the spider in pipelines.py

    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    import re
    from gosuncn.items import GosuncnItem
    class GosuncnPipeline(object):
        def process_item(self, item, spider):
            if isinstance(item,GosuncnItem):
                item["content"] = self.process_content(item["content"])
                print(item)
            return item
    
        def process_content(self, content):
            # Strip Windows line breaks and the literal "' '" artifacts from each text fragment,
            # then drop the fragments that end up empty.
            content = [re.sub(r"\r\n|' '", "", i) for i in content]
            content = [i for i in content if len(i) > 0]
            return content
    # class GosuncnPipeline1(object):
    #     def process_item(self, item, spider):
    #         if isinstance(item,GosuncnItem):
    #             print(item)
    #         return item
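
    If the cleaned items should also be saved to disk, a second pipeline can take care of that. A minimal sketch of a JSON Lines writer (the class, file name, and the 400 priority are my own additions, not part of the original project); it would need to be registered in ITEM_PIPELINES with a priority above 300 so it runs after GosuncnPipeline:

    import json

    class JsonWriterPipeline(object):
        # Hypothetical pipeline, not in the original post.
        def open_spider(self, spider):
            self.file = open("jobs.jl", "w", encoding="utf-8")

        def close_spider(self, spider):
            self.file.close()

        def process_item(self, item, spider):
            # One JSON object per line, keeping non-ASCII characters readable.
            self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
            return item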
  • Original post: https://www.cnblogs.com/ywjfx/p/11081555.html