  • Crawling the full Sina News site

    1. Spider

    # -*- coding: utf-8 -*-
    import scrapy
    import os
    from sina.items import SinaItem
    
    
    class SinapiderSpider(scrapy.Spider):
        name = 'sinapider'
        # allowed_domains = ['www.xxx.com']
        start_urls = ['http://news.sina.com.cn/guide/']
    
        def parse(self, response):
            items = []
            # URLs and titles of all the top-level categories
            # (every extract() call returns a list)
            parentUrls = response.xpath('//div[@id="tab01"]/div/h3/a/@href').extract()
            parentTitle = response.xpath('//div[@id="tab01"]/div/h3/a/text()').extract()
    
            # URLs and titles of all the sub-categories
            # (lists again, in the same document order)
            subUrls = response.xpath('//div[@id="tab01"]/div/ul/li/a/@href').extract()
            subTitle = response.xpath('//div[@id="tab01"]/div/ul/li/a/text()').extract()
    
            # Walk through every top-level category
            for i in range(0, len(parentTitle)):
                # Directory path and name for this top-level category
                parentFilename = "./Data/" + parentTitle[i]
    
                # Create the directory if it does not exist yet
                if not os.path.exists(parentFilename):
                    os.makedirs(parentFilename)
    
                # Walk through every sub-category
                for j in range(0, len(subUrls)):
                    item = SinaItem()
    
                    # Save the parent title and URL on the item: all sub-category
                    # URLs sit in one flat list, so the parent URL is needed below
                    # to decide which parent a given sub-category belongs to
                    item['parentTitle'] = parentTitle[i]
                    item['parentUrls'] = parentUrls[i]
    
                    # A sub-category belongs to this parent if its URL starts with the
                    # parent URL (e.g. sports.sina.com.cn and sports.sina.com.cn/nba)
                    if_belong = subUrls[j].startswith(item['parentUrls'])
    
                    # If it belongs, nest its storage directory under the parent's
                    if if_belong:
                        subFilename = parentFilename + '/' + subTitle[j]
                        # Create the directory if it does not exist yet
                        if not os.path.exists(subFilename):
                            os.makedirs(subFilename)
    
                        # Save the sub-category URL, title and directory on the item
                        item['subUrls'] = subUrls[j]
                        item['subTitle'] = subTitle[j]
                        item['subFilename'] = subFilename
    
                        items.append(item)
    
            # Request each sub-category URL; the response, together with the
            # item carried in meta, is handed to the second_parse callback
            for item in items:
                yield scrapy.Request(url=item['subUrls'], meta={'meta_1': item}, callback=self.second_parse)
    
        # For each sub-category page, recurse one level deeper
        def second_parse(self, response):
            # Recover the item passed along in the request meta
            meta_1 = response.meta['meta_1']
    
            # Collect every link on the sub-category page
            sonUrls = response.xpath('//a/@href').extract()
    
            items = []
            for i in range(0, len(sonUrls)):
                # Keep a link only if it ends in .shtml (an article page) and
                # starts with the parent category URL
                if_belong = sonUrls[i].endswith('.shtml') and sonUrls[i].startswith(meta_1['parentUrls'])
    
                # If it belongs, copy all the fields onto one item so they
                # travel together to the next callback
                if if_belong:
                    item = SinaItem()
                    item['parentTitle'] = meta_1['parentTitle']
                    item['parentUrls'] = meta_1['parentUrls']
                    item['subUrls'] = meta_1['subUrls']
                    item['subTitle'] = meta_1['subTitle']
                    item['subFilename'] = meta_1['subFilename']
                    item['sonUrls'] = sonUrls[i]
                    items.append(item)
    
            # Request each article URL; the response plus the meta item is
            # handed to the detail_parse callback
            for item in items:
                yield scrapy.Request(url=item['sonUrls'], meta={'meta_2': item}, callback=self.detail_parse)
    
        # Parse the article page itself: extract the headline and the body text
        def detail_parse(self, response):
            item = response.meta['meta_2']
            content = ""
            # extract_first() gives the headline as a plain string
            head = response.xpath('//h1[@id="main_title"]/text()').extract_first()
            content_list = response.xpath('//div[@id="artibody"]/p/text()').extract()
    
            # Concatenate the text of all the <p> tags
            for content_one in content_list:
                content += content_one
    
            item['head'] = head
            item['content'] = content
    
            yield item

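    With the spider in place, the usual way to start the crawl is the standard
    Scrapy CLI, `scrapy crawl sinapider`, run from the project root. As an
    alternative, here is a minimal launcher sketch using Scrapy's CrawlerProcess;
    the module path sina.spiders.sinapider is an assumption based on the default
    project layout, not something shown in the original post.

    # run_sina.py -- minimal launcher sketch (assumes the default Scrapy
    # layout, with this spider saved as sina/spiders/sinapider.py)
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    from sina.spiders.sinapider import SinapiderSpider

    process = CrawlerProcess(get_project_settings())  # picks up settings.py
    process.crawl(SinapiderSpider)
    process.start()  # blocks until the crawl finishes
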
    2. Item

    import scrapy
    
    
    class SinaItem(scrapy.Item):
        # Title and URL of the top-level category
        parentTitle = scrapy.Field()
        parentUrls = scrapy.Field()
    
        # Title and URL of the sub-category
        subTitle = scrapy.Field()
        subUrls = scrapy.Field()
    
        # Storage directory for the sub-category
        subFilename = scrapy.Field()
    
        # Article links found under the sub-category
        sonUrls = scrapy.Field()
    
        # Article headline and body
        head = scrapy.Field()
        content = scrapy.Field()

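    For reference, a scrapy.Item behaves like a dict that only accepts the
    declared fields; assigning to an undeclared key raises a KeyError. A quick
    illustration (the field value here is made up):

    from sina.items import SinaItem

    item = SinaItem()
    item['parentTitle'] = 'sports'   # declared field, accepted
    # item['author'] = 'x'           # undeclared field, would raise KeyError
    print(dict(item))                # {'parentTitle': 'sports'}
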
    3. Pipeline

    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    
    class SinaPipeline(object):
        def process_item(self, item, spider):
            sonUrls = item['sonUrls']
    
            # The file name is the middle part of the article URL with '/'
            # replaced by '_', saved as a .txt file
            filename = sonUrls[7:-6].replace('/', '_')
            filename += ".txt"
    
            with open(item['subFilename'] + '/' + filename, 'w', encoding='utf-8') as fp:
                fp.write(item['content'])
    
            return item

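    The slice sonUrls[7:-6] works because every article URL kept by the spider
    starts with 'http://' (7 characters) and ends with '.shtml' (6 characters).
    A worked example with a made-up article URL:

    url = 'http://sports.sina.com.cn/nba/doc-i12345.shtml'  # hypothetical URL
    name = url[7:-6].replace('/', '_') + '.txt'
    # url[7:-6] -> 'sports.sina.com.cn/nba/doc-i12345'
    # name      -> 'sports.sina.com.cn_nba_doc-i12345.txt'

    Note that the slice quietly assumes http:// links; an https:// URL would
    leave a stray leading character in the file name.
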
    4. Settings

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for sina project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://doc.scrapy.org/en/latest/topics/settings.html
    #     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'sina'
    
    SPIDER_MODULES = ['sina.spiders']
    NEWSPIDER_MODULE = 'sina.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'sina.middlewares.SinaSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'sina.middlewares.SinaDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See https://doc.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       'sina.pipelines.SinaPipeline': 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    
    
    LOG_LEVEL = 'DEBUG'
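
    Putting it together, ITEM_PIPELINES routes every yielded item through
    SinaPipeline, so a run writes out a tree of the following shape (the
    actual category names depend on what the guide page lists at crawl time):

    Data/
        <parentTitle>/
            <subTitle>/
                <article-url-with-slashes-replaced>.txt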