  • Scraping NetEase News

    Create the project (named wangyiPeo1, to match the imports below):

    scrapy startproject wangyiPeo1

    Create the spider:

    scrapy genspider wangyi www.xxx.com   # generates the spider file
    Run it: scrapy crawl wangyi
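
    For reference, the generated project should end up looking roughly like
    this (a typical Scrapy layout; the names match the imports used below):

    wangyiPeo1/
    ├── scrapy.cfg
    └── wangyiPeo1/
        ├── items.py
        ├── middlewares.py
        ├── pipelines.py
        ├── settings.py
        └── spiders/
            └── wangyi.py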

    wangyi.py (the spider)

    # -*- coding: utf-8 -*-
    import scrapy
    from selenium import webdriver
    from wangyiPeo1.items import Wangyipeo1Item


    class WangyiSpider(scrapy.Spider):
        name = 'wangyi'
        # allowed_domains = ['www.xxx.com']
        start_urls = ['https://news.163.com/']
        model_urls = []
        bro = webdriver.Chrome()

        def parse(self, response):
            # Extract the urls of the 5 target sections:
            li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
            model_index = [3, 4, 6, 7, 8]
            for index in model_index:
                # li is the <li> tag of one of the 5 sections:
                li = li_list[index]
                # Extract the section's url:
                model_url = li.xpath("./a/@href").extract_first()
                self.model_urls.append(model_url)
                # Manually send a request for each section url:
                yield scrapy.Request(model_url, callback=self.parse_model)

        def parse_model(self, response):
            """
            Parses the news titles and detail-page urls out of each section page.
            The response received here would NOT normally contain the dynamically
            loaded news data (an unsatisfactory response); the downloader
            middleware below swaps it for a Selenium-rendered one.
            """
            div_list = response.xpath('/html/body/div/div[3]/div[4]/div[1]/div/div/ul/li/div/div')
            for div in div_list:
                # Title:
                title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
                # Detail-page url:
                detail_url = div.xpath('./a/@href').extract_first()
                # Some divs are ads or placeholders without a link; skip them:
                if not title or not detail_url:
                    continue
                item = Wangyipeo1Item()
                item['title'] = title
                # Manually send the request, passing the item along via meta:
                yield scrapy.Request(detail_url, callback=self.parse_new_detail, meta={'item': item})

        def parse_new_detail(self, response):
            """Parses the news body text."""
            item = response.meta['item']
            content = response.xpath('//*[@id="endText"]//text()').extract()
            content = ''.join(content)
            item['content'] = content
            yield item

        def closed(self, spider):
            # Quit the browser; this method runs exactly once, when the spider finishes.
            self.bro.quit()
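
    The spider above opens a visible Chrome window, because the driver is
    created as a class attribute. A minimal headless sketch, assuming a
    Chrome/chromedriver pair that supports the --headless flag, which could
    replace the webdriver.Chrome() line:

    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options

    options = Options()
    options.add_argument('--headless')    # render pages without opening a window
    options.add_argument('--disable-gpu')
    bro = webdriver.Chrome(options=options)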

    items.py

    # -*- coding: utf-8 -*-

    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/items.html

    import scrapy


    class Wangyipeo1Item(scrapy.Item):
        # define the fields for your item here like:
        title = scrapy.Field()
        content = scrapy.Field()
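
    Scrapy items behave like dicts; purely as an illustration (the values
    here are made up, not from the crawl):

    item = Wangyipeo1Item()
    item['title'] = 'some headline'
    item['content'] = 'article body text'
    print(dict(item))  # {'title': 'some headline', 'content': 'article body text'}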

    middlewares.py (intercepting requests and responses)

    # -*- coding: utf-8 -*-

    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    import time

    from scrapy.http import HtmlResponse


    class Wangyipeo1DownloaderMiddleware(object):
        # Parameters:
        # request: the request that produced the intercepted response
        # response: every intercepted response (1 + 5 + n: the start page,
        #           the 5 section pages, and the n detail pages)
        # spider: the spider instance; lets the middleware and the spider
        #         class share data (here: model_urls and the browser)
        def process_response(self, request, response, spider):
            # Intercept the 5 section responses and return 5 new,
            # satisfactory (Selenium-rendered) responses in their place.
            # 1. Pick out the 5 unsatisfactory section responses:
            if request.url in spider.model_urls:
                # These are the 5 section responses that need replacing.
                # url: the url of the request that produced this response
                # body: the response data, taken from Selenium's page_source
                bro = spider.bro
                bro.get(request.url)
                time.sleep(1)
                # page_source includes the dynamically loaded news data:
                page_text = bro.page_source
                new_response = HtmlResponse(url=request.url, body=page_text,
                                            encoding="utf-8", request=request)
                return new_response
            else:
                return response
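
    One second of sleep is not guaranteed to be enough, and some NetEase
    sections lazy-load more items as the page scrolls. A sketch of a helper
    that could replace the bro.get(...) / time.sleep(1) lines above; the
    name render_full_page and the scroll counts are assumptions, not part
    of the original middleware:

    import time

    def render_full_page(bro, url, scrolls=3, pause=1):
        """Load url in the shared driver and scroll to trigger lazy loading."""
        bro.get(url)
        for _ in range(scrolls):
            # Jump to the bottom so lazily loaded news items render:
            bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
            time.sleep(pause)
        return bro.page_source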

    pipelines.py (the pipeline)

    # -*- coding: utf-8 -*-

    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    import pymysql


    class Wangyipeo1Pipeline(object):
        # Database connection, created when the spider opens:
        conn = None
        # Cursor:
        cursor = None

        def open_spider(self, spider):
            # Connect to the database:
            self.conn = pymysql.Connect(host="127.0.0.1", port=3306, user="root",
                                        password="123", db="Spider",
                                        charset="utf8")
            print(self.conn)

        def process_item(self, item, spider):
            """Insert one row per item."""
            print(item)
            # Parameterized query: the driver handles quoting and escaping,
            # instead of interpolating raw strings into the SQL.
            sql = 'insert into wangyi values (%s, %s)'
            self.cursor = self.conn.cursor()
            try:
                self.cursor.execute(sql, (item["title"], item["content"]))
                self.conn.commit()
            except Exception as e:
                print(e)
                self.conn.rollback()
            return item

        def close_spider(self, spider):
            """Close the cursor and the database connection."""
            self.cursor.close()
            self.conn.close()
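
    The pipeline assumes a wangyi table with two string columns already
    exists in the Spider database. A one-off setup sketch; the column types
    are an assumption, since the post never shows the schema:

    import pymysql

    conn = pymysql.Connect(host="127.0.0.1", port=3306, user="root",
                           password="123", db="Spider", charset="utf8")
    try:
        with conn.cursor() as cursor:
            # Two columns matching the INSERT in the pipeline above:
            cursor.execute(
                "CREATE TABLE IF NOT EXISTS wangyi ("
                "title VARCHAR(255), "
                "content TEXT)"
            )
        conn.commit()
    finally:
        conn.close()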

    settings.py (configuration)

    # -*- coding: utf-8 -*-

    # Scrapy settings for wangyiPeo1 project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    # https://docs.scrapy.org/en/latest/topics/settings.html
    # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html

    BOT_NAME = 'wangyiPeo1'

    SPIDER_MODULES = ['wangyiPeo1.spiders']
    NEWSPIDER_MODULE = 'wangyiPeo1.spiders'

    # UA spoofing (send a real browser User-Agent):
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'

    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    # USER_AGENT = 'wangyiPeo1 (+http://www.yourdomain.com)'

    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False

    # Log level: only print errors
    LOG_LEVEL = 'ERROR'

    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    # CONCURRENT_REQUESTS = 32

    # Configure a delay for requests for the same website (default: 0)
    # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    # CONCURRENT_REQUESTS_PER_DOMAIN = 16
    # CONCURRENT_REQUESTS_PER_IP = 16

    # Disable cookies (enabled by default)
    # COOKIES_ENABLED = False

    # Disable Telnet Console (enabled by default)
    # TELNETCONSOLE_ENABLED = False

    # Override the default request headers:
    # DEFAULT_REQUEST_HEADERS = {
    #     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #     'Accept-Language': 'en',
    # }

    # Enable or disable spider middlewares
    # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    # SPIDER_MIDDLEWARES = {
    #     'wangyiPeo1.middlewares.Wangyipeo1SpiderMiddleware': 543,
    # }

    # Enable or disable downloader middlewares
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    # Enable the downloader middleware:
    DOWNLOADER_MIDDLEWARES = {
        'wangyiPeo1.middlewares.Wangyipeo1DownloaderMiddleware': 543,
    }

    # Enable or disable extensions
    # See https://docs.scrapy.org/en/latest/topics/extensions.html
    # EXTENSIONS = {
    #     'scrapy.extensions.telnet.TelnetConsole': None,
    # }

    # Configure item pipelines
    # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    # Enable the item pipeline:
    ITEM_PIPELINES = {
        'wangyiPeo1.pipelines.Wangyipeo1Pipeline': 300,
    }

    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
    # AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    # AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    # AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    # AUTOTHROTTLE_DEBUG = False

    # Enable and configure HTTP caching (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    # HTTPCACHE_ENABLED = True
    # HTTPCACHE_EXPIRATION_SECS = 0
    # HTTPCACHE_DIR = 'httpcache'
    # HTTPCACHE_IGNORE_HTTP_CODES = []
    # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
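
    After a run finishes (scrapy crawl wangyi), a quick sanity-check sketch,
    assuming the table schema suggested above:

    import pymysql

    conn = pymysql.Connect(host="127.0.0.1", port=3306, user="root",
                           password="123", db="Spider", charset="utf8")
    with conn.cursor() as cursor:
        cursor.execute("select count(*) from wangyi")
        print(cursor.fetchone()[0], "news items stored")
    conn.close()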