  • Scrapy blog crawler

    items.py

    import scrapy
    
    
    class FulongpjtItem(scrapy.Item):
        # fields collected for each blog post
        name = scrapy.Field()     # post title
        url = scrapy.Field()      # post URL
        hits = scrapy.Field()     # view count
        comment = scrapy.Field()  # comment count
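
    A Scrapy item behaves like a dictionary, which is how the pipeline below indexes into it. A quick sketch of a populated item (the values are made up for illustration):

    item = FulongpjtItem()
    item['name'] = ['Some post title']      # hypothetical values, for illustration only
    item['url'] = ['http://19940007.blog.hexun.com/1_d.html']
    item['hits'] = ['123']
    item['comment'] = ['4']
    print(dict(item))   # Scrapy items can be converted to plain dicts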

    pipelines.py

    import pymysql


    class FulongpjtPipeline(object):
        def __init__(self):
            # charset='utf8mb4' so Chinese post titles are stored correctly
            self.conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                                        db='mydb', charset='utf8mb4')
            self.cursor = self.conn.cursor()

        def process_item(self, item, spider):
            # each item field holds a list of values scraped from one listing page;
            # insert the rows one by one
            for j in range(0, len(item['name'])):
                name = item['name'][j]
                url = item['url'][j]
                hits = item['hits'][j]
                comment = item['comment'][j]
                sql = "insert into boke(name,url,hits,comment) VALUES(%s,%s,%s,%s)"
                self.cursor.execute(sql, (name, url, hits, comment))
                self.conn.commit()
            return item

        def close_spider(self, spider):
            self.conn.close()
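
    The pipeline only runs if it is registered in the project's settings.py, and it assumes a boke table already exists in mydb. A minimal sketch of both follows; the column types are assumptions, not taken from the original post:

    # settings.py — register the pipeline (the number is its priority, lower runs first)
    ITEM_PIPELINES = {
        'Fulongpjt.pipelines.FulongpjtPipeline': 300,
    }

    # one-off helper to create the target table (column types are guesses)
    import pymysql

    conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456', db='mydb', charset='utf8mb4')
    with conn.cursor() as cursor:
        cursor.execute("""
            CREATE TABLE IF NOT EXISTS boke (
                id INT AUTO_INCREMENT PRIMARY KEY,
                name VARCHAR(255),
                url VARCHAR(255),
                hits VARCHAR(20),
                comment VARCHAR(20)
            ) DEFAULT CHARSET=utf8mb4
        """)
    conn.commit()
    conn.close()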

    spd.py

    import scrapy
    from Fulongpjt.items import FulongpjtItem
    from scrapy.http import Request
    import re
    import urllib.request


    class MyspdSpider(scrapy.Spider):
        name = "myspd"
        allowed_domains = ["hexun.com"]
        start_urls = ['http://hexun.com/']
        uid = '19940007'
        # send a browser User-Agent with every request
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36 QIHU 360EE'}

        def start_requests(self):
            yield Request('http://' + str(self.uid) + '.blog.hexun.com/p1/default.html', headers=self.headers)

        def parse(self, response):
            item = FulongpjtItem()
            # post titles and URLs come straight from the listing page
            item['name'] = response.xpath('//span[@class="ArticleTitleText"]/a/text()').extract()
            item['url'] = response.xpath('//span[@class="ArticleTitleText"]/a/@href').extract()
            # hit and comment counts are filled in by a separate counter script;
            # find its URL in the page source and fetch it with urllib
            part1 = '''<script type="text/javascript" src="(http://click.tool.hexun.com/.*?)">'''
            hcurl = re.compile(part1).findall(str(response.body))[0]
            headers2 = ('User-Agent', self.headers['User-Agent'])
            opener = urllib.request.build_opener()
            opener.addheaders = [headers2]
            urllib.request.install_opener(opener)
            data = urllib.request.urlopen(hcurl).read()
            part2 = r"click\d*?','(\d*?)'"
            part3 = r"comment\d*?','(\d*?)'"
            item['hits'] = re.compile(part2).findall(str(data))
            item['comment'] = re.compile(part3).findall(str(data))
            yield item
            # extract the total number of listing pages and queue the remaining ones
            part4 = 'blog.hexun.com/p(.*?)/'
            data2 = re.compile(part4).findall(str(response.body))
            if len(data2) > 2:
                totalurl = data2[-2]
            else:
                totalurl = 1
            for i in range(2, int(totalurl) + 1):
                next_url = 'http://' + str(self.uid) + '.blog.hexun.com/p' + str(i) + '/default.html'
                yield Request(next_url, callback=self.parse, headers=self.headers)
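
    With the three files in place, the crawl can be started from the project root with "scrapy crawl myspd". As an alternative, a small driver script like the sketch below also works, assuming spd.py sits in the standard Fulongpjt/spiders/ package:

    # run.py — run the spider without the scrapy command line
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings
    from Fulongpjt.spiders.spd import MyspdSpider

    process = CrawlerProcess(get_project_settings())
    process.crawl(MyspdSpider)
    process.start()   # blocks until the crawl finishes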
  • Original post: https://www.cnblogs.com/Erick-L/p/6841027.html