  • scrapy: storing scraped data in MySQL

    #spider.py
    from scrapy.linkextractors import LinkExtractor
    from scrapy.spiders import CrawlSpider, Rule
    from Cwpjt.items import CwpjtItem


    class FulongSpider(CrawlSpider):
        name = 'fulong'
        allowed_domains = ['sina.com.cn']
        start_urls = ['http://sina.com.cn/']
        # example of an article URL the rule below should match:
        # http://news.sina.com.cn/c/2017-05-09/doc-ifyeycte9324112.shtml
        rules = (
            Rule(LinkExtractor(allow=('.*?/[0-9]{4}.[0-9]{2}.[0-9]{2}.doc-.*?shtml'),
                               allow_domains=('sina.com.cn')),
                 callback='parse_item', follow=True),
        )

        def parse_item(self, response):
            i = CwpjtItem()
            # the page title and the keywords meta tag become the two item fields
            i['name'] = response.xpath('/html/head/title/text()').extract()
            i['kws'] = response.xpath('/html/head/meta[@name="keywords"]/@content').extract()
            return i
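
    The Rule above drives the crawl: LinkExtractor applies the allow pattern as a regex search against every link found on a page, and matching links are fetched and handed to parse_item. As a quick illustration (not from the original post), the example news URL above is indeed accepted by that pattern:

    # standalone check of the allow pattern used in the Rule (illustration only)
    import re

    pattern = '.*?/[0-9]{4}.[0-9]{2}.[0-9]{2}.doc-.*?shtml'
    url = 'http://news.sina.com.cn/c/2017-05-09/doc-ifyeycte9324112.shtml'

    # LinkExtractor tests its allow patterns with a regex search; a match means
    # the link is followed and its response passed to parse_item
    print(re.search(pattern, url) is not None)   # True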

    pipelines.py

    import pymysql


    class CwpjtPipeline(object):
        def __init__(self):
            # connect to the local MySQL instance; charset is set so that
            # Chinese titles/keywords are stored without garbling
            self.conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                                        db='mydb', charset='utf8mb4')
            self.cursor = self.conn.cursor()

        def process_item(self, item, spider):
            # each field is a list of extracted values; take the first one
            name = item['name'][0]
            kws = item['kws'][0]
            sql = "insert into hehe(title,kws) VALUES(%s,%s)"
            self.cursor.execute(sql, (name, kws))
            self.conn.commit()
            return item

        def close_spider(self, spider):
            self.conn.close()
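
    The pipeline inserts into a table hehe inside the mydb database, but the post never shows the schema. A minimal sketch, assuming a table with just the two inserted columns (the id column and the VARCHAR sizes are guesses), could be created with the same pymysql connection settings:

    # one-off helper to create the assumed table; the column names come from the
    # INSERT statement above, the types and sizes are assumptions
    import pymysql

    conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456',
                           db='mydb', charset='utf8mb4')
    with conn.cursor() as cursor:
        cursor.execute(
            "CREATE TABLE IF NOT EXISTS hehe ("
            " id INT AUTO_INCREMENT PRIMARY KEY,"
            " title VARCHAR(255),"
            " kws VARCHAR(255)"
            ")"
        )
    conn.commit()
    conn.close()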

    items.py

    import scrapy
    
    
    class CwpjtItem(scrapy.Item):
        # define the fields for your item here like:
        name = scrapy.Field()
        kws = scrapy.Field()
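
    For the pipeline to actually receive items, it has to be enabled in the project's settings.py, which the post doesn't show. Assuming the project package is Cwpjt (as in the spider's import), the registration would look roughly like the sketch below; after that the crawl is started with scrapy crawl fulong.

    # settings.py (sketch) -- register the MySQL pipeline; the integer sets
    # its order relative to any other pipelines
    ITEM_PIPELINES = {
        'Cwpjt.pipelines.CwpjtPipeline': 300,
    }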
  • Original post: https://www.cnblogs.com/Erick-L/p/6837125.html