  • Crawler Notes

    import urllib2
    import json
    import cookielib
    
    
    def urllib2_request(url, method="GET", cookie="", headers={}, data=None):
        """
        :param url: the URL to request
        :param method: the request method: GET, POST, DELETE, PUT, ...
        :param cookie: the cookie string to send, e.g. cookie='k1=v1;k2=v2'
        :param headers: request headers to send along with the data, e.g. headers={'ContentType': 'application/json; charset=UTF-8'}
        :param data: the data to send; for GET these are the parameters, e.g. data={'d1': 'v1'}
        :return: a tuple of (response body string, CookieJar object)
        The CookieJar object can be iterated with a for loop:
            for item in cookiejar:
                print item.name, item.value
        """
        if data:
            data = json.dumps(data)

        cookie_jar = cookielib.CookieJar()
        handler = urllib2.HTTPCookieProcessor(cookie_jar)
        opener = urllib2.build_opener(handler)
        if cookie:
            # attach the caller-supplied cookie string to every request made through this opener
            opener.addheaders.append(('Cookie', cookie))
        request = urllib2.Request(url=url, data=data, headers=headers)
        request.get_method = lambda: method
    
        response = opener.open(request)
        origin = response.read()
    
        return origin, cookie_jar
    
    
    # GET
    result = urllib2_request('http://127.0.0.1:8001/index/', method="GET")
    
    # POST
    result = urllib2_request('http://127.0.0.1:8001/index/', method="POST", data={'k1': 'v1'})

    # PUT
    result = urllib2_request('http://127.0.0.1:8001/index/', method="PUT", data={'k1': 'v1'})
    
    Wrapping a urllib2 request
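
    As a quick check, the tuple returned by the wrapper can be unpacked and the CookieJar iterated, exactly as the docstring suggests (a small usage sketch against the same local test URL used above):

    text, jar = urllib2_request('http://127.0.0.1:8001/index/', method="GET")
    for item in jar:
        print item.name, item.value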
    
    
    
    
    import requests
     
    # 1. GET without parameters
    ret = requests.get('https://github.com/timeline.json')
     
    print ret.url
    print ret.text
     
     
     
    # 2. GET with parameters
     
    import requests
     
    payload = {'key1': 'value1', 'key2': 'value2'}
    ret = requests.get("http://httpbin.org/get", params=payload)
     
    print ret.url
    print ret.text
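
    Since httpbin echoes the query parameters back as JSON, the response can also be decoded directly with requests' JSON helper (a small extra check, assuming a requests version where Response.json() is a method):

    print ret.status_code        # 200
    print ret.json()['args']     # {u'key1': u'value1', u'key2': u'value2'}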
    
    
    
    
    import requests
     
    # 1. Basic POST example
    payload = {'key1': 'value1', 'key2': 'value2'}
    ret = requests.post("http://httpbin.org/post", data=payload)
     
    print ret.text
     
     
    # 2. Sending request headers and data
     
    import requests
    import json
     
    url = 'https://api.github.com/some/endpoint'
    payload = {'some': 'data'}
    headers = {'content-type': 'application/json'}
     
    ret = requests.post(url, data=json.dumps(payload), headers=headers)
     
    print ret.text
    print ret.cookies
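
    On requests 2.4.2 and later, the same request can skip the explicit json.dumps: passing the dict via the json= keyword serializes it and sets the Content-Type header automatically.

    ret = requests.post(url, json=payload)
    print ret.text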
    
    
    
    
    
    requests.get(url, params=None, **kwargs)
    requests.post(url, data=None, json=None, **kwargs)
    requests.put(url, data=None, **kwargs)
    requests.head(url, **kwargs)
    requests.delete(url, **kwargs)
    requests.patch(url, data=None, **kwargs)
    requests.options(url, **kwargs)
     
    # All of the methods above are built on top of this one
    requests.request(method, url, **kwargs)
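
    Because every helper above delegates to requests.request, the HTTP method can also be passed as a plain string, which is handy when it is chosen at runtime:

    import requests

    ret = requests.request("DELETE", "http://httpbin.org/delete")
    print ret.status_code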
    
    
    
    def request(method, url, **kwargs):
        """Constructs and sends a :class:`Request <Request>`.
    
        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param json: (optional) json data to send in the body of the :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
        :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
        :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) How long to wait for the server to send data
            before giving up, as a float, or a :ref:`(connect timeout, read
            timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
        :type allow_redirects: bool
        :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
        :param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to ``True``.
        :param stream: (optional) if ``False``, the response content will be immediately downloaded.
        :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
        :return: :class:`Response <Response>` object
        :rtype: requests.Response
    
        Usage::
    
          >>> import requests
          >>> req = requests.request('GET', 'http://httpbin.org/get')
          <Response [200]>
        """
    
        # By using the 'with' statement we are sure the session is closed, thus we
        # avoid leaving sockets open which can trigger a ResourceWarning in some
        # cases, and look like a memory leak in others.
        with sessions.Session() as session:
            return session.request(method=method, url=url, **kwargs)
    
    More parameters
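
    A short sketch combining a few of the optional keyword arguments documented above (the values are purely illustrative):

    import requests

    ret = requests.get(
        "http://httpbin.org/get",
        params={'k1': 'v1'},
        headers={'User-Agent': 'my-crawler'},
        timeout=5,                 # seconds; a (connect, read) tuple is also accepted
        allow_redirects=True,
        verify=True,               # verify the SSL certificate (the default)
    )
    print ret.status_code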
    
    
    
    
    # -*- coding:utf-8 -*- 
    import requests
    import time
    import hashlib
    import re
    
    
    def _password(pwd):
        ha = hashlib.md5()
        ha.update(pwd)
        return ha.hexdigest()
    
    def login():
        
        login_dict = {
            'username': "your username",
            'pwd': _password("your password"),
            'imgcode': "",
            'f': 'json'
        }
    
        login_res = requests.post(
            url= "https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN",
            data=login_dict,
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'})
    
        # After a successful login, grab the cookies from the server response
        resp_cookies_dict = login_res.cookies.get_dict()
        # After login, get the response body
        resp_text = login_res.text
        # After login, extract the token
        token = re.findall(r".*token=(\d+)", resp_text)[0]
    
        print resp_text
        print token
        print resp_cookies_dict
    
    login()
    
    Login code
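
    The _password helper above simply MD5-hashes the plain-text password before it is posted, e.g.:

    print _password("123456")    # e10adc3949ba59abbe56e057f20f883e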
    
    
    # -*- coding:utf-8 -*- 
    import requests
    import time
    import hashlib
    import json
    import re
    
    LOGIN_COOKIES_DICT = {}
    
    def _password(pwd):
        ha = hashlib.md5()
        ha.update(pwd)
        return ha.hexdigest()
    
    def login():
        
        login_dict = {
            'username': "your username",
            'pwd': _password("your password"),
            'imgcode': "",
            'f': 'json'
        }
    
        login_res = requests.post(
            url= "https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN",
            data=login_dict,
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'})
    
        # After a successful login, grab the cookies from the server response
        resp_cookies_dict = login_res.cookies.get_dict()
        # After login, get the response body
        resp_text = login_res.text
        # After login, extract the token
        token = re.findall(r".*token=(\d+)", resp_text)[0]
    
        return {'token': token, 'cookies': resp_cookies_dict}
    
    
    def standard_user_list(content):
        content = re.sub(r'\s*', '', content)
        content = re.sub(r'\n*', '', content)
        data = re.findall("""cgiData=(.*);seajs""", content)[0]
        data = data.strip()
        while True:
            temp = re.split(r'({)(\w+)(:)', data, 1)
            if len(temp) == 5:
                temp[2] = '"' + temp[2] + '"'
                data = ''.join(temp)
            else:
                break
    
        while True:
            temp = re.split(r'(,)(\w+)(:)', data, 1)
            if len(temp) == 5:
                temp[2] = '"' + temp[2] + '"'
                data = ''.join(temp)
            else:
                break
    
        data = re.sub(r'\*\d+', "", data)
        ret = json.loads(data)
        return ret
    
    
    def get_user_list():
    
        login_dict = login()
        LOGIN_COOKIES_DICT.update(login_dict)
    
        login_cookie_dict = login_dict['cookies']
        res_user_list = requests.get(
            url= "https://mp.weixin.qq.com/cgi-bin/user_tag",
            params = {"action": "get_all_data", "lang": "zh_CN", "token": login_dict['token']},
            cookies = login_cookie_dict,
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'}
        )
        user_info = standard_user_list(res_user_list.text)
        for item in user_info['user_list']:
            print "%s %s " % (item['nick_name'],item['id'],)
        
    get_user_list()
    
    Implementation
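
    To make the key-quoting logic in standard_user_list concrete, here is a toy input imitating the "cgiData = {...};seajs" fragment embedded in the page (the data is made up, not taken from the real page):

    sample = 'cgiData={total:2,user_list:[{id:1,nick_name:"a"},{id:2,nick_name:"b"}]};seajs'
    print standard_user_list(sample)
    # -> {u'total': 2, u'user_list': [{u'id': 1, u'nick_name': u'a'}, {u'id': 2, u'nick_name': u'b'}]}  (key order may vary)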
    
    
    
    # -*- coding:utf-8 -*- 
    import requests
    import time
    import hashlib
    import json
    import re
    
    LOGIN_COOKIES_DICT = {}
    
    def _password(pwd):
        ha = hashlib.md5()
        ha.update(pwd)
        return ha.hexdigest()
    
    def login():
        
        login_dict = {
            'username': "your username",
            'pwd': _password("your password"),
            'imgcode': "",
            'f': 'json'
        }
    
        login_res = requests.post(
            url= "https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN",
            data=login_dict,
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'})
    
        # After a successful login, grab the cookies from the server response
        resp_cookies_dict = login_res.cookies.get_dict()
        # After login, get the response body
        resp_text = login_res.text
        # After login, extract the token
        token = re.findall(r".*token=(\d+)", resp_text)[0]
    
        return {'token': token, 'cookies': resp_cookies_dict}
    
    
    def standard_user_list(content):
        content = re.sub(r'\s*', '', content)
        content = re.sub(r'\n*', '', content)
        data = re.findall("""cgiData=(.*);seajs""", content)[0]
        data = data.strip()
        while True:
            temp = re.split(r'({)(\w+)(:)', data, 1)
            if len(temp) == 5:
                temp[2] = '"' + temp[2] + '"'
                data = ''.join(temp)
            else:
                break
    
        while True:
            temp = re.split(r'(,)(\w+)(:)', data, 1)
            if len(temp) == 5:
                temp[2] = '"' + temp[2] + '"'
                data = ''.join(temp)
            else:
                break
    
        data = re.sub(r'\*\d+', "", data)
        ret = json.loads(data)
        return ret
    
    
    def get_user_list():
    
        login_dict = login()
        LOGIN_COOKIES_DICT.update(login_dict)
    
        login_cookie_dict = login_dict['cookies']
        res_user_list = requests.get(
            url= "https://mp.weixin.qq.com/cgi-bin/user_tag",
            params = {"action": "get_all_data", "lang": "zh_CN", "token": login_dict['token']},
            cookies = login_cookie_dict,
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'}
        )
        user_info = standard_user_list(res_user_list.text)
        for item in user_info['user_list']:
            print "%s %s " % (item['nick_name'],item['id'],)
        
    
    def send_msg(user_fake_id, content='nothing to send'):
    
        login_dict = LOGIN_COOKIES_DICT
        
        token = login_dict['token']
        login_cookie_dict = login_dict['cookies']
    
        send_dict = {
            'token': token,
            'lang': "zh_CN",
            'f': 'json',
            'ajax': 1,
            'random': "0.5322618900912392",
            'type': 1,
            'content': content,
            'tofakeid': user_fake_id,
            'imgcode': ''
        }
       
        send_url = "https://mp.weixin.qq.com/cgi-bin/singlesend?t=ajax-response&f=json&token=%s&lang=zh_CN" % (token,)
        message_list = requests.post(
            url=send_url, 
            data=send_dict, 
            cookies=login_cookie_dict, 
            headers={'Referer': 'https://mp.weixin.qq.com/cgi-bin/login?lang=zh_CN'}
        )
    
    
    get_user_list()
    fake_id = raw_input('Enter the user ID: ')
    content = raw_input('Enter the message content: ')
    send_msg(fake_id, content)
    
    Message-sending code
    
    
    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    import scrapy
     
    class XiaoHuarSpider(scrapy.spiders.Spider):
        name = "xiaohuar"
        allowed_domains = ["xiaohuar.com"]
        start_urls = [
            "http://www.xiaohuar.com/hua/",
        ]
     
        def parse(self, response):
            # print(response, type(response))
            # from scrapy.http.response.html import HtmlResponse
            # print(response.body_as_unicode())
     
            current_url = response.url
            body = response.body
            unicode_body = response.body_as_unicode()
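
    This spider lives in a Scrapy project's spiders/ directory and is run by its name attribute, i.e. scrapy crawl xiaohuar (add --nolog to silence the log output).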
    
    
    
    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    import scrapy
    from scrapy.http import Request
    from scrapy.selector import HtmlXPathSelector
    import re
    import urllib
    import os
     
     
    class XiaoHuarSpider(scrapy.spiders.Spider):
        name = "xiaohuar"
        allowed_domains = ["xiaohuar.com"]
        start_urls = [
            "http://www.xiaohuar.com/list-1-1.html",
        ]
     
        def parse(self, response):
            # Parse the page
            # Find the content on the page that matches our rules (the photos) and save it
            # Find all the <a> tags, visit them, and keep drilling down level by level
     
            hxs = HtmlXPathSelector(response)
     
            # If the URL looks like http://www.xiaohuar.com/list-1-\d+.html
            if re.match(r'http://www.xiaohuar.com/list-1-\d+\.html', response.url):
                items = hxs.select('//div[@class="item_list infinite_scroll"]/div')
                for i in range(len(items)):
                    src = hxs.select('//div[@class="item_list infinite_scroll"]/div[%d]//div[@class="img"]/a/img/@src' % i).extract()
                    name = hxs.select('//div[@class="item_list infinite_scroll"]/div[%d]//div[@class="img"]/span/text()' % i).extract()
                    school = hxs.select('//div[@class="item_list infinite_scroll"]/div[%d]//div[@class="img"]/div[@class="btns"]/a/text()' % i).extract()
                    if src:
                        ab_src = "http://www.xiaohuar.com" + src[0]
                        file_name = "%s_%s.jpg" % (school[0].encode('utf-8'), name[0].encode('utf-8'))
                        file_path = os.path.join("/Users/wupeiqi/PycharmProjects/beauty/pic", file_name)
                        urllib.urlretrieve(ab_src, file_path)
     
            # Collect every URL on the page, keep crawling, and look for more URLs of the same kind
            all_urls = hxs.select('//a/@href').extract()
            for url in all_urls:
                if url.startswith('http://www.xiaohuar.com/list-1-'):
                    yield Request(url, callback=self.parse)
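
    Because parse() yields a new Request for every matching link, the crawl above keeps recursing until no new URLs appear. Scrapy's DEPTH_LIMIT setting caps how deep that recursion goes (a settings.py sketch; 0 means unlimited):

    # settings.py
    DEPTH_LIMIT = 1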
    
    
    
    
    
    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    
    import scrapy
    import hashlib
    from tutorial.items import JinLuoSiItem
    from scrapy.http import Request
    from scrapy.selector import HtmlXPathSelector
    
    
    class JinLuoSiSpider(scrapy.spiders.Spider):
        count = 0
        url_set = set()
    
        name = "jluosi"
        domain = 'http://www.jluosi.com'
        allowed_domains = ["jluosi.com"]
    
        start_urls = [
            "http://www.jluosi.com:80/ec/goodsDetail.action?jls=QjRDNEIzMzAzOEZFNEE3NQ==",
        ]
    
        def parse(self, response):
            md5_obj = hashlib.md5()
            md5_obj.update(response.url)
            md5_url = md5_obj.hexdigest()
            if md5_url in JinLuoSiSpider.url_set:
                pass
            else:
                JinLuoSiSpider.url_set.add(md5_url)
                hxs = HtmlXPathSelector(response)
                if response.url.startswith('http://www.jluosi.com:80/ec/goodsDetail.action'):
                    item = JinLuoSiItem()
                    item['company'] = hxs.select('//div[@class="ShopAddress"]/ul/li[1]/text()').extract()
                    item['link'] = hxs.select('//div[@class="ShopAddress"]/ul/li[2]/text()').extract()
                    item['qq'] = hxs.select('//div[@class="ShopAddress"]//a/@href').re(r'.*uin=(?P<qq>\d*)&')
                    item['address'] = hxs.select('//div[@class="ShopAddress"]/ul/li[4]/text()').extract()
    
                    item['title'] = hxs.select('//h1[@class="goodsDetail_goodsName"]/text()').extract()
    
                    item['unit'] = hxs.select('//table[@class="R_WebDetail_content_tab"]//tr[1]//td[3]/text()').extract()
                    product_list = []
                    product_tr = hxs.select('//table[@class="R_WebDetail_content_tab"]//tr')
                    for i in range(2,len(product_tr)):
                        temp = {
                            'standard':hxs.select('//table[@class="R_WebDetail_content_tab"]//tr[%d]//td[2]/text()' %i).extract()[0].strip(),
                            'price':hxs.select('//table[@class="R_WebDetail_content_tab"]//tr[%d]//td[3]/text()' %i).extract()[0].strip(),
                        }
                        product_list.append(temp)
    
                    item['product_list'] = product_list
                    yield item
    
                current_page_urls = hxs.select('//a/@href').extract()
                for i in range(len(current_page_urls)):
                    url = current_page_urls[i]
                    if url.startswith('http://www.jluosi.com'):
                        url_ab = url
                        yield Request(url_ab, callback=self.parse)
    
    Selector rules demo
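
    The select()/extract()/re() calls used above can also be tried standalone by feeding the selector an HTML string instead of a response (a minimal sketch, assuming a Scrapy version in which HtmlXPathSelector still accepts a text= argument):

    from scrapy.selector import HtmlXPathSelector

    hxs = HtmlXPathSelector(text='<div class="img"><a href="/p/1"><img src="/1.jpg"/></a><span>name</span></div>')
    print hxs.select('//div[@class="img"]/a/img/@src').extract()    # [u'/1.jpg']
    print hxs.select('//div[@class="img"]/span/text()').extract()   # [u'name']
    print hxs.select('//a/@href').re(r'/p/(\d+)')                   # [u'1']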
    
    
    
    
    # -*- coding: utf-8 -*-
     
    # Define here the models for your scraped items
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/items.html
     
    import scrapy
     
    class JieYiCaiItem(scrapy.Item):
     
        company = scrapy.Field()
        title = scrapy.Field()
        qq = scrapy.Field()
        info = scrapy.Field()
        more = scrapy.Field()
    
    
    
    #!/usr/bin/env python
    # -*- coding:utf-8 -*-
    
    import scrapy
    import hashlib
    from beauty.items import JieYiCaiItem
    from scrapy.http import Request
    from scrapy.selector import HtmlXPathSelector
    from scrapy.spiders import CrawlSpider, Rule
    from scrapy.linkextractors import LinkExtractor
    
    
    class JieYiCaiSpider(scrapy.spiders.Spider):
        count = 0
        url_set = set()
    
        name = "jieyicai"
        domain = 'http://www.jieyicai.com'
        allowed_domains = ["jieyicai.com"]
    
        start_urls = [
            "http://www.jieyicai.com",
        ]
    
        rules = [
            # URLs matching this pattern are followed for their links only; their content is not scraped (the URL is made up, replace it for real use)
            #Rule(SgmlLinkExtractor(allow=(r'http://test_url/test?page_index=\d+'))),
            # URLs matching this pattern are scraped for content (the URL is made up, replace it for real use)
            #Rule(LinkExtractor(allow=(r'http://www.jieyicai.com/Product/Detail.aspx?pid=\d+')), callback="parse"),
        ]
    
        def parse(self, response):
            md5_obj = hashlib.md5()
            md5_obj.update(response.url)
            md5_url = md5_obj.hexdigest()
            if md5_url in JieYiCaiSpider.url_set:
                pass
            else:
                JieYiCaiSpider.url_set.add(md5_url)
                
                hxs = HtmlXPathSelector(response)
                if response.url.startswith('http://www.jieyicai.com/Product/Detail.aspx'):
                    item = JieYiCaiItem()
                    item['company'] = hxs.select('//span[@class="username g-fs-14"]/text()').extract()
                    item['qq'] = hxs.select('//span[@class="g-left bor1qq"]/a/@href').re(r'.*uin=(?P<qq>\d*)&')
                    item['info'] = hxs.select('//div[@class="padd20 bor1 comard"]/text()').extract()
                    item['more'] = hxs.select('//li[@class="style4"]/a/@href').extract()
                    item['title'] = hxs.select('//div[@class="g-left prodetail-text"]/h2/text()').extract()
                    yield item
    
                current_page_urls = hxs.select('//a/@href').extract()
                for i in range(len(current_page_urls)):
                    url = current_page_urls[i]
                    if url.startswith('/'):
                        url_ab = JieYiCaiSpider.domain + url
                        yield Request(url_ab, callback=self.parse)
    
    spider
    
    
    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
    
    import json
    from twisted.enterprise import adbapi
    import MySQLdb.cursors
    import re
    
    mobile_re = re.compile(r'(13[0-9]|15[012356789]|17[678]|18[0-9]|14[57])[0-9]{8}')
    phone_re = re.compile(r'(\d+-\d+|\d+)')
    
    class JsonPipeline(object):
    
        def __init__(self):
            self.file = open('/Users/wupeiqi/PycharmProjects/beauty/beauty/jieyicai.json', 'wb')
    
    
        def process_item(self, item, spider):
            line = "%s  %s
    " % (item['company'][0].encode('utf-8'), item['title'][0].encode('utf-8'))
            self.file.write(line)
            return item
    
    class DBPipeline(object):
    
        def __init__(self):
            self.db_pool = adbapi.ConnectionPool('MySQLdb',
                                                 db='DbCenter',
                                                 user='root',
                                                 passwd='123',
                                                 cursorclass=MySQLdb.cursors.DictCursor,
                                                 use_unicode=True)
    
        def process_item(self, item, spider):
            query = self.db_pool.runInteraction(self._conditional_insert, item)
            query.addErrback(self.handle_error)
            return item
    
        def _conditional_insert(self, tx, item):
            tx.execute("select nid from company where company = %s", (item['company'][0], ))
            result = tx.fetchone()
            if result:
                pass
            else:
                phone_obj = phone_re.search(item['info'][0].strip())
                phone = phone_obj.group() if phone_obj else ' '
    
                mobile_obj = mobile_re.search(item['info'][1].strip())
                mobile = mobile_obj.group() if mobile_obj else ' '
    
                values = (
                    item['company'][0],
                    item['qq'][0],
                    phone,
                    mobile,
                    item['info'][2].strip(),
                    item['more'][0])
                tx.execute("insert into company(company,qq,phone,mobile,address,more) values(%s,%s,%s,%s,%s,%s)", values)
    
        def handle_error(self, e):
            print 'error',e
    
    pipelines
    
    
    
    ITEM_PIPELINES = {
        'beauty.pipelines.DBPipeline': 300,
        'beauty.pipelines.JsonPipeline': 100,
    }
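
    The integer values set the running order: lower numbers run first, so an item passes through JsonPipeline (100) before DBPipeline (300); values are conventionally chosen in the 0-1000 range.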
  • Original article: https://www.cnblogs.com/zhaobin022/p/5546006.html