  • Web scraping

    Web scraping: write a program that sends requests to a website, then analyzes the returned resource and extracts the useful data.
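    A minimal end-to-end sketch of that request-then-extract loop, using httpbin.org's sample /html page (the string slicing is only a quick illustration; the sections below do this properly with requests and BeautifulSoup):

    import requests

    # 1. send the request and fetch the resource
    ret = requests.get('http://httpbin.org/html')
    # 2. analyze the response and extract the useful data (here, the <h1> heading)
    text = ret.text
    title = text[text.find('<h1>') + len('<h1>'):text.find('</h1>')]
    print(title)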

    requests

    GET requests

    # 1. GET without parameters
    
    import requests
    
    ret = requests.get('https://github.com/timeline.json')
    
    print(ret.url)
    print(ret.text)
    
    
    
    # 2. GET with parameters
    
    import requests
    
    payload = {'key1': 'value1', 'key2': 'value2'}
    ret = requests.get("http://httpbin.org/get", params=payload)
    
    print(ret.url)
    print(ret.text)

    POST requests

    # 1. Basic POST
    
    import requests
    
    payload = {'key1': 'value1', 'key2': 'value2'}
    ret = requests.post("http://httpbin.org/post", data=payload)
    
    print(ret.text)
    
    
    # 2. POST with custom headers and a JSON body
    
    import requests
    import json
    
    url = 'https://api.github.com/some/endpoint'
    payload = {'some': 'data'}
    headers = {'content-type': 'application/json'}
    
    # either serialize the payload yourself...
    ret = requests.post(url, data=json.dumps(payload), headers=headers)
    # ...or let requests serialize it via the json parameter
    ret = requests.post(url, json=payload, headers=headers)
    
    print(ret.text)
    print(ret.cookies)

    Other request methods

    requests.get(url, params=None, **kwargs)
    requests.post(url, data=None, json=None, **kwargs)
    requests.put(url, data=None, **kwargs)
    requests.head(url, **kwargs)
    requests.delete(url, **kwargs)
    requests.patch(url, data=None, **kwargs)
    requests.options(url, **kwargs)
      
    # All of the above are thin wrappers around this method
    requests.request(method, url, **kwargs)
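    Since every helper just fills in the method argument, the two calls below are interchangeable; a quick sketch against httpbin.org (the same test host used in the examples above):

    import requests

    r1 = requests.get('http://httpbin.org/get', params={'k1': 'v1'})
    r2 = requests.request('get', 'http://httpbin.org/get', params={'k1': 'v1'})

    print(r1.url == r2.url)  # True: both request http://httpbin.org/get?k1=v1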

    Parameters

    def param_method_url():
        # requests.request(method='get', url='http://127.0.0.1:8000/test/')
        # requests.request(method='post', url='http://127.0.0.1:8000/test/')
        pass
    
    
    def param_param():
        # - can be a dict
        # - can be a string
        # - can be bytes (ASCII-encodable only)
    
        # requests.request(method='get',
        # url='http://127.0.0.1:8000/test/',
        # params={'k1': 'v1', 'k2': '水电费'})
    
        # requests.request(method='get',
        # url='http://127.0.0.1:8000/test/',
        # params="k1=v1&k2=水电费&k3=v3&k3=vv3")
    
        # requests.request(method='get',
        # url='http://127.0.0.1:8000/test/',
        # params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))
    
        # Wrong: byte params must be ASCII, so non-ASCII bytes like these fail
        # requests.request(method='get',
        # url='http://127.0.0.1:8000/test/',
        # params=bytes("k1=v1&k2=水电费&k3=v3&k3=vv3", encoding='utf8'))
        pass
    
    
    def param_data():
        # can be a dict
        # can be a string
        # can be bytes
        # can be a file object
    
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # data={'k1': 'v1', 'k2': '水电费'})
    
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # data="k1=v1; k2=v2; k3=v3; k3=v4"
        # )
    
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # data="k1=v1;k2=v2;k3=v3;k3=v4",
        # headers={'Content-Type': 'application/x-www-form-urlencoded'}
        # )
    
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # data=open('data_file.py', mode='r', encoding='utf-8'), # file contents: k1=v1;k2=v2;k3=v3;k3=v4
        # headers={'Content-Type': 'application/x-www-form-urlencoded'}
        # )
        pass
    
    
    def param_json():
        # json serializes the given data into a string with json.dumps(...),
        # sends it in the request body, and sets Content-Type: application/json
        requests.request(method='POST',
                         url='http://127.0.0.1:8000/test/',
                         json={'k1': 'v1', 'k2': '水电费'})
    
    
    def param_headers():
        # send request headers to the server
        requests.request(method='POST',
                         url='http://127.0.0.1:8000/test/',
                         json={'k1': 'v1', 'k2': '水电费'},
                         headers={'Content-Type': 'application/x-www-form-urlencoded'}
                         )
    
    
    def param_cookies():
        # send cookies to the server
        requests.request(method='POST',
                         url='http://127.0.0.1:8000/test/',
                         data={'k1': 'v1', 'k2': 'v2'},
                         cookies={'cook1': 'value1'},
                         )
        # a CookieJar also works (the dict form is a wrapper built on top of it)
        from http.cookiejar import CookieJar
        from http.cookiejar import Cookie
    
        obj = CookieJar()
        obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/', secure=False, expires=None,
                              discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,
                              port_specified=False, domain_specified=False, domain_initial_dot=False, path_specified=False)
                       )
        requests.request(method='POST',
                         url='http://127.0.0.1:8000/test/',
                         data={'k1': 'v1', 'k2': 'v2'},
                         cookies=obj)
    
                         
    def param_proxies():
        # proxies: useful once your IP has been blocked
    
        # proxies = {
        # "http": "61.172.249.96:80",
        # "https": "http://61.185.219.126:3128",
        # }
    
        # proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}
    
        # ret = requests.get("http://www.proxy360.cn/Proxy", proxies=proxies)
        # print(ret.headers)
    
    
        # from requests.auth import HTTPProxyAuth
        #
        # proxyDict = {
        # 'http': '77.75.105.165',
        # 'https': '77.75.105.165'
        # }
        # auth = HTTPProxyAuth('username', 'mypassword')
        #
        # r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
        # print(r.text)
    
        pass
    
    
    def param_files():
        # upload a file
        # file_dict = {
        # 'f1': open('readme', 'rb')
        # }
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # files=file_dict)
    
        # upload a file with a custom filename
        # file_dict = {
        # 'f1': ('test.txt', open('readme', 'rb'))
        # }
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # files=file_dict)
    
        # upload a file with a custom filename, passing the content as a string
        # file_dict = {
        # 'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")
        # }
        # requests.request(method='POST',
        # url='http://127.0.0.1:8000/test/',
        # files=file_dict)
    
        # upload a file with a custom filename, content type, and extra headers
        # file_dict = {
        #     'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf", 'application/text', {'k1': '0'})
        # }
        # requests.request(method='POST',
        #                  url='http://127.0.0.1:8000/test/',
        #                  files=file_dict)
    
        pass
    
    
    def param_auth():
        from requests.auth import HTTPBasicAuth, HTTPDigestAuth
    
        ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
        print(ret.text)
    
        # ret = requests.get('http://192.168.1.1',
        # auth=HTTPBasicAuth('admin', 'admin'))
        # ret.encoding = 'gbk'
        # print(ret.text)
    
        # ret = requests.get('http://httpbin.org/digest-auth/auth/user/pass', auth=HTTPDigestAuth('user', 'pass'))
        # print(ret)
        #
    
    
    def param_timeout():
        # ret = requests.get('http://google.com/', timeout=1)
        # print(ret)
    
        # ret = requests.get('http://google.com/', timeout=(5, 1))
        # print(ret)
        pass
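

    def param_timeout_handling():
        # a hedged sketch, not part of the original listing: timeout=(connect, read)
        # sets the two timeouts separately, and either one expiring raises
        # requests.exceptions.Timeout, which can be caught like this
        try:
            ret = requests.get('http://google.com/', timeout=(5, 1))
            print(ret.status_code)
        except requests.exceptions.Timeout:
            print('request timed out')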
    
    
    def param_allow_redirects():
        ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
        print(ret.text)
    
    
    
    
    def param_stream():
        ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
        print(ret.content)
        ret.close()
    
        # from contextlib import closing
        # with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
        # # handle the response here
        # for i in r.iter_content():
        # print(i)
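

    def param_stream_download():
        # a hedged sketch, not part of the original listing: stream=True combined
        # with iter_content() writes a large response to disk chunk by chunk
        # instead of holding the whole body in memory
        with requests.get('http://httpbin.org/bytes/1024', stream=True) as r:
            with open('downloaded.bin', 'wb') as f:
                for chunk in r.iter_content(chunk_size=256):
                    f.write(chunk)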
    
    
    def requests_session():
        import requests
    
        session = requests.Session()
    
        ### 1. First visit any page to obtain a cookie
    
        i1 = session.get(url="http://dig.chouti.com/help/service")
    
        ### 2. Log in, carrying the cookie from step 1; the backend authorizes the gpsd value in the cookie
        i2 = session.post(
            url="http://dig.chouti.com/login",
            data={
                'phone': "8615131255089",
                'password': "xxxxxx",
                'oneMonth': ""
            }
        )
    
        i3 = session.post(
            url="http://dig.chouti.com/link/vote?linksId=8589623",
        )
        print(i3.text)

    BeautifulSoup

    BeautifulSoup is a module that takes an HTML or XML string, parses it, and then lets you use its methods to quickly locate specific elements, which makes searching HTML or XML documents simple.

    Install: pip3 install beautifulsoup4

    Usage:

    from bs4 import BeautifulSoup

    What bs4 is used for:
      - parsing scraped data
      - parsing XML data
      - validating the format of user-submitted rich text (KindEditor, UEditor)

    soup = BeautifulSoup(html, "html.parser")
    # find the first <a> tag
    tag1 = soup.find(name='a')
    # find all <a> tags
    tag2 = soup.find_all(name='a')
    # find the tag with id="link2" (select takes a CSS selector and returns a list)
    tag3 = soup.select('#link2')
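    Putting those lookups together, a minimal self-contained sketch (the HTML snippet is invented for illustration):

    from bs4 import BeautifulSoup

    html = '<div id="d1"><a class="link" href="/a">first</a><a class="link" href="/b">second</a></div>'
    soup = BeautifulSoup(html, "html.parser")

    a = soup.find(name='a')
    print(a.text, a.get('href'))              # first /a
    print(len(soup.find_all(name='a')))       # 2
    print(soup.select('#d1 a.link')[1].text)  # second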

    Examples

    import requests
    from bs4 import BeautifulSoup
    
    # 1. Download the page
    ret=requests.get(
        url="https://www.autohome.com.cn/news/",
        )
    
    # raw bytes of the response
    # print(ret.content)
    
    # the encoding detected from the page bytes
    # print(ret.apparent_encoding)
    ret.encoding=ret.apparent_encoding
    # decode the bytes into a string
    # print(ret.text)
    
    # 2. Parse
    # extract the content we want
    soup=BeautifulSoup(ret.text,"html.parser")  # choose the parser
    
    div=soup.find(name="div",id="auto-channel-lazyload-article")
    
    li_list=div.find_all(name="li")
    # print(li_list)
    for li in li_list:
        h3=li.find(name="h3")
        if not h3:
            continue
    
        a=li.find(name="a")
        href=(a.get("href")).strip("//")
    
        p=li.find(name="p")
    
        img=li.find(name="img")
        src=img.get("src")
    
        file_name=src.split("__")[1]
        # download the image
        img_response=requests.get(url="https:%s"%src)
        print(img_response)
        # write it to a file
        with open("img/%s"%file_name,"wb") as f:
            f.write(img_response.content)
    
    
        # print(h3.text)
        # print(href)
        # print(p.text)
    Autohome news
    import requests
    from bs4 import BeautifulSoup
    
    # get the cookies before logging in (still unauthorized)
    ret1=requests.get(
        url="https://dig.chouti.com/",
        headers={
            "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        }
        )
    ret1_cookie=ret1.cookies.get_dict()
    
    
    # log in
    ret=requests.post(
        url='https://dig.chouti.com/login',
        data={
            "phone":'xx',
            "password":"xx",
            "oneMonth":1
        },
        headers={
            "User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        },
        cookies=ret1_cookie
    )
    
    # iterate over the pages
    for id in range(1,2):
    
        ret2=requests.get(
            url="https://dig.chouti.com/all/hot/recent/%s"%id,
            headers={
                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
            },
            cookies=ret1_cookie
    
        )
    
        # print(ret2.text)
    
        soup=BeautifulSoup(ret2.text,"html.parser")
    
        div=soup.find(name="div",attrs={"class":"content-list","id":"content-list"})
    
        items=div.find_all(name="div",attrs={"class":"item"})
    
    
        for i in items:
            par2=i.find(name="div",attrs={"class":"part2"})
            nid=par2.get("share-linkid")
    
            # upvote
            ret3=requests.post(
                url="https://dig.chouti.com/link/vote?linksId=%s"%nid,
                headers={
                "User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
             },
                cookies=ret1_cookie
            )
            print(ret3.text)
    Chouti: log in and upvote

    Simulating WeChat login and fetching user info

    1. Display the QR code

      Request the QR-code URL with the current timestamp (time * 1000) appended, and save the returned xuuid (a pseudo-UUID).

      Long polling: the browser sends one request to the WeChat server, and the server holds the connection open (up to a time limit); if the user scans the code during that window, the server responds immediately.

      Polling: the browser keeps sending requests to the server at short intervals.

    2. Scan the code

       The server returns status 201 plus the user's avatar image.

    3. Confirm login

      The server returns status 200 plus a URL; send another request to that URL + "xxx" to fetch the logged-in user's credential data (XML), then save the credentials and the cookies.

    4. Initialize user info

      Send a POST request carrying the user's credential info as JSON data.

    5. Fetch the avatar

        - images are protected against hotlinking, so send:
          - the Referer header
          - the cookies

    6. Fetch contact info

      Send the request with the saved cookies.

    Example: a minimal sketch of the long-polling check from step 1 follows.
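    The sketch below assumes a hypothetical check_url; the 201/200 codes mirror the steps above, but the exact response format and the timestamp parameter name are assumptions, not the verified WeChat API.

    import time
    import requests

    def wait_for_scan(check_url):
        # long-poll until the QR code is scanned and the login is confirmed
        while True:
            # the server holds this request open until the user acts
            # or a server-side time limit expires
            ret = requests.get(check_url, params={'_': int(time.time() * 1000)})
            if 'window.code=201' in ret.text:
                # scanned: the response carries the avatar image
                print('scanned, waiting for confirmation on the phone')
            elif 'window.code=200' in ret.text:
                # confirmed: the response carries the redirect URL
                return ret.text
            # any other code: the held request timed out; loop and poll again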

      

      
