  • requests and bs4

    The requests module mimics a browser to send HTTP requests.
    bs4 parses an HTML or XML string into an object tree; use find/find_all to locate tags, then read their text/attrs.
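
    A minimal illustration of the find/find_all and text/attrs API (the HTML string here is made up):

    from bs4 import BeautifulSoup

    html = "<div id='box'><a class='t' href='/n/1'>hello</a></div>"
    soup = BeautifulSoup(html, 'html.parser')
    a = soup.find(name='a', attrs={'class': 't'})   # first matching tag
    print(a.text)                                   # -> hello
    print(a.attrs.get('href'))                      # -> /n/1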

    Scraping Autohome (autohome.com.cn)

        Scrape the news feed from Autohome. The site has no real anti-scraping measures, so a plain requests.get is enough.
        Note, however, that the response comes back GBK-encoded and must be decoded accordingly.

    import requests
    from bs4 import BeautifulSoup
    
    response = requests.get("https://www.autohome.com.cn/news/")
    response.encoding = 'gbk'
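    # Instead of hard-coding 'gbk', the charset can also be guessed from the
    # response body; apparent_encoding is a standard requests attribute:
    # response.encoding = response.apparent_encoding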
    
    soup = BeautifulSoup(response.text,'html.parser')
    
    div = soup.find(name='div',attrs={'id':'auto-channel-lazyload-article'})
    
    li_list = div.find_all(name='li')
    
    for li in li_list:
    
        title = li.find(name='h3')
        if not title:
            continue
        p = li.find(name='p')
        a = li.find(name='a')
    
        print(title.text)
        print(a.attrs.get('href'))
        print(p.text)
    
        img = li.find(name='img')
        src = img.get('src')
        src = "https:" + src
        print(src)
    
        # Make another request to download the image
        file_name = src.rsplit('/',maxsplit=1)[1]
        ret = requests.get(src)
        with open(file_name,'wb') as f:
            f.write(ret.content)
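
        For larger files, a streamed download avoids holding the whole body in memory; a minimal variant of the download above (the 8192-byte chunk size is an arbitrary choice):

    ret = requests.get(src, stream=True)
    with open(file_name, 'wb') as f:
        for chunk in ret.iter_content(chunk_size=8192):
            f.write(chunk)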
    

    Scraping Chouti (dig.chouti.com)

        Fetching the Chouti news feed requires a User-Agent request header.
        To log in, the request must carry the pre-login cookie for authorization, along with the User-Agent.
        Request body:
            'phone':'8613121758648',
            'password':'woshiniba',
            'oneMonth':1
        Once logged in, carry the authorized cookie and User-Agent to perform actions such as upvoting.

    """"""
    
    # ################################### Example 1: scraping data (with request headers) ###################################
    """
    import requests
    from bs4 import BeautifulSoup
    
    r1 = requests.get(
        url='https://dig.chouti.com/',
        headers={
            'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        }
    )
    
    soup = BeautifulSoup(r1.text,'html.parser')
    
    # Tag object
    content_list = soup.find(name='div',id='content-list')
    # print(content_list)
    # [Tag object, Tag object]
    item_list = content_list.find_all(name='div',attrs={'class':'item'})
    for item in item_list:
        a = item.find(name='a',attrs={'class':'show-content color-chag'})
        print(a.text.strip())
        # print(a.text)
    """
    # ################################### Example 2: upvoting ###################################
    """
    import requests
    # 1. Visit the front page
    r1 = requests.get(
        url='https://dig.chouti.com/',
        headers={
            'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        }
    )
    
    # 2. Submit the phone number and password
    r2 = requests.post(
        url='https://dig.chouti.com/login',
        headers={
            'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        },
        data={
            'phone':'8613121758648',
            'password':'woshiniba',
            'oneMonth':1
        },
        cookies=r1.cookies.get_dict()
    )
    
    
    # 3. Upvote
    r3 = requests.post(
        url='https://dig.chouti.com/link/vote?linksId=20435396',
        headers={
            'user-agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
        },
        cookies=r1.cookies.get_dict()
    )
    print(r3.text)
    """
    

    Scraping GitHub

        GET the login page:
            find the hidden input tag in the HTML to get the CSRF token
            collect the cookie
        POST to log in:
            the request body carries the CSRF token value plus the username, password, etc., for verification
        Everything afterwards only needs to carry the cookie.

    """"""
    # ################################### Example 3: logging in to GitHub automatically ###################################
    # 1. GET: visit the login page
    """
    - find the hidden input tag in the HTML to get the csrf token
    - collect the cookie
    """
    
    # 2. POST: username and password
    """
    - data to send:
        - csrf token
        - username
        - password
    - carry the cookie
    """
    
    # 3. GET: visit https://github.com/settings/emails
    """
    - carry the cookie
    """
    
    import requests
    from bs4 import BeautifulSoup
    
    # ############## Method 1 ##############
    #
    # # 1. Visit the login page and grab authenticity_token
    # i1 = requests.get('https://github.com/login')
    # soup1 = BeautifulSoup(i1.text, features='lxml')
    # tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
    # authenticity_token = tag.get('value')
    # c1 = i1.cookies.get_dict()
    # i1.close()
    #
    # # 2. Send the username/password along with authenticity_token for verification
    # form_data = {
    #     "authenticity_token": authenticity_token,
    #     "utf8": "",
    #     "commit": "Sign in",
    #     "login": "wupeiqi@live.com",
    #     'password': 'xxoo'
    # }
    #
    # i2 = requests.post('https://github.com/session', data=form_data, cookies=c1)
    # c2 = i2.cookies.get_dict()
    # c1.update(c2)
    # i3 = requests.get('https://github.com/settings/repositories', cookies=c1)
    #
    # soup3 = BeautifulSoup(i3.text, features='lxml')
    # list_group = soup3.find(name='div', class_='listgroup')
    #
    # from bs4.element import Tag
    #
    # for child in list_group.children:
    #     if isinstance(child, Tag):
    #         project_tag = child.find(name='a', class_='mr-1')
    #         size_tag = child.find(name='small')
    #         temp = "Project: %s (%s); path: %s" % (project_tag.string, size_tag.string, project_tag.get('href'), )
    #         print(temp)
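    #
    # ############## Method 2 (a sketch): requests.Session ##############
    #
    # An alternative worth noting: requests.Session() tracks cookies across
    # requests automatically, so none of the get_dict()/update() bookkeeping
    # above is needed. Same form-field assumptions as Method 1:
    #
    # session = requests.Session()
    #
    # i1 = session.get('https://github.com/login')
    # soup1 = BeautifulSoup(i1.text, features='lxml')
    # tag = soup1.find(name='input', attrs={'name': 'authenticity_token'})
    # authenticity_token = tag.get('value')
    #
    # form_data = {
    #     "authenticity_token": authenticity_token,
    #     "utf8": "",
    #     "commit": "Sign in",
    #     "login": "wupeiqi@live.com",
    #     'password': 'xxoo'
    # }
    #
    # i2 = session.post('https://github.com/session', data=form_data)
    # i3 = session.get('https://github.com/settings/repositories')
    # print(i3.status_code)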
    

    Scraping Lagou (lagou.com)

        GET the login page; the request must carry a User-Agent header.
        
        The POST request's headers need, besides the User-Agent, a Referer set to the previous URL (the login page), plus two special headers whose values are embedded in the login page's JavaScript and have to be extracted with a regex.
        
        The request body carries the user credentials; note that the password is sent as ciphertext (see the sketch below).
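
        The password value in the request body below is a 32-character hex string, which looks like an MD5 digest. Assuming the plaintext is hashed client-side with MD5 (an assumption, not verified against the site), the value could be produced like this:

    import hashlib

    # hypothetical plaintext; the hex digest is what goes into the 'password' field
    password = hashlib.md5('my-plaintext-password'.encode('utf-8')).hexdigest()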

    import re
    import requests
    
    r1 = requests.get(
        url='https://passport.lagou.com/login/login.html',
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        }
    )
    X_Anti_Forge_Token = re.findall("X_Anti_Forge_Token = '(.*?)'", r1.text, re.S)[0]
    X_Anti_Forge_Code = re.findall("X_Anti_Forge_Code = '(.*?)'", r1.text, re.S)[0]
    # print(X_Anti_Forge_Token, X_Anti_Forge_Code)
    # print(r1.text)
    #
    r2 = requests.post(
        url='https://passport.lagou.com/login/login.json',
        headers={
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
            'X-Anit-Forge-Code':X_Anti_Forge_Code,
            'X-Anit-Forge-Token':X_Anti_Forge_Token,
            'Referer': 'https://passport.lagou.com/login/login.html', # the previous request's URL
        },
        data={
            "isValidate": True,
            'username': '15131255089',
            'password': 'ab18d270d7126ea65915c50288c22c0d',
            'request_form_verifyCode': '',
            'submit': ''
        },
        cookies=r1.cookies.get_dict()
    )
    print(r2.text)
    

    Summary of these anti-scraping patterns:
        Either POST the login to obtain a cookie and carry it afterwards, or GET an unauthorized cookie first, POST the login carrying that cookie so the server authorizes it, and keep carrying it from then on; a generic helper is sketched below.
        
        Special request headers: dig their values out of earlier responses.
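
        A generic helper capturing this flow; a minimal sketch where login_url, action_url, and form_data are placeholders (it mirrors the Chouti example, where the pre-login cookie is authorized in place):

    import requests

    def login_and_act(login_url, action_url, form_data, headers):
        # 1. GET first to pick up the pre-login (unauthorized) cookie
        r1 = requests.get(login_url, headers=headers)
        cookies = r1.cookies.get_dict()
        # 2. POST the credentials carrying that cookie so the server authorizes it in place
        requests.post(login_url, headers=headers, data=form_data, cookies=cookies)
        # 3. Later requests keep carrying the now-authorized cookie
        return requests.post(action_url, headers=headers, cookies=cookies)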

    Common requests parameters

      url, headers, cookies, data, json, params, proxies

    import requests
    
    """
    # 1. Methods
        requests.get
        requests.post 
        requests.put 
        requests.delete 
        ...
        requests.request(method='POST')
    """
    # 2. Parameters
    """
        2.1  url
        2.2  headers
        2.3  cookies
        2.4  params, appended to the URL as the query string
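                # A minimal illustration (the url is a placeholder):
                requests.get(
                    url='http://127.0.0.1:8000/test/',
                    params={'k1': 'v1', 'k2': 'v2'}
                )
                # -> http://127.0.0.1:8000/test/?k1=v1&k2=v2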
        2.5  data, sends the request body (form-encoded)
                
                requests.post(
                    ...,
                    data={'user':'alex','pwd':'123'}
                )
                
                POST /index HTTP/1.1
                Host: c1.com
                Content-Type: application/x-www-form-urlencoded

                user=alex&pwd=123
                
        2.6  json, sends the request body as JSON
                requests.post(
                    ...,
                    json={'user':'alex','pwd':'123'}
                )
                
                POST /index HTTP/1.1
                Host: c1.com
                Content-Type: application/json

                {"user":"alex","pwd":"123"}
        2.7  proxies (proxy servers)
            # Without authentication
                proxie_dict = {
                    "http": "61.172.249.96:80",
                    "https": "http://61.185.219.126:3128",
                }
                ret = requests.get("https://www.proxy360.cn/Proxy", proxies=proxie_dict)
                
            
            # Proxy with authentication
                from requests.auth import HTTPProxyAuth
                
                proxyDict = {
                    'http': '77.75.105.165',
                    'https': '77.75.106.165'
                }
                auth = HTTPProxyAuth('username', 'password')
                
                r = requests.get("http://www.google.com", data={'xxx': 'ffff'}, proxies=proxyDict, auth=auth)
                print(r.text)
        -----------------------------------------------------------------------------------------
        2.8  files (file upload)
            # Send a file
                file_dict = {
                    'f1': open('xxxx.log', 'rb')
                }
                requests.request(
                    method='POST',
                    url='http://127.0.0.1:8000/test/',
                    files=file_dict
                )
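            # files values may also be tuples, to set the uploaded filename and
            # content type explicitly (a sketch with the same placeholder url):
                file_dict = {
                    'f1': ('test.txt', open('xxxx.log', 'rb'), 'text/plain')
                }
                requests.post('http://127.0.0.1:8000/test/', files=file_dict)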
                
        2.9  auth (HTTP Basic authentication)
        
            Internally, the username and password are encoded and placed in a request header sent to the server:
                
                    - "user:password"
                    - base64("user:password")
                    - "Basic base64("user:password")"
                    - request header:
                        Authorization: "Basic base64("user:password")"
                
            from requests.auth import HTTPBasicAuth, HTTPDigestAuth
    
            ret = requests.get('https://api.github.com/user', auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
            print(ret.text)
            
        2.10  timeout (seconds; a single value or a (connect, read) tuple)
            # ret = requests.get('http://google.com/', timeout=1)
            # print(ret)
        
            # ret = requests.get('http://google.com/', timeout=(5, 1))
            # print(ret)
            
        2.11  allow_redirects (whether to follow redirects)
            ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
            print(ret.text)
            
        2.12  stream (large-file download)
            from contextlib import closing
            with closing(requests.get('http://httpbin.org/get', stream=True)) as r1:
                # Process the response here, chunk by chunk.
                for i in r1.iter_content():
                    print(i)
                
        2.13  cert (client-side certificate)
            - Baidu, Tencent, etc. => no client certificate needed (the system handles it for you)
            - custom certificate
                requests.get('http://127.0.0.1:8000/test/', cert="xxxx/xxx/xxx.pem")
                requests.get('http://127.0.0.1:8000/test/', cert=("xxxx/xxx/xxx.pem", "xxx.xxx.xx.key"))
        2.14  verify=False, skip server certificate verification
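            # A sketch: skip verification, e.g. for a self-signed certificate
                ret = requests.get('https://127.0.0.1:8000/test/', verify=False)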
    """
    
    
    