  • Scraping free proxy IPs with Python

    #       2019/9/8
    #       Approach: 1. Find a free IP proxy site (e.g. XiciDaili)
    #
    #                 2. Scrape the IPs (standard requests + BeautifulSoup)
    #
    #                 3. Validate each IP (send a request to a target url through the scraped IP and check for a 200 status code)
    #
    #                 4. Record the working IPs (write them to a file)
    
    
    #!/usr/bin/env python3
    # -*- coding: utf-8 -*-
    import requests, threading, datetime
    from bs4 import BeautifulSoup
    import random
    
    """
    1、抓取西刺代理网站的代理ip
    2、并根据指定的目标url,对抓取到ip的有效性进行验证
    3、最后存到指定的path
    """
    
    
    # ------------------------------------------------------File handling--------------------------------------------------------
    # Append a line to the file
    def write(path, text):
        with open(path, 'a', encoding='utf-8') as f:
            f.writelines(text)
            f.write('\n')  # the with block closes the file, so no explicit close is needed
    
    
    # Clear the file
    def truncatefile(path):
        with open(path, 'w', encoding='utf-8') as f:
            f.truncate()
    
    
    # Read the file into a list of stripped lines
    def read(path):
        with open(path, 'r', encoding='utf-8') as f:
            txt = []
            for s in f.readlines():
                txt.append(s.strip())
        return txt
    
    
    # ----------------------------------------------------------------------------------------------------------------------
    # Compute the elapsed time, formatted as HH:MM:SS
    def gettimediff(start, end):
        seconds = (end - start).seconds
        m, s = divmod(seconds, 60)
        h, m = divmod(m, 60)
        diff = ("%02d:%02d:%02d" % (h, m, s))
        return diff
    
    
    # ----------------------------------------------------------------------------------------------------------------------
    # Return a request header with a random User-Agent
    def getheaders():
        user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11", 
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6", 
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6", 
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1", 
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5", 
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5", 
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", 
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3", 
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3", 
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24", 
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
        ]
        UserAgent = random.choice(user_agent_list)
        headers = {'User-Agent': UserAgent}
        return headers
    
    
    # -----------------------------------------------------Check whether an IP works----------------------------------------------------
    def checkip(targeturl, ip):
        headers = getheaders()  # random request header
        proxies = {"http": "http://" + ip, "https": "http://" + ip}  # route both http and https traffic through the proxy
        try:
            response = requests.get(url=targeturl, proxies=proxies, headers=headers, timeout=5).status_code
            if response == 200:
                return True
            else:
                return False
        except Exception:  # timeout, connection error, etc. -> the proxy is unusable
            return False
    
    
    # -------------------------------------------------------Fetching proxies----------------------------------------------------
    # Free proxies from XiciDaili
    def findip(type, pagenum, targeturl, path):  # ip type, page number, target url, path for saving ips
        urls = {'1': 'http://www.xicidaili.com/wn/',  # xicidaili domestic https proxies
                '2': 'http://www.xicidaili.com/nn/',  # xicidaili domestic high-anonymity proxies
                '3': 'http://www.xicidaili.com/nt/',  # xicidaili domestic regular proxies
                '4': 'http://www.xicidaili.com/wt/'}  # xicidaili foreign http proxies
        url = urls[str(type)] + str(pagenum)  # build the page url
        # print("url:",url)
        headers = getheaders()  # random request header
        html = requests.get(url=url, headers=headers, timeout=5).text
        # print("html:", html)
        soup = BeautifulSoup(html, 'lxml')
        rows = soup.find_all('tr', class_='odd')
        for i in rows:
            t = i.find_all('td')
            ip = t[1].text + ':' + t[2].text
            is_avail = checkip(targeturl, ip)
            if is_avail:
                write(path=path, text=ip)
                print(ip)
    
    
    # -----------------------------------------------------Multithreaded scraping entry point---------------------------------------------------
    def getip(targeturl, path):
        truncatefile(path)  # clear the file before scraping
        start = datetime.datetime.now()  # start time
        threads = []
        for type in range(4):  # four ip types, first three pages of each, 12 threads in total
            for pagenum in range(3):
                t = threading.Thread(target=findip, args=(type + 1, pagenum + 1, targeturl, path))
                threads.append(t)
        print('Start scraping proxy IPs')
        for s in threads:  # start the scraping threads
            s.start()
        for e in threads:  # wait for all threads to finish
            e.join()
        print('Scraping finished')
        end = datetime.datetime.now()  # end time
        diff = gettimediff(start, end)  # elapsed time
        ips = read(path)  # read back the scraped ips
        print('Scraped %s proxy IPs in total, elapsed time: %s\n' % (len(ips), diff))
    
    
    # -------------------------------------------------------Entry point-----------------------------------------------------------
    if __name__ == '__main__':
        path = 'ip.txt'  # file that stores the scraped ips
        targeturl = 'http://www.cnblogs.com/TurboWay/'  # url used to validate the ips
        getip(targeturl, path)
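
    Once ip.txt has been written, other scripts can pull proxies from it. The snippet below is a minimal sketch of that last step (the file name ip.txt and the target url come from the script above; get_random_proxy is a hypothetical helper, not part of the original script): it picks a random ip:port line and routes a requests call through it.

    # Sketch: consume the proxy list produced by getip() above.
    import random
    import requests


    def get_random_proxy(path='ip.txt'):
        # Read the ip:port lines written by the scraper and pick one at random.
        with open(path, 'r', encoding='utf-8') as f:
            ips = [line.strip() for line in f if line.strip()]
        return random.choice(ips) if ips else None


    if __name__ == '__main__':
        ip = get_random_proxy()
        if ip:
            proxies = {'http': 'http://' + ip, 'https': 'http://' + ip}
            try:
                r = requests.get('http://www.cnblogs.com/TurboWay/', proxies=proxies, timeout=5)
                print(ip, r.status_code)
            except requests.RequestException:
                print(ip, 'failed')  # the proxy may have gone stale since it was scraped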

  • Original post: https://www.cnblogs.com/zyt-bg/p/11487262.html