  • Python crawler: scraping videos from a website

    Store the collected video download URLs in a list (they can also be written to a file), then trigger the downloads automatically through Thunder's COM interface. (Install Thunder first and enable one-click download under Download Management in its settings center.)
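    The Thunder part on its own is just a COM call through pywin32. As a rough sketch of that piece alone (the helper name here is made up for illustration; the ThunderAgent.Agent64.1 ProgID and the AddTask/CommitTasks calls are the same ones the full script below uses):

    from win32com.client import Dispatch

    def send_to_thunder(urls):
        # Requires the Thunder client to be installed, which registers the COM class.
        thunder = Dispatch("ThunderAgent.Agent64.1")
        for url in urls:
            thunder.AddTask(url)  # queue each download task
        thunder.CommitTasks()     # hand the queued tasks over to Thunder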

    The full implementation is as follows:

    from bs4 import BeautifulSoup
    import requests
    import os,re,time
    import urllib3
    from win32com.client import Dispatch
    class DownloadVideo:
        def __init__(self):
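            # One shared requests session; download_urla collects the detail-page URLs,
            # download_urlb collects the final video links that get handed to Thunder.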
            self.r = requests.session()
            self.url=self.get_url()
            self.download_urla=[]
            self.download_urlb=[]
            self.url_set=["%s/shipin/list-短视频.html"%self.url]
        # Get the site's current base URL (the entry domain redirects to it)
        def get_url(self):
            urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
            a=self.r.get('https://www.k58.com',verify=False)
            b=a.url
            return b
        # Build the list-page URLs for the first n pages
        def url_set1(self,n):
            if n==2:
                url="%s/shipin/list-短视频-2.html"%self.url
                self.url_set.append(url)
            elif n>=3:
                m=n+1
                for i in range(2,m):
                    url="%s/shipin/list-短视频-%d.html"%(self.url,i)
                    self.url_set.append(url)
            else:
                pass
        # Load each list page and collect the detail-page URLs
        def download_url1(self):
            for j in self.url_set:
                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                r=self.r.get(j,verify=False)
                sp1=r.content
                soup = BeautifulSoup(sp1, "html.parser")
                sp2 = soup.find_all(class_="shown")
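                # Each element with class "shown" wraps a relative link to a video
                # detail page; pull the href out and join it with the base URL.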
                for i in sp2:
                    url1=re.findall('<a href="(.*?)"',str(i))
                    u=self.url+url1[0]
                    self.download_urla.append(u)
        # Extract the download link from each video's detail page
        def download_url2(self):
            for i in self.download_urla:
                urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
                r=self.r.get(i,verify=False)
                sp1=r.content
                soup = BeautifulSoup(sp1, "html.parser")
                sp2 = soup.find_all(class_="form-control input-sm copy_btn app_disable")
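                # The copy-link button stores the actual video URL in its
                # data-clipboard-text attribute.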
                for j in sp2:
                    url2=j["data-clipboard-text"]
                    self.download_urlb.append(url2)
                    # Optionally write the link to a txt file
                    # self.write_txt(url2)
        # Hand the collected links to Thunder for download
        def thunder_download(self):
            try:
                thunder = Dispatch("ThunderAgent.Agent64.1")
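                # Queue each link and commit it; with one-click download enabled,
                # Thunder starts the task without further prompts.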
                for i in self.download_urlb:
                    thunder.AddTask(i)
                    thunder.CommitTasks()
                    time.sleep(2)
            except Exception:
                print("Please install Thunder and enable one-click download under Download Management in its settings center")
    
        def mkdir(self,path):
            folder = os.path.exists(path)
            if not folder:
                os.makedirs(path)
            else:
                pass
        def write_txt(self,c):
            self.mkdir(r"D:\AAAAA")
            file_name=time.strftime('%Y%m%d_%H%M%S.txt')
            with open(r"D:\AAAAA\%s"%file_name,'a') as f:
                f.write(c+"\n")
    
    if __name__ == '__main__':
        d=DownloadVideo()
        # The number is how many list pages to scrape
        d.url_set1(5)
        d.download_url1()
        d.download_url2()
        d.thunder_download()
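
    If you only want to collect the links instead of handing them to Thunder, uncomment the self.write_txt(url2) call in download_url2(); each run then appends the links to a timestamped .txt file under D:\AAAAA.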