  • Python Study Notes --- Scraping the One Piece Comic

    I put this crawler together recently out of boredom; it can automatically scrape any comic from Tencent Comics (ac.qq.com). The approach is as follows:

    1. First get the URL of the comic to download; getUrls is used here to fetch the latest chapter directly

    2. Then open that chapter page and collect the URLs of the images to download

    3. Save the images to local disk

    import os
    import random
    import time
    from random import randint
    
    from bs4 import BeautifulSoup
    from selenium import webdriver
    from selenium.webdriver import DesiredCapabilities
    import urllib.request as urllib2
    
    ROOT_URL = "http://ac.qq.com"
    target_url = [
        ROOT_URL + "/Comic/comicInfo/id/505430",  # One Piece
    ]
    ua_list = [
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
            "Mozilla/5.0 (Windows NT 6.1; rv2.0.1) Gecko/20100101 Firefox/4.0.1",
            "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
            "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11"
    ]
    
    user_agent=random.choice(ua_list)
    dir_path="D:/py/海贼王/"
    
    def getImageUrls(comic_url):
        '''
        Use Selenium with PhantomJS to collect the dynamically generated image URLs
        '''
        urls = []
    
        dcap = dict(DesiredCapabilities.PHANTOMJS)
        dcap["phantomjs.page.settings.userAgent"] = ("Mozilla/4.0 (compatible; MSIE 5.5; windows NT)")
        browser = webdriver.PhantomJS(executable_path=r"E:\py\littlepy\tencent_cartoon\phantomjs-2.1.1-windows\bin\phantomjs.exe",
                                      desired_capabilities=dcap)
        browser.get(comic_url)
    
        imgs = browser.find_elements_by_xpath("//div[@id='mainView']/ul[@id='comicContain']//img")
        for i in range(0, len(imgs) - 1):
            if i == 1:  # skip the ad image
                continue
            urls.append(imgs[i].get_attribute("src"))
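            # scroll down so lazily loaded pages receive real src attributes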
            js = 'window.scrollTo( 800 ,' + str((i + 1) * 1280) + ')'
            browser.execute_script(js)
            time.sleep(randint(3, 6))
    
        browser.quit()
        print("urls=",urls)
        return urls
    
    def getUrls(comic_url):
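        # fetch the comic's info page and return (title, url) of the newest chapter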
        result = dict()
        req = urllib2.Request(comic_url)
        req.add_header('User-Agent', user_agent)
        print("url=",comic_url)
        response = urllib2.urlopen(req)
        soup = BeautifulSoup(response, "lxml")
        #print("soup=",soup)
        # the info page lists the latest 20 chapters; the last <a> is the newest
        page = soup.find(attrs={"class": "chapter-page-new works-chapter-list"}).find_all(
            "a")  # full chapter list container: chapter-page-new works-chapter-list
        title = page[-1]['title']
        result[title] = ROOT_URL + page[-1]['href']
        print("title=",title)
        print("result=",result[title])
        return title,result[title]
    
    def downloadComics(dir_path, urls):
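        # save each image; the file name is sliced from the tail of its URL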
        for url in urls:
            urllib2.urlretrieve(url, dir_path + url[-8:-2])
            #print("url=",url[-9:-2])
    
    if __name__ == "__main__":
        title,result_url=getUrls(target_url[0])
        urls=getImageUrls(result_url)
        path=dir_path+title+"/"
        isExists = os.path.exists(path)
        if not isExists:
            os.makedirs(path)
            print(path + '    created successfully')
        downloadComics(path,urls)
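
    Note: PhantomJS has been deprecated and current Selenium releases no longer support it (nor the old find_elements_by_xpath helper). If the script above fails on a modern setup, the image-collection step might look like the following minimal sketch using headless Chrome as a drop-in replacement inside the same script; the XPath, ad-skipping, scrolling, and throttling logic are carried over unchanged, while the headless-Chrome setup itself is an assumption, not part of the original post:

    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.chrome.options import Options

    def getImageUrlsHeadless(comic_url):
        # assumes Selenium 4+ with a matching chromedriver available on PATH
        options = Options()
        options.add_argument("--headless=new")  # run Chrome without a window
        options.add_argument("user-agent=" + user_agent)
        browser = webdriver.Chrome(options=options)
        browser.get(comic_url)

        urls = []
        imgs = browser.find_elements(By.XPATH, "//div[@id='mainView']/ul[@id='comicContain']//img")
        for i in range(0, len(imgs) - 1):
            if i == 1:  # skip the ad image, as in the original
                continue
            urls.append(imgs[i].get_attribute("src"))
            # scroll so lazily loaded pages receive real src attributes
            browser.execute_script('window.scrollTo(800, ' + str((i + 1) * 1280) + ')')
            time.sleep(randint(3, 6))

        browser.quit()
        return urls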
    

      

  • Original post: https://www.cnblogs.com/Ronaldo-HD/p/9708060.html