zoukankan      html  css  js  c++  java
  • 21天打造分布式爬虫-多线程下载表情包(五)

    5.1.threading模块

    简单使用

    import threading,time
    
    def coding():
        """Simulate a 'writing code' task: three timed progress messages."""
        for i in range(3):
            print('正在写代码%s' % i)
            time.sleep(2)
    
    def drawing():
        """Simulate a 'drawing' task: three timed progress messages."""
        for i in range(3):
            print('正在画画%s' % i)
            time.sleep(2)
    
    def main():
        """Run coding() and drawing() concurrently on two threads."""
        workers = (threading.Thread(target=coding),
                   threading.Thread(target=drawing))
        for worker in workers:
            worker.start()

    if __name__ == '__main__':
        main()

    5.2.生产者和消费者

    Lock模式的生产者和消费者

    import threading
    import random,time
    
    # Shared account balance, mutated by both Producer and Consumer threads.
    gMoney = 1000
    # Serialises every read/write of the shared globals below.
    gLock = threading.Lock()
    # Producers stop after this many deposits in total (across all threads).
    gTotalTimes = 10
    # Number of deposits performed so far; protected by gLock.
    gTimes = 0
    
    
    class Producer(threading.Thread):
        """Deposits random amounts into the shared balance gMoney.

        All producer threads together perform at most gTotalTimes deposits;
        gLock serialises access to the shared globals.
        """

        def run(self):
            global gMoney, gTimes
            while True:
                amount = random.randint(100, 1000)
                with gLock:
                    # Quota reached: returning from inside the with-block
                    # still releases the lock, so no deadlock on exit.
                    if gTimes >= gTotalTimes:
                        return
                    gMoney += amount
                    print('%s生产了%d元钱,剩余%d元钱' % (threading.current_thread(), amount, gMoney))
                    # Count this deposit toward the global quota.
                    gTimes += 1
                time.sleep(0.5)
    
    
    class Consumer(threading.Thread):
        """Withdraws random amounts from the shared balance gMoney.

        Runs until the producers have exhausted their quota AND the remaining
        balance cannot cover the next withdrawal.
        """

        def run(self):
            global gMoney
            while True:
                amount = random.randint(100, 1000)
                with gLock:
                    if gMoney >= amount:
                        gMoney -= amount
                        print('%s消费了%d元钱,剩余%d元钱' % (threading.current_thread(), amount, gMoney))
                    elif gTimes >= gTotalTimes:
                        # Production is finished and the balance can never
                        # grow again, so this withdrawal would wait forever.
                        return
                time.sleep(0.5)
    
    
    def main():
        """Start five producer threads and two consumer threads."""
        for _ in range(5):
            Producer().start()
        for _ in range(2):
            Consumer().start()

    if __name__ == '__main__':
        main()

    5.3.下载表情包

    网址:http://www.doutula.com/photo/list/?page=1

    解析:xpath

    不用多线程,速度相对会很慢

    import requests
    from lxml import etree
    from urllib import request
    import os
    import re
    
    def parse_page(url, save_dir='C:/Users/Administrator/Desktop/images/'):
        """Download every non-gif meme image found on one listing page.

        Args:
            url: a listing-page URL, e.g. http://www.doutula.com/photo/list/?page=1
            save_dir: directory the images are written to; created if missing
                (the original hard-coded path is kept as the default).
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
            'Referer': 'https://movie.douban.com/'
        }
        # urlretrieve fails if the target directory does not exist.
        os.makedirs(save_dir, exist_ok=True)
        response = requests.get(url, headers=headers)
        html = etree.HTML(response.text)
        imgs = html.xpath("//div[@class='page-content text-center']//img[@class!='gif']")
        for img in imgs:
            # The real image URL is lazy-loaded via data-original; placeholder
            # tags may lack it, so skip those instead of crashing on None.
            img_url = img.get('data-original')
            if not img_url:
                continue
            # Strip characters that are illegal/awkward in Windows file names;
            # a missing alt falls back to the empty string.
            alt = re.sub(r'[??.,。!!*]', '', img.get('alt') or '')
            # Keep the original extension (.gif / .jpg / ...).
            suffix = os.path.splitext(img_url)[1]
            request.urlretrieve(img_url, os.path.join(save_dir, alt + suffix))
    
    def main():
        """Crawl listing pages 1 through 9 sequentially (no threads)."""
        for page in range(1, 10):
            parse_page('http://www.doutula.com/photo/list/?page=%d' % page)

    if __name__ == '__main__':
        main()

    利用多线程

     main()

    • 定义两个队列,和创建多线程
    • page_queue():存放每一页的url
    • img_queue():存放每一页里面所有的表情的url

    Producer()

    • 从page_queue()队列中取出每一页的url,直到队列为空则break
    • 用xpath提取出每一页的所有图片的url
    • 把每个图片的url和名字存放到img_queue()队列里面

    Consumer()

    • 从img_queue()队列中取出图片的url和名字
    • 下载保存
    • 直到page_queue()和img_queue()两个队列都为空则break

    代码

    import os
    import queue
    import re
    import threading
    from queue import Queue
    from urllib import request

    import requests
    from lxml import etree
    
    class Producer(threading.Thread):
        """Takes listing-page URLs from page_queue, scrapes each page, and puts
        (img_url, filename) tuples on img_queue for the Consumer threads."""

        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.140 Safari/537.36',
            'Referer': 'https://movie.douban.com/'
        }

        def __init__(self, page_queue, img_queue, *args, **kwargs):
            super(Producer, self).__init__(*args, **kwargs)
            # Input queue of listing-page URLs, shared with the other threads.
            self.page_queue = page_queue
            # Output queue of (img_url, filename) work items for consumers.
            self.img_queue = img_queue

        def run(self):
            while True:
                # Non-blocking get fixes a TOCTOU race in the original
                # empty()-then-get() pattern: another producer could drain the
                # queue between the check and the get, hanging this thread
                # forever on a blocking get().
                try:
                    url = self.page_queue.get(block=False)
                except queue.Empty:
                    break
                self.parse_page(url)

        def parse_page(self, url):
            """Scrape one listing page and enqueue every non-gif image."""
            response = requests.get(url, headers=self.headers)
            html = etree.HTML(response.text)
            imgs = html.xpath("//div[@class='page-content text-center']//img[@class!='gif']")
            for img in imgs:
                # The real URL is lazy-loaded via data-original; skip tags
                # without it instead of crashing on None below.
                img_url = img.get('data-original')
                if not img_url:
                    continue
                # Strip characters that are illegal/awkward in file names.
                alt = re.sub(r'[??.,。!!*]', '', img.get('alt') or '')
                # Keep the original extension (.gif / .jpg / ...).
                suffix = os.path.splitext(img_url)[1]
                self.img_queue.put((img_url, alt + suffix))
    
    
    class Consumer(threading.Thread):
        """Takes (img_url, filename) tuples from img_queue and downloads each
        image to disk."""

        def __init__(self, page_queue, img_queue, *args, **kwargs):
            super(Consumer, self).__init__(*args, **kwargs)
            # Watched only to decide when all work is finished.
            self.page_queue = page_queue
            # Input queue of (img_url, filename) work items from producers.
            self.img_queue = img_queue

        def run(self):
            while True:
                try:
                    # A timed get fixes two races in the original code: a
                    # momentarily-empty img_queue (producers still parsing)
                    # made consumers exit early, and the empty()-then-get()
                    # pattern could block forever once all work was done.
                    img_url, filename = self.img_queue.get(timeout=5)
                except queue.Empty:
                    if self.page_queue.empty():
                        break
                    continue
                request.urlretrieve(img_url, 'C:/Users/Administrator/Desktop/images/' + filename)
                print("已下载完一张图片")
    
    
    def main():
        """Fill the page queue, then launch 10 producers and 10 consumers."""
        page_queue = Queue(1000)
        img_queue = Queue(10000)

        for page in range(1, 1758):
            page_queue.put('http://www.doutula.com/photo/list/?page=%d' % page)

        for _ in range(10):
            Producer(page_queue, img_queue).start()
        for _ in range(10):
            Consumer(page_queue, img_queue).start()

    if __name__ == '__main__':
        main()

    结果:

  • 相关阅读:
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    财务报表分析(张新民教授)-第七章 企业财务质量分析
    PS
    史上最强视频网站真实地址解析
    jsonp详解
    width:100%缩小窗口时背景图片出现空白bug
  • 原文地址:https://www.cnblogs.com/derek1184405959/p/9411073.html
Copyright © 2011-2022 走看看