  • [Crawler mini-program: scraping all Douyu room info] XPath (thread-pool version)

    
    
    # Tested and working; written to illustrate crawler basics. Feedback and corrections are welcome.
    from queue import Queue
    import requests
    from lxml import etree
    from multiprocessing.dummy import Pool
    import time
    """爬取目标:http://www.qiushibaike.com/8hr/page/1
        利用线程池实现
    """
    
    
    class QiuShi:
    
        def __init__(self):
    
            # Base URL and request headers (requests expects headers as a dict)
            self.base_url = 'http://www.qiushibaike.com/8hr/page/{}'
            self.headers = {
                'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                              '(KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
            }
    
            # Queues used to pass data between the pipeline stages
            self.url_queue = Queue()
            self.request_queue = Queue()
            self.html_queue = Queue()
    
            # Initialize the thread pool (multiprocessing.dummy gives a thread-based Pool)
            self.pool = Pool()
    
            # Counters used to tell when all pages have been processed
            self.request_num = 0
            self.response_num = 0
    
            self.is_running = True
    
        def get_url_list(self):
            """Build every page URL and put it on the queue"""
            for i in range(1, 14):
                target_url = self.base_url.format(i)
                print(target_url)
                self.request_num += 1
                self.url_queue.put(target_url)
    
        def request_url(self):
            """Take a URL off the queue and fetch it"""
            target_url = self.url_queue.get()
            # headers must be passed by keyword; positionally it would be `params`
            response = requests.get(target_url, headers=self.headers)
            self.request_queue.put(response)
            self.url_queue.task_done()
    
        def get_content(self):
            """Parse a response and extract author/content pairs"""
            html_text = self.request_queue.get().content.decode()
            html = etree.HTML(html_text)
            div_list = html.xpath('//div[@id="content-left"]/div')
            content_list = []
            for div in div_list:
                item = {}
                item['author'] = div.xpath('.//h2/text()')[0].strip()
                item['content'] = div.xpath('.//span/text()')[0].strip()
                print(item)
                content_list.append(item)
            self.html_queue.put(content_list)
            self.request_queue.task_done()
    
        def save_data(self):
            """Append the parsed items to a file"""
            data_list = self.html_queue.get()
            with open('qiushi.text', 'a+') as f:
                for data in data_list:
                    f.write(str(data))
                    f.write('\n')
            self.html_queue.task_done()
    
        def execute_request_response_save(self):
            """One pipeline pass: request, parse, save"""
            self.request_url()
            self.get_content()
            self.save_data()
            self.response_num += 1
    
        def _callback(self, temp):
            """Pool callback: resubmit the task while the crawl is still running"""
            if self.is_running:
                self.pool.apply_async(self.execute_request_response_save, callback=self._callback)
    
        def main(self):
            """Main program logic"""
            self.get_url_list()
            # Seed the pool; each finished task resubmits itself via the callback
            for n in range(4):  # run the pipeline on 4 concurrent task chains
                self.pool.apply_async(self.execute_request_response_save, callback=self._callback)
    
            # Wait until every queued request has produced a saved response
            while True:
                if self.request_num == self.response_num:
                    self.is_running = False
                    break
                time.sleep(0.1)  # yield instead of spinning at full speed
    
    
    if __name__ == '__main__':
        qiushi = QiuShi()
        qiushi.main()
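
The pattern worth isolating here is the self-rescheduling callback: every `apply_async` task, when it completes, resubmits itself through `callback=`, so the four initial submissions in `main()` keep four task chains alive until the work runs out. Below is a minimal standalone sketch of that pattern; the integer job queue and the `work`/`resubmit` names are illustrative placeholders, not part of the original program:

    from multiprocessing.dummy import Pool  # thread-backed Pool, as in the crawler
    from queue import Queue
    import time

    pool = Pool()
    jobs = Queue()
    for i in range(10):  # hypothetical workload of 10 dummy jobs
        jobs.put(i)

    running = True

    def work():
        """Take one job off the queue (placeholder for request/parse/save)."""
        job = jobs.get()
        time.sleep(0.1)  # stand-in for real network I/O
        print('finished job', job)
        jobs.task_done()

    def resubmit(_result):
        """Pool callback: keep the task chain alive while the run flag is set."""
        if running:
            pool.apply_async(work, callback=resubmit)

    for _ in range(4):  # seed 4 concurrent chains, mirroring main()
        pool.apply_async(work, callback=resubmit)

    jobs.join()      # returns once every job has been marked task_done()
    running = False  # stray resubmitted workers block on get(), but the pool's
                     # threads are daemonic, so they don't prevent exit

Note that `Queue.join()` gives a cleaner shutdown signal than busy-comparing `request_num` with `response_num`, since the crawler already calls `task_done()` at every stage.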
  • Original post: https://www.cnblogs.com/888888CN/p/10070257.html