  • [Python Crawler in Practice] A multithreaded crawler: scraping Qiushibaike jokes

    A multithreaded crawler runs parts of the program concurrently. Page downloads are
    I/O-bound, so with a sensible thread setup the crawler can overlap network waits and
    finish noticeably faster (CPython's GIL only serializes CPU-bound bytecode, not
    blocking network reads). Below are a plain single-threaded crawler and a multithreaded
    crawler for Qiushibaike jokes.
    Inspecting the site's links shows that page N lives at:
    https://www.qiushibaike.com/8hr/page/N/
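    For example, a quick sketch that builds the per-page URLs (the 1..12 page range is an
    assumption matching the crawler further down):

    # Build the URL for each page of jokes
    urls = ["https://www.qiushibaike.com/8hr/page/" + str(i) + "/" for i in range(1, 13)]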

    Python's threading works much like Java's: subclass a thread class and override its
    run method. Straight to the code.

    '''
    # Plain single-threaded crawler: fetches the pages one after another.
    import urllib.request
    import urllib.error
    import re

    # Spoof a browser User-Agent so the site serves the normal page.
    headers = ("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
    opener = urllib.request.build_opener()
    opener.addheaders = [headers]
    urllib.request.install_opener(opener)
    for i in range(1,2):
        url = "https://www.qiushibaike.com/8hr/page/"+str(i)+"/"
        pagedata = urllib.request.urlopen(url).read().decode("utf-8","ignore")
        # re.S lets '.' match newlines; findall returns (joke text, trailing text) tuples
        pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
        datalist = re.compile(pattern,re.S).findall(pagedata)
        for j in range(0,len(datalist)):
            print("Page "+str(i)+", joke "+str(j)+":")
            print(datalist[j])
    '''
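    To make the regex concrete, here is a minimal sketch run against a made-up fragment
    (the markup below is an assumption mirroring Qiushibaike's structure, not a captured
    page):

    import re

    # Hypothetical fragment in the assumed page structure
    sample = '<div class="content"><span>First joke text</span>\n</div>'
    pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
    # re.S makes '.' match newlines, so multi-line jokes are still captured
    print(re.compile(pattern, re.S).findall(sample))
    # -> [('First joke text', '\n')]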
    20 
    '''
    # Introductory threading example
    import threading                # import the threading module
    class A(threading.Thread):      # define thread class A
        def __init__(self):         # one of the two required methods: initialize the thread
            threading.Thread.__init__(self)
        def run(self):              # the other required method: the thread's body
            for i in range(0,11):
                print("I am thread A")

    class B(threading.Thread):      # define thread class B
        def __init__(self):
            threading.Thread.__init__(self)
        def run(self):
            for i in range(0,11):
                print("I am thread B")

    t1 = A()    # instantiate the thread
    t1.start()  # start() runs run() on a new thread
    t2 = B()
    t2.start()
    '''
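    One thing the demo above leaves out is waiting for the threads to finish. A minimal
    sketch (standard threading API only) adds join() so the main thread blocks until both
    workers are done; the target/args form is an alternative to subclassing Thread:

    import threading

    def work(name):
        for i in range(0,11):
            print("I am thread " + name)

    t1 = threading.Thread(target=work, args=("A",))
    t2 = threading.Thread(target=work, args=("B",))
    t1.start()
    t2.start()
    t1.join()   # block until t1 finishes
    t2.join()   # block until t2 finishes
    print("both threads done")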

    # The multithreaded crawler: one thread fetches the odd-numbered pages while
    # another fetches the even-numbered pages, so the downloads overlap.
    import urllib.request
    import urllib.error
    import re
    import threading

    headers = ("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
    opener = urllib.request.build_opener()
    opener.addheaders = [headers]
    urllib.request.install_opener(opener)

    class one(threading.Thread):   # crawls the odd-numbered pages
        def __init__(self):
            threading.Thread.__init__(self)
        def run(self):
            for i in range(1,12,2):
                url = "https://www.qiushibaike.com/8hr/page/"+str(i)+"/"
                pagedata = urllib.request.urlopen(url).read().decode("utf-8","ignore")
                pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
                datalist = re.compile(pattern,re.S).findall(pagedata)
                for j in range(0,len(datalist)):
                    print("Page "+str(i)+", joke "+str(j)+":")
                    print(datalist[j])


    class two(threading.Thread):   # crawls the even-numbered pages
        def __init__(self):
            threading.Thread.__init__(self)
        def run(self):
            for i in range(2,12,2):
                url = "https://www.qiushibaike.com/8hr/page/"+str(i)+"/"
                pagedata = urllib.request.urlopen(url).read().decode("utf-8","ignore")
                pattern = '<div class="content">.*?<span>(.*?)</span>(.*?)</div>'
                datalist = re.compile(pattern,re.S).findall(pagedata)
                for j in range(0,len(datalist)):
                    print("Page "+str(i)+", joke "+str(j)+":")
                    print(datalist[j])

    t1 = one()
    t2 = two()
    t1.start()
    t2.start()
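    The two classes are nearly identical, and the hand-rolled odd/even split does not
    scale past two threads. A more compact alternative (a sketch using the standard
    concurrent.futures module, not part of the original post) hands each page to a
    thread pool; it assumes the User-Agent opener installed above:

    import re
    import urllib.request
    from concurrent.futures import ThreadPoolExecutor

    pattern = re.compile('<div class="content">.*?<span>(.*?)</span>(.*?)</div>', re.S)

    def crawl(i):
        url = "https://www.qiushibaike.com/8hr/page/" + str(i) + "/"
        pagedata = urllib.request.urlopen(url).read().decode("utf-8", "ignore")
        return i, pattern.findall(pagedata)

    # map() fans the pages out over 4 worker threads and yields results in page order
    with ThreadPoolExecutor(max_workers=4) as pool:
        for i, datalist in pool.map(crawl, range(1, 13)):
            for j, item in enumerate(datalist):
                print("Page " + str(i) + ", joke " + str(j) + ":")
                print(item)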
     
  • Original post: https://www.cnblogs.com/Liuyt-61/p/8040238.html