  • Getting started with Python web crawlers: scraping a novel

    ## Step 1: import packages
    from bs4 import BeautifulSoup
    import requests
    import sys

    ## Preparation: the downloader class
    class downloder(object):
        def __init__(self):
            self.server = 'http://www.biqukan.com'
            self.target = 'http://www.biqukan.com/1_1094/'
            self.names = []   # chapter titles
            self.urls = []    # chapter links
            self.nums = 0     # number of chapters

        def get_download_url(self):
            # Fetch the table of contents and collect chapter titles and links
            req = requests.get(url=self.target)
            html = req.text
            div_bf = BeautifulSoup(html, 'html.parser')
            div = div_bf.find_all('div', class_='listmain')
            a_bf = BeautifulSoup(str(div[0]), 'html.parser')
            a = a_bf.find_all('a')
            self.nums = len(a[15:])  # skip the first 15 links (the "latest chapters" block)
            for each in a[15:]:
                self.names.append(each.string)
                self.urls.append(self.server + each.get('href'))

        def writer(self, name, path, text):
            # Append one chapter (title + body) to the output file
            with open(path, 'a', encoding='utf-8') as f:
                f.write(name + '\n')
                f.writelines(text)
                f.writelines('\n\n')

        def get_contents(self, target):
            # Fetch a chapter page and return its text content
            req = requests.get(url=target)
            html = req.text
            bf = BeautifulSoup(html, 'html.parser')
            texts = bf.find_all('div', class_='showtxt')
            texts = texts[0].text.replace('\xa0' * 8, '\n\n')
            return texts



    if __name__ == '__main__':
        dl = downloder()
        dl.get_download_url()
        print('Download started')
        for i in range(dl.nums):
            dl.writer(dl.names[i], '用点.txt', dl.get_contents(dl.urls[i]))
        print('Download finished')




    Based on 华哥's tutorial... there is still a lot I don't understand
    http://cuijiahua.com/blog/2017/10/spider_tutorial_1.html
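
    If parts of the code are still unclear, one way to explore is to run the pieces interactively and inspect what each step returns. A rough sketch (the chapter count and the first entry depend on the site still serving the same page layout):

    dl = downloder()
    dl.get_download_url()
    print(dl.nums, 'chapters found')          # how many chapter links were collected
    print(dl.names[0], dl.urls[0])            # first chapter title and URL
    print(dl.get_contents(dl.urls[0])[:200])  # preview the first 200 characters of the chapter text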

  • Original post: https://www.cnblogs.com/baili-luoyun/p/8437158.html