  • Fetching all campus news

    1. Extract all the news from a single news list page, wrapped up as a function.

    2. Get the total number of news articles and compute the total number of list pages (a sketch of the page-count arithmetic follows this list).

    3. Fetch the full details of every article on every list page.
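
    For step 2, the page count follows from the article total by ceiling division. A minimal sketch, assuming 10 articles per list page (the per-page size is an assumption here, not something read off the site):

    def pageCount(totalNews, perPage=10):
        # ceiling division: a partially filled last page still counts as a page
        return (totalNews + perPage - 1) // perPage

    print(pageCount(95))   # -> 10
    print(pageCount(100))  # -> 10
    print(pageCount(101))  # -> 11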

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    import re


    # Get the click count of one news article
    def getClickCount(url):
        newsId = re.findall(r'\_(.*).html', url)[0][-4:]    # extract the news id with a regular expression
        clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)   # build the click-count request URL
        clickRes = requests.get(clickUrl)
        # the endpoint returns a small JS snippet, so pull the number out with a regular expression
        clickCount = int(re.search(r"hits'\).html\('(.*)'\);", clickRes.text).group(1))
        return clickCount
    
    def Get_page(url):
        res = requests.get(url)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        for new in soup.select('li'):
            if len(new.select('.news-list-title')) > 0:
                newsUrl = new.select('a')[0]['href']            # link to the detail page of this article
                resd = requests.get(newsUrl)
                resd.encoding = 'utf-8'
                soupd = BeautifulSoup(resd.text, 'html.parser')
                content = soupd.select('#content')[0].text
                info = soupd.select('.show-info')[0].text
                count = getClickCount(newsUrl)            # fetch the click count via the API
                date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)            # match the date-time string
                author = check = sources = None           # any of these fields may be absent from the info line
                if info.find('作者:') > 0:
                    author = re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1)
                if info.find('审核:') > 0:
                    check = re.search(r'审核:((.{2,4}\s){1,3})', info).group(1)
                if info.find('来源:') > 0:
                    sources = re.search(r'来源:(.*)\s*摄|点', info).group(1)
                dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')           # convert the date string to a datetime object
                print('--------------------------------------------------------')
                print('发布时间:{0}\n作者:{1}\n审核:{2}\n来源:{3}\n点击次数:{4}'.format(dateTime, author, check, sources, count))
    
    
    
    url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soup1 = BeautifulSoup(resd.text, 'html.parser')
    n = int(soup1.select('.a1')[0].text.rstrip('条'))   # the '.a1' element reads like 'NNN条'; strip the trailing 条 to get the article count
    Get_page(url)
    for i in range(1, n):
        Get_page('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i))
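
    For reference, the click-count endpoint queried in getClickCount returns a small JavaScript snippet rather than JSON, which is why the count is extracted with a regular expression. A minimal probe, assuming the response shape implied by that regex (the id below is made up):

    import re, requests

    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id=9172&modelid=80'  # 9172 is a made-up example id
    resp = requests.get(clickUrl)
    # inferred from the regex above, the body looks roughly like: $('#hits').html('123');
    match = re.search(r"hits'\).html\('(\d+)'\);", resp.text)
    if match:
        print('click count:', int(match.group(1)))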

    One open question here: in the final for loop, changing range(1, n) to range(2, n) prints the news from only a single list page, while range(1, n) prints the news from every list page.
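
    One way to investigate is to probe the paginated URLs directly and print each HTTP status code, which shows which page numbers actually resolve. A minimal sketch along those lines (the URL pattern is the one used in the loop above):

    import requests

    base = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    for i in range(1, 6):   # probe the first few page numbers
        pageUrl = '{}{}.html'.format(base, i)
        r = requests.get(pageUrl)
        print(pageUrl, r.status_code, len(r.text))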

    4. Pick a topic of personal interest, scrape its data, and run a word-segmentation analysis. The topic must not duplicate another student's.

    import requests, re, jieba
    from bs4 import BeautifulSoup
    from datetime import datetime


    # Get the details of one news article
    def getNewsDetail(newsUrl):
        resd = requests.get(newsUrl)
        resd.encoding = 'gb2312'
        soupd = BeautifulSoup(resd.text, 'html.parser')

        content = soupd.select('#endText')[0].text
        info = soupd.select('.post_time_source')[0].text
        date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)  # match the date-time string
        dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')  # convert the date string to a datetime object
        sources = re.search(r'来源:\s*(.*)', info).group(1)
        keyWords = getKeyWords(content)
        print('发布时间:{0}\n来源:{1}'.format(dateTime, sources))
        print('关键词:{}、{}、{}'.format(keyWords[0], keyWords[1], keyWords[2]))
        print(content)
    
    
    # Extract the article's keywords via jieba word segmentation
    def getKeyWords(content):
        content = ''.join(re.findall('[\u4e00-\u9fa5]', content))  # keep only the Chinese characters, dropping punctuation
        wordSet = set(jieba.lcut(content))
        wordDict = {}
        for i in wordSet:
            wordDict[i] = content.count(i)
        deleteList, keyWords = [], []
        for i in wordDict.keys():
            if len(i) < 2:
                deleteList.append(i)  # drop single-character tokens, which carry little meaning
        for i in deleteList:
            del wordDict[i]
        dictList = list(wordDict.items())
        dictList.sort(key=lambda item: item[1], reverse=True)  # sort by frequency and return the top three keywords
        for i in range(3):
            keyWords.append(dictList[i][0])
        return keyWords
    
    
    # Get the news on one list page
    def getListPage(listUrl):
        res = requests.get(listUrl)
        res.encoding = 'gbk'
        soup = BeautifulSoup(res.text, 'html.parser')
        for new in soup.select('.newsList')[0].select('li'):
            newsUrl = new.select('a')[0]['href']
            title = new.select('a')[0].text
            print('题目:{0}\n网址链接:{1}'.format(title, newsUrl))
            getNewsDetail(newsUrl)
            break  # only process the first article on each list page
    
    
    listUrl = 'http://tech.163.com/it/'
    getListPage(listUrl)
    for i in range(1, 10):
        listUrl = 'http://tech.163.com/special/it_2016_%02d/' % i
        getListPage(listUrl)
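
    As an aside, jieba also ships a TF-IDF keyword extractor, jieba.analyse.extract_tags, which could replace the manual frequency counting in getKeyWords above. A minimal sketch (the sample sentence is made up):

    import jieba.analyse

    text = '今天的校园新闻报道了程序设计竞赛的获奖名单和比赛过程。'  # made-up sample text
    # rank tokens by TF-IDF against jieba's built-in corpus and keep the top three
    for keyword in jieba.analyse.extract_tags(text, topK=3):
        print(keyword)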

  • Original post: https://www.cnblogs.com/zxc109525/p/8798758.html