  • Fetching all campus news

    1. Wrap the extraction of all news items from one list page in a function.

    2. Get the total number of news articles and compute the total page count (see the sketch after this list).

    3. Fetch the full details of every news item on every list page.
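
    Since each list page holds ten items, the page count in step 2 is the ceiling of count/10; the common `count // 10 + 1` form overcounts by one page when the total is an exact multiple of ten. A minimal sketch of ceiling division via negated floor division (a hypothetical helper, not part of the original script):

    def page_count(total_articles, per_page=10):
        # Ceiling division: a final partial page still counts as a page
        return -(-total_articles // per_page)

    assert page_count(95) == 10    # nine full pages plus one partial page
    assert page_count(100) == 10   # exact multiple: no phantom extra page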

    # -*- coding: utf-8 -*-
    # -*- author: onexiaofeng -*-


    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    import re
    
    
    
    # Get the click count for a single news article
    def getClickCount(url):
        # Extract the news id (the last four digits before ".html") with a regex
        newsId = re.findall(r'_(.*)\.html', url)[0][-4:]
        # Build the request URL for the click-count API
        clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
        clickRes = requests.get(clickUrl)
        # The API returns a JavaScript snippet; pull the count out with a regex
        clickCount = int(re.search(r"hits'\)\.html\('(.*)'\);", clickRes.text).group(1))
        return clickCount
    
    def Get_page(url):
        res = requests.get(url)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        for new in soup.select('li'):
            if len(new.select('.news-list-title')) > 0:

                newsUrl = new.select('a')[0]['href']
                # Fetch the detail page for this news item

                resd = requests.get(newsUrl)
                resd.encoding = 'utf-8'
                soupd = BeautifulSoup(resd.text, 'html.parser')

                content = soupd.select('#content')[0].text
                info = soupd.select('.show-info')[0].text
                # Get the click count via getClickCount()
                count = getClickCount(newsUrl)
                # Pick the publication time out of the info line
                date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)
                # These fields may be absent, and each may list one to three names
                author = check = sources = ''
                if info.find('作者:') > 0:
                    author = re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1)
                if info.find('审核:') > 0:
                    check = re.search(r'审核:((.{2,4}\s){1,3})', info).group(1)
                if info.find('来源:') > 0:
                    sources = re.search(r'来源:(.*?)\s*(?:摄|点)', info).group(1)
                # Convert the time string into a datetime object
                dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
                print('--------------------------------------------------------')
                print('发布时间:{0}\n作者:{1}\n审核:{2}\n来源:{3}\n点击次数:{4}'.format(
                    dateTime, author, check, sources, count))
    
    
    
    
    
    
    url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soup1 = BeautifulSoup(resd.text, 'html.parser')
    # Total pages = ceiling(article count / 10 items per page)
    n = -(-int(soup1.select('.a1')[0].text.rstrip('条')) // 10)
    Get_page(url)
    for i in range(2, n + 1):
        Get_page('http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i))
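
    The click-count endpoint used above returns a small JavaScript snippet rather than JSON, which is why getClickCount() parses the response with a regular expression instead of `res.json()`. A more defensive version of that parse is sketched below; the exact response shape, e.g. `$('#hits').html('123');`, is an assumption inferred from the regex in the script:

    import re

    def parse_click_count(response_text):
        # Assumed response shape: ...$('#hits').html('123');...
        m = re.search(r"hits'\)\.html\('(\d+)'\);", response_text)
        return int(m.group(1)) if m else 0

    print(parse_click_count("$('#hits').html('123');"))  # -> 123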

    4. Pick a topic you are interested in, crawl its data, and run a word-segmentation analysis on it. The topic must not duplicate another student's.

    # -*- coding: utf-8 -*-
    # -*- author: onexiaofeng -*-


    import requests
    from bs4 import BeautifulSoup
    import re
    import jieba
    
    
    def getKeynews(content):
        # Keep only the Chinese characters, joined into one punctuation-free string
        content = ''.join(re.findall(r'[\u4e00-\u9fa5]', content))
        # Segment the text and deduplicate the tokens into a set
        newSet = set(jieba.lcut(content))
        newDict = {}
        for i in newSet:
            newDict[i] = content.count(i)
        deleteList, keynews = [], []
        for i in newDict.keys():
            if len(i) < 2:
                deleteList.append(i)  # drop single-character, low-information tokens
        for i in deleteList:
            del newDict[i]
        dictList = list(newDict.items())
        dictList.sort(key=lambda item: item[1], reverse=True)  # sort by frequency, descending
        # Return the top three keywords
        for i in range(min(3, len(dictList))):
            keynews.append(dictList[i][0])
        return keynews
    
    def getNewsDetail(newsUrl):
        resd = requests.get(newsUrl)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')

        source = soupd.select('.comeFrom')[0].select('a')[0].text
        time = soupd.select('#pubtime_baidu')[0].text
        content = soupd.select('.artical-main-content')[0].text
        keynews = getKeynews(content)

        print('发布时间:{0}\n来源:{1}'.format(time, source))
        print('新闻内容:{0}\n'.format(content))
        print('前三关键字:{0}\n'.format(keynews))
    
    
    
    
    
    
    def Get_page(url):
        res = requests.get(url)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        # Each news item sits under .tag-list-box > .list
        for new in soup.select('.tag-list-box')[0].select('.list'):
            # The headline link lives at .list-content > .name > .n1 > a
            newsUrl = new.select('.list-content')[0].select('.name')[0].select('.n1')[0].select('a')[0]['href']
            getNewsDetail(newsUrl)
            print(newsUrl)
    
    
    
    
    url = 'https://voice.hupu.com/nba/tag/3023-1.html'
    # Crawl the first three list pages of this tag
    Get_page(url)
    for i in range(2, 4):
        Get_page('https://voice.hupu.com/nba/tag/3023-{}.html'.format(i))
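
    A note on the frequency count in getKeynews(): `content.count(i)` re-scans the full text once per unique token, and it counts substring hits, so a short word is also tallied every time it occurs inside a longer word. A minimal alternative sketch using `collections.Counter` over the jieba token list (same top-three output, assuming jieba is installed; `get_key_news` is a hypothetical rename):

    import jieba
    from collections import Counter

    def get_key_news(content, top_n=3):
        # Tokenize once, drop single-character tokens, count in one pass
        words = [w for w in jieba.lcut(content) if len(w) >= 2]
        return [word for word, _ in Counter(words).most_common(top_n)]

    Counter.most_common() already returns (word, count) pairs sorted by descending frequency, so the explicit sort-and-slice step disappears.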
