  • Structuring and saving the data

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    import re
    import pandas

    # Get the click count of a news article
    def getClickCount(newsUrl):
        # pull the article id out of a URL such as .../xiaoyuanxinwen_0404/9183.html
        newId = re.search('_(.*).html', newsUrl).group(1).split('/')[1]
        clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(newId)
        clickStr = requests.get(clickUrl).text
        count = re.search(r"hits'\).html\('(.*)'\);", clickStr).group(1)
        return count
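    # Note (assumption inferred from the regex above, not stated in the original):
    # the count API is expected to return a small jQuery snippet such as
    #     $('#hits').html('3210');
    # and the captured group is the click count as a string.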

    # Get the details of one news article
    def getNewsDetail(newsurl):
        resd = requests.get(newsurl)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')

        news = {}
        news['title'] = soupd.select('.show-title')[0].text
        # news['newsurl'] = newsurl
        info = soupd.select('.show-info')[0].text
        news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
        news['click'] = int(getClickCount(newsurl))
        if info.find('来源:') > 0:
            news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
        else:
            news['source'] = 'none'
        if info.find('作者:') > 0:
            news['author'] = info[info.find('作者:'):].split()[0].lstrip('作者:')
        else:
            news['author'] = 'none'
        # news['content'] = soupd.select('.show-content')[0].text.strip()

        # fetch the article body and append it to a text file
        content = soupd.select('.show-content')[0].text.strip()
        writeNewsContent(content)

        return news

    # Collect the details of every news item on one list page
    def getListPage(listPageUrl):
        res = requests.get(listPageUrl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')

        newsList = []
        for news in soup.select('li'):
            if len(news.select('.news-list-title')) > 0:
                a = news.select('a')[0].attrs['href']
                newsList.append(getNewsDetail(a))
        return newsList

    # Append the article body to a text file
    def writeNewsContent(content):
        f = open('gzccNews.txt', 'a', encoding='utf-8')
        f.write(content)
        f.close()
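    # A sketch of an equivalent variant (not in the original post): a context
    # manager closes the file even if the write fails.
    #
    #     with open('gzccNews.txt', 'a', encoding='utf-8') as f:
    #         f.write(content)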

    # Work out how many list pages the news section has
    def getPageNumber():
        listPageUrl = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
        res = requests.get(listPageUrl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        # the '.a1' element holds the total article count (text ending in '条'); 10 items per page
        n = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
        return n


    newsTotal=[]
    firstPage='http://news.gzcc.cn/html/xiaoyuanxinwen/'
    newsTotal.extend(getListPage(firstPage))

    n=getPageNumber()
    # only the last list page is crawled here; use range(2, n + 1) to fetch every page
    for i in range(n, n + 1):
        listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        newsTotal.extend(getListPage(listUrl))

    df=pandas.DataFrame(newsTotal)
    # df.to_excel("news.xlsx")

    # print(df.head(6))
    # print(df[['author','click','source']])
    # print(df[df['click']>3000])

    # keep only the articles whose source is one of these departments
    sou = ['国际学院', '学生工作处']
    print(df[df['source'].isin(sou)])
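
    # --- Saving the structured data (sketch, not part of the original code) ----
    # The original only hints at saving via the commented-out df.to_excel above.
    # The calls below are one plausible way to persist the DataFrame; the file
    # names and the SQLite table name are illustrative, and to_excel needs an
    # Excel writer such as openpyxl installed.
    df.to_excel('gzccnews.xlsx')
    df.to_csv('gzccnews.csv', index=False, encoding='utf-8-sig')

    # or keep the records in a local SQLite database for later querying
    import sqlite3
    with sqlite3.connect('gzccnews.db') as conn:
        df.to_sql('news', conn, if_exists='replace', index=False)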

  • Original post: https://www.cnblogs.com/AAAAAAAA/p/8869415.html