对任务进行分解:
①爬取CVPR2018的标题,简介,关键字,论文链接
②将爬取的信息生成wordCloud词云图展示
③设置点击事件,展示对应关键字的论文以及链接
一、爬虫实现
由于文章页面中并没有提供关键字,于是将标题按下划线拆分成若干关键字,用逗号隔开
代码:
"""Crawl CVPR open-access paper pages and store title / link / abstract /
keywords rows into the local MySQL table `lunwen`.

Pipeline: fetch the conference listing page, follow each paper's detail
page to read its abstract, derive comma-separated keywords from the
underscore-separated title, then insert every row.

NOTE(review): the original listing URL pointed at ECCV2018.py while every
detail link and the filename suffix assumed CVPR_2019 — that mismatch made
the suffix strip a no-op and every detail link 404, which (combined with a
never-reset abstract variable) silently reused stale abstracts. Unified on
CVPR 2019 here; confirm the intended conference and change CONF_TAG /
LISTING_URL together if it should be a different one.
"""

CONF_TAG = "CVPR_2019"
PAPER_SUFFIX = "_" + CONF_TAG + "_paper"
LISTING_URL = "https://openaccess.thecvf.com/CVPR2019.py"
DETAIL_URL = ("http://openaccess.thecvf.com/content_" + CONF_TAG
              + "/html/{}" + PAPER_SUFFIX + ".html")

# Browser-like User-Agent; the original built this dict but never passed it
# to requests.get — it is now sent with every request.
HEADERS = {
    "User-Agent": ("Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
                   "(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"),
}

REQUEST_TIMEOUT = 30  # seconds; avoid hanging forever on a dead server


def name_from_pdf_href(href):
    """Return a paper's base name given the href of its `pdf` anchor.

    e.g. '/content_CVPR_2019/papers/Foo_Bar_CVPR_2019_paper.pdf' -> 'Foo_Bar'
    """
    pdf_name = href.split('/')[-1]
    return pdf_name.split('.')[0].replace(PAPER_SUFFIX, "")


def keywords_from_name(name):
    """Derive comma-separated keywords from the underscore-separated title."""
    return ','.join(str(name).split('_'))


def fetch_abstract(link):
    """Fetch a paper's detail page and return its abstract text ('' if absent)."""
    import requests
    from bs4 import BeautifulSoup
    resp = requests.get(link, headers=HEADERS, timeout=REQUEST_TIMEOUT)
    soup = BeautifulSoup(resp.content, 'html.parser')
    div = soup.find('div', attrs={'id': 'abstract'})
    return div.get_text() if div else ""


def crawl():
    """Scrape the listing page; return a list of paper-info dicts."""
    import requests
    from bs4 import BeautifulSoup
    listing = requests.get(LISTING_URL, headers=HEADERS, timeout=REQUEST_TIMEOUT)
    soup = BeautifulSoup(listing.content, 'html.parser')
    pdf_anchors = soup.find_all(name="a", text="pdf")
    papers = []
    for i, anchor in enumerate(pdf_anchors):
        name = name_from_pdf_href(anchor["href"])
        link = DETAIL_URL.format(name)
        # Fetched per paper; previously a missing abstract silently reused the
        # previous paper's text because the variable was never reset.
        abstract = fetch_abstract(link)
        print("这是第" + str(i) + "条数据")
        papers.append({
            'title': name,
            'link': link,
            'abstract': abstract,
            'keywords': keywords_from_name(name),
        })
    return papers


def save(papers):
    """Insert paper dicts into MySQL table `lunwen`.

    Column names come from our own dict keys; values are bound by the driver
    via %(key)s placeholders, so no user data is string-formatted into SQL.
    """
    import pymysql
    db = pymysql.connect(host='localhost', user='root', password='123456',
                         db='mytest', charset='utf8')
    try:
        cursor = db.cursor()
        for info in papers:
            cols = ", ".join('`{}`'.format(k) for k in info.keys())
            placeholders = ', '.join('%({})s'.format(k) for k in info.keys())
            sql = "insert into lunwen(%s) values(%s)" % (cols, placeholders)
            cursor.execute(sql, info)
        db.commit()  # one commit for the batch instead of per row
    finally:
        db.close()  # always release the connection


def main():
    """Crawl the listing, then persist everything."""
    save(crawl())
    print("ok")


if __name__ == "__main__":
    main()