- robots.txt protocol
- UA (User-Agent) check
- Basic crawling workflow:
- 1. Specify the URL
- 2. Send the request
- 3. Get the page data
- 4. Parse the data
- 5. Persist (store) the data
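A minimal sketch of this workflow with requests; the URL and output file name are placeholders, and parsing is deferred to the bs4/xpath notes below.

import requests

# 1. specify the URL (placeholder)
url = 'https://www.example.com/'

# 2. send the request, carrying a UA header so the server does not reject the crawler
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
response = requests.get(url=url, headers=headers)

# 3. get the page data
page_text = response.text

# 4. data parsing would happen here (bs4 or xpath, see below)

# 5. persist the result
with open('./page.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)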
bs4:
- Instantiate a BeautifulSoup object and load the page source data into it
- Locate tags: find('name', class_='xxx'), find_all(), select()
- Extract content from a tag: string, text, get_text(), a['href']
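A minimal sketch of these bs4 calls, assuming the test page shown further below has been saved locally as test.html (the file name is an assumption):

from bs4 import BeautifulSoup

# instantiate a BeautifulSoup object and load the page source into it
with open('./test.html', 'r', encoding='utf-8') as fp:
    soup = BeautifulSoup(fp, 'lxml')

# locate tags
div = soup.find('div', class_='song')   # first <div class="song">
lis = soup.select('.tang > ul > li')    # CSS selector, returns a list
all_a = soup.find_all('a')              # every <a> tag

# extract text and attributes
print(soup.title.string)                # text directly inside <title>
print(div.get_text())                   # all text inside the div
print(all_a[0]['href'])                 # attribute value of the first <a>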
xpath (lxml):
- Environment setup: pip install lxml
- Parsing principle (see the sketch after the test page below):
- Get the page source data
- Instantiate an etree object and load the page source data into it
- Call the object's xpath() method to locate the target tags
- Note: xpath() must be given an xpath expression to locate tags and capture content
/html/head/title   (absolute path, level by level from the root)
//head/title       (// matches head anywhere in the document, then its child title)
//title            (locate title no matter where it appears)
<html lang="en">
<head>
    <meta charset="UTF-8" />
    <title>测试bs4</title>
</head>
<body>
    <div>
        <p>百里守约</p>
    </div>
    <div class="song">
        <p>李清照</p>
        <p>王安石</p>
        <p>苏轼</p>
        <p>柳宗元</p>
        <a href="http://www.song.com/" title="赵匡胤" target="_self">
            <span>this is span</span>
            宋朝是最强大的王朝,不是军队的强大,而是经济很强大,国民都很有钱</a>
        <a href="" class="du">总为浮云能蔽日,长安不见使人愁</a>
        <img src="http://www.baidu.com/meinv.jpg" alt="" />
    </div>
    <div class="tang">
        <ul>
            <li><a href="http://www.baidu.com" title="qing">清明时节雨纷纷,路上行人欲断魂,借问酒家何处有,牧童遥指杏花村</a></li>
            <li><a href="http://www.163.com" title="qin">秦时明月汉时关,万里长征人未还,但使龙城飞将在,不教胡马度阴山</a></li>
            <li><a href="http://www.126.com" alt="qi">岐王宅里寻常见,崔九堂前几度闻,正是江南好风景,落花时节又逢君</a></li>
            <li><a href="http://www.sina.com" class="du">杜甫</a></li>
            <li><a href="http://www.dudu.com" class="du">杜牧</a></li>
            <li><b>杜小月</b></li>
            <li><i>度蜜月</i></li>
            <li><a href="http://www.haha.com" id="feng">凤凰台上凤凰游,凤去台空江自流,吴宫花草埋幽径,晋代衣冠成古丘</a></li>
        </ul>
    </div>
</body>
</html>
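A minimal sketch of the lxml parsing principle above, run against this test page (assumed saved as test.html); it also shows that the three title expressions are equivalent here:

from lxml import etree

# instantiate an etree object and load the page source data into it
with open('./test.html', 'r', encoding='utf-8') as fp:
    tree = etree.HTML(fp.read())   # for a live page, pass requests' page_text instead

# the three expressions above all locate the same <title> node
print(tree.xpath('/html/head/title/text()'))
print(tree.xpath('//head/title/text()'))
print(tree.xpath('//title/text()'))

# locate by attribute and capture content
print(tree.xpath('//div[@class="song"]/p/text()'))   # the poets' names
print(tree.xpath('//a[@id="feng"]/@href'))            # attribute capture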
# Project: scrape second-hand housing listings from 58.com
import requests
from lxml import etree

url = 'https://bj.58.com/shahe/ershoufang/?utm_source=market&spm=u-2d2yxv86y3v43nkddh1.BDPCPZ_BT&PGTID=0d30000c-0047-e4e6-f587-683307ca570e&ClickID=1'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//ul[@class="house-list-wrap"]/li')
fp = open('58.csv', 'w', encoding='utf-8')
for li in li_list:
    title = li.xpath('./div[2]/h2/a/text()')[0]
    price = li.xpath('./div[3]//text()')
    price = ''.join(price)
    fp.write(title + ":" + price + '\n')   # one listing per line
fp.close()
print('over')
# ctrl+shift+x  (shortcut that toggles the XPath Helper browser plugin)
# Scrape image data from: http://pic.netbian.com/4kmeinv/
import requests
from lxml import etree
import os
import urllib.request

url = 'http://pic.netbian.com/4kmeinv/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
response = requests.get(url=url, headers=headers)
# response.encoding = 'utf-8'
if not os.path.exists('./imgs'):
    os.mkdir('./imgs')
page_text = response.text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@class="slist"]/ul/li')
for li in li_list:
    img_name = li.xpath('./a/b/text()')[0]
    # fix the garbled Chinese file names
    img_name = img_name.encode('iso-8859-1').decode('gbk')
    img_url = 'http://pic.netbian.com' + li.xpath('./a/img/@src')[0]
    img_path = './imgs/' + img_name + '.jpg'
    urllib.request.urlretrieve(url=img_url, filename=img_path)
    print(img_path, 'downloaded')
print('over!!!')
# [Key point] Download the image data from jandan.net: http://jandan.net/ooxx
# The image URLs are encrypted in the page source (anti-scraping mechanism)
import requests
from lxml import etree
import base64
import urllib.request

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'http://jandan.net/ooxx'
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
img_hash_list = tree.xpath('//span[@class="img-hash"]/text()')
for img_hash in img_hash_list:
    # each span holds a base64-encoded image URL; decode it to recover the real address
    img_url = 'http:' + base64.b64decode(img_hash).decode()
    img_name = img_url.split('/')[-1]
    urllib.request.urlretrieve(url=img_url, filename=img_name)
# Scrape the free resume templates from 站长素材 (sc.chinaz.com)
import requests
import random
from lxml import etree

headers = {
    # close the connection as soon as the request finishes (release the pool resource promptly)
    'Connection': 'close',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'http://sc.chinaz.com/jianli/free_%d.html'
for page in range(1, 4):
    if page == 1:
        new_url = 'http://sc.chinaz.com/jianli/free.html'
    else:
        new_url = format(url % page)
    response = requests.get(url=new_url, headers=headers)
    response.encoding = 'utf-8'
    page_text = response.text
    tree = etree.HTML(page_text)
    div_list = tree.xpath('//div[@id="container"]/div')
    for div in div_list:
        detail_url = div.xpath('./a/@href')[0]
        name = div.xpath('./a/img/@alt')[0]
        detail_page = requests.get(url=detail_url, headers=headers).text
        tree = etree.HTML(detail_page)
        download_list = tree.xpath('//div[@class="clearfix mt20 downlist"]/ul/li/a/@href')
        # pick a random download mirror
        download_url = random.choice(download_list)
        data = requests.get(url=download_url, headers=headers).content
        fileName = name + '.rar'
        with open(fileName, 'wb') as fp:
            fp.write(data)
        print(fileName, 'downloaded')
# Scrape all the city names from the air-quality history site
import requests
from lxml import etree

headers = {
    # close the connection as soon as the request finishes (release the pool resource promptly)
    'Connection': 'close',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'https://www.aqistudy.cn/historydata/'
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
# the | operator lets one expression match both the hot-city list and the all-city list
li_list = tree.xpath('//div[@class="bottom"]/ul/li | //div[@class="bottom"]/ul/div[2]/li')
for li in li_list:
    city_name = li.xpath('./a/text()')[0]
    print(city_name)
# Set a proxy IP for the request; sources: www.goubanjia.com, 快代理 (Kuaidaili), 西祠代理 (Xici)
# The proxy type (http/https) must match the protocol of the requested URL
import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'https://www.baidu.com/s?wd=ip'
page_text = requests.get(url=url, headers=headers, proxies={'https': '61.7.170.240:8080'}).text
with open('./ip.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
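If a single proxy gets banned, a small pool can be rotated with random.choice; this is a sketch only, and the addresses below are placeholders rather than live proxies.

import random
import requests

# placeholder proxies -- substitute working ones from the proxy sites listed above
https_proxies = [
    {'https': '61.7.170.240:8080'},
    {'https': '111.11.11.11:8888'},
    {'https': '222.22.22.22:9999'},
]

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
url = 'https://www.baidu.com/s?wd=ip'
# pick a different proxy for each request
page_text = requests.get(url=url, headers=headers, proxies=random.choice(https_proxies)).text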
Anti-scraping mechanisms covered so far:
- robots.txt
- UA check
- data encryption
- image lazy loading (see the sketch below)
- proxy IP
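On the lazy-loading point: some image sites only fill in the real src after an image scrolls into view, and keep the actual URL in a pseudo-attribute. A hedged sketch, assuming the pseudo-attribute is named src2 and assuming the listing URL and xpath below (both are illustrative, not verified):

import requests
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
}
# hypothetical listing page whose <img> tags carry the real URL in a src2 pseudo-attribute
page_text = requests.get(url='http://sc.chinaz.com/tupian/', headers=headers).text
tree = etree.HTML(page_text)
# the source served to a crawler never "scrolls", so read src2 instead of src (xpath is an assumption)
img_urls = tree.xpath('//div[@id="container"]/div/div/a/img/@src2')
print(img_urls)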