This article contains 2 files:
1:taobao_re_xpath
2:taobao_re_xpath_setting
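Both files below are Python 3. File 1 is the crawler proper: it requests Taobao search-result pages, parses the JSON the page embeds in its g_page_config variable (falling back to an Ajax endpoint when fewer than 44 items are embedded), saves the parsed records to MongoDB, and then downloads each item's gallery images, colour-variant images and video. File 2 holds the configuration. Note that s.taobao.com pages by item offset rather than page number, at 44 items per page, so page N corresponds to s = 44*(N-1): page 1 is s=0, page 2 is s=44, and so on.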
#
1:taobao_re_xpath
# -*- coding:utf-8 -*-
# author : yesehngbao
# time:2018/3/20
import os
import requests
import re
import json
import pymongo
import hashlib
from taobao_re_xpath_setting import *
from multiprocessing import Pool
from lxml import etree
dirname = DIRNAME
if not os.path.exists(dirname):
    os.mkdir(dirname)
dirname1 = DIRNAME1
if not os.path.exists(dirname+'/'+dirname1):
    os.mkdir(dirname+'/'+dirname1)
dirname2 = DIRNAME2
if not os.path.exists(dirname+'/'+dirname2):
    os.mkdir(dirname+'/'+dirname2)
dirname3 = DIRNAME3
if not os.path.exists(dirname+'/'+dirname3):
    os.mkdir(dirname+'/'+dirname3)
url = 'https://s.taobao.com/search'
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)"
                  " Chrome/64.0.3282.186 Safari/537.36"}
def md5(strs):
    """Return a salted MD5 hex digest, used to build safe, unique file names."""
    strs = strs + '12sdwz..'
    strs = hashlib.md5(strs.encode('utf-8'))
    key = strs.hexdigest()
    return key
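# A note on md5(): the digest is always a fixed 32-character hex string, so the
# same source name maps to the same file on every run, and the resulting file
# names are free of characters that are illegal on disk.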
def get_html(page):
    """
    Fetch one search-result ("list") page.
    :param page: item offset for the 's' query parameter (multiples of 44); GOODS is the search keyword
    :return: page HTML on HTTP 200, otherwise None
    """
    params = {
        'q': '%s' % GOODS,
        's': page,
    }
    response = requests.get(url, headers=headers, params=params)
    if response.status_code == 200:
        return response.text
    return None
def get_ajax_html():
    """
    Parse the Ajax endpoint that serves the remaining items when the
    list page embeds fewer than a full page of them.
    """
    ajax_url = 'https://s.taobao.com/api'
    params = {
        '_ksTS': '1521612082036_312',
        'callback': 'jsonp267',
        'ajax': 'true',
        'm': 'customized',
        'q': '%s' % GOODS,
        's': 36,
        'bcoffset': 0,
        'rn': '4e1dc906143376f8d2e735536fd3ee0c'
    }
    response = requests.get(ajax_url, headers=headers, params=params).text
    # Strip the JSONP wrapper, e.g. jsonp267({...}), and keep the JSON payload.
    comp = re.compile(r'jsonp\d+\((.*?)\)', re.S)
    strs = re.findall(comp, response)
    if strs:
        strs = json.loads(strs[0])
        commodity_list = strs.get('API.CustomizedApi').get('itemlist').get('auctions')
        if commodity_list:
            for commodity in commodity_list:
                addr = commodity.get('item_loc')
                nick = commodity.get('nick')
                sales = commodity.get('view_sales')
                detail = 'http:' + commodity.get('detail_url')
                yield {
                    'addr': addr,
                    'nick': nick,
                    'sales': sales,
                    'detail': detail,
                }
def analysis(html):
    """
    Parse the list-page data.
    html: list-page source
    content: records collected from the embedded JS plus, if needed, the Ajax endpoint
    """
    if not html:
        return None
    content = []
    comp = re.compile('g_page_config = (.*?)g_srp_loadCss', re.S)
    strs = re.findall(comp, html)
    if not strs:
        return None
    strs = strs[0].replace(';', '')
    strs = json.loads(strs)
    data = strs.get('mods').get('itemlist').get('data').get('auctions')
    if not data:
        return None
    for i in data:
        detail = i.get('detail_url')
        if not re.match('http', detail):
            detail = 'http:' + detail
        item = {
            'addr': i.get('item_loc'),
            'nick': i.get('nick'),
            'sales': i.get('view_sales'),
            'detail': detail,
        }
        content.append(item)
    if len(data) < 44:
        # The page embedded less than a full page of items; fetch the rest via Ajax.
        for i in get_ajax_html():
            content.append(i)
    else:
        print(len(data))
    return content
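# For reference, each record appended to content (and later written to MongoDB)
# has this shape -- keys as produced above, values illustrative only:
# {'addr': '上海', 'nick': 'example_shop', 'sales': '1000人付款', 'detail': 'http://item.taobao.com/...'}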
def save_img(img_new, page):
    """
    Download a main (gallery) image.
    img_new: main-image URL
    """
    if img_new:
        img_name = img_new[30:].replace('/', '-')
        response = requests.get(img_new, headers=headers).content
        with open(dirname+'/'+dirname1+'/'+md5(img_name)+'.jpg', 'wb') as fp:
            fp.write(response)
        print('page %s ---- main image saved: ' % page, img_name)
def save_color_img(color_url, page):
    """
    Download a colour (variant) swatch image.
    color_url: swatch-image URL
    """
    if color_url:
        img_name = color_url[30:].replace('/', '-')
        response = requests.get(color_url, headers=headers).content
        with open(dirname + '/' + dirname2 + '/' + md5(img_name)+'.jpg', 'wb') as fp:
            fp.write(response)
        print('page %s ---- colour image saved: ' % page, img_name)
def save_video(detail_url, title, page):
    """
    Download the item video.
    detail_url: video URL
    title: video name
    """
    if detail_url:
        response = requests.get(detail_url, headers=headers).content
        with open(dirname+'/'+dirname3+'/' + md5(title)+'.mp4', 'wb') as fp:
            fp.write(response)
        print('page %s ---- video downloaded: (' % page + title + ')')
        return 'download_ok'
def analysis_detail(response, page):
    """
    Pull the gallery images, colour (variant) images and video from a detail page.
    response: detail-page source
    doc: lxml document for XPath queries
    img_new: gallery-image URL
    color_url: swatch URL
    video_new: video URL
    """
    # Gallery images. 'page' arrives as the 's' offset, so map it back to a 1-based page number.
    if page == 0:
        page = 1
    else:
        page = page // 44 + 1
    doc = etree.HTML(response)
    li_list = doc.xpath('.//ul[@class="tb-clearfix" or @id="J_UlThumb"]/li')
    for li in li_list:
        img_old = li.xpath('./a/img/@src')
        if img_old:
            img_old = img_old[0]
            # Swap the thumbnail size suffix in the tail of the URL (60x60 -> 400x400).
            img_new = img_old[-15:].replace('60', '400')
            img_new = 'http:' + img_old[:-15] + img_new
            save_img(img_new, page)
    # Colour (variant) images: enlarge the 40x40/30x30 swatches the same way.
    compi = re.compile(r'style="background:url\((.*?)\)')
    color_img = re.findall(compi, response)
    for color in color_img:
        if color:
            color_url = color[-15:].replace('40', '400').replace('30', '400')
            color_url = 'http:' + color[:-15] + color_url
            save_color_img(color_url, page)
    # Video: the player config is embedded in a TShop.Setup(...) call.
    comp = re.compile(r'TShop\.Setup\(\s(.*?)\s\)', re.S)
    strs = re.findall(comp, response)
    if strs:
        strs = json.loads(strs[0])
        video_old = strs.get('itemDO').get('imgVedioUrl')
        if video_old:
            # Rewrite the preview URL pattern into the full-quality video URL.
            video_new = video_old.replace('e/1', 'e/6').replace('t/8', 't/1')
            title = strs.get('itemDO').get('title')
            save_video('http:' + video_new, title, page)
        else:
            return 'no video'
    else:
        return 'no video (or it is obfuscated)'
    return 'download finished'
def get_detail(content, page):
    """
    Fetch every detail page referenced by the list-page data.
    content: list-page records
    detail_url: detail-page entry URL
    """
    if content:
        for cont in content:
            detail_url = cont.get('detail')
            response = requests.get(detail_url, headers=headers)
            if response.status_code == 200:
                analysis_detail(response.text, page)
            else:
                print(response.status_code)
    return 'detail pages crawled'
def save_mongo(content):
    """
    Save the list-page records to MongoDB.
    content: list-page records
    """
    if content:
        mongo_client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT)
        db = mongo_client[MONGO_DB]
        coll = db[MONGO_COLL]
        coll.insert_many(content)  # insert_many replaces the deprecated Collection.insert
        print('data saved: ', content, len(content))
    return None
def main(page):
    """
    Crawls Taobao listings for a keyword: item images, shop, location,
    listing info, video, and so on.
    page: item offset of the list page to fetch
    html: list-page source
    content: item records and URLs from the list page
    """
    html = get_html(page)
    content = analysis(html)
    save_mongo(content)
    get_detail(content, page)


if __name__ == '__main__':
    pool = Pool()
    pool.map(main, [page*44 for page in range(NUM)])
    pool.close()
    pool.join()
    print('finished')
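To smoke-test the pipeline without the process pool, the entry point can be reduced to direct calls; a minimal sketch (remember that main() takes the 's' offset, not a page number):

# Debug alternative: crawl just the first two list pages in-process.
main(0)     # page 1 -> offset 0
main(44)    # page 2 -> offset 44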
#
2:taobao_re_xpath_setting
# -*- coding:utf-8 -*-
# author : yesehngbao
# time:2018/3/21
# Search keyword for the goods to crawl
GOODS = '皮鞋'  # leather shoes
# Number of list pages to crawl (no more than 100)
NUM = 100
# MongoDB configuration
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_DB = 'test'
MONGO_COLL = 'shoe'
# Root directory
DIRNAME = 'taobao'
# Main-image directory
DIRNAME1 = 'shoe_park_img'
# Colour (variant) image directory
DIRNAME2 = 'shoe_color_img'
# Video directory
DIRNAME3 = 'shoe_video'
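Pointing the crawler at something else only means editing this file; for example (illustrative values, the setting names themselves are fixed):

# Example: crawl 10 pages of '背包' (backpacks) into their own collection and folders.
GOODS = '背包'
NUM = 10
MONGO_COLL = 'bag'
DIRNAME1 = 'bag_main_img'
DIRNAME2 = 'bag_color_img'
DIRNAME3 = 'bag_video'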