  • Scraping the Maoyan top 100

    import urllib.request
    import random
    import re
    import json
    
    '''
    To avoid 403 responses, mimic a real browser by sending a User-Agent header
    (picked at random from the list below).
    '''
    my_headers = [
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
        "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)"
    ]
    
    url2 = "http://maoyan.com/board/4?"
    
    def parse_one_page(html):
        # One match per <dd> movie entry: rank, poster URL, title, cast, release date,
        # and the score split into its integer and fraction parts.
        pattern = re.compile('<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                             +'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                             +'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
        items = re.findall(pattern, html)
        for item in items:
            yield {
                'index': item[0],
                'image': item[1],
                'title': item[2],
                'actor': item[3].strip()[3:],   # drop the "主演:" prefix
                'time': item[4].strip()[5:],    # drop the "上映时间:" prefix
                'score': item[5] + item[6]      # integer part + fraction part
            }
    
    def write_to_file(content):
        with open('result5.txt', 'a', encoding='utf-8') as f:
            f.write(json.dumps(content, ensure_ascii=False) + '\n')
    
    '''
    Crawl the 10 pages of the board in a loop (offset = 0, 10, ..., 90).
    '''
    def main():
        for i in range(10):
            offset = i * 10
            url = url2 + 'offset=' + str(offset)
            random_header = random.choice(my_headers)  # rotate the User-Agent on every request
            req = urllib.request.Request(url)
            req.add_header("User-Agent", random_header)
            response = urllib.request.urlopen(req)
            html = response.read().decode('utf-8')
            for item in parse_one_page(html):
                print(item)
                write_to_file(item)
    
    if __name__ == '__main__':
        main()
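
    write_to_file appends each movie as one JSON object per line, so result5.txt can be read back line by line. Below is a minimal sketch of doing that; load_results is a hypothetical helper, not part of the original script, and it assumes the file was produced by the code above.

    import json

    def load_results(path='result5.txt'):
        # Hypothetical helper: parse the one-JSON-object-per-line file written by write_to_file.
        movies = []
        with open(path, encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line:
                    movies.append(json.loads(line))
        return movies

    if __name__ == '__main__':
        for movie in load_results():
            print(movie['index'], movie['title'], movie['score'])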
  • Original post: https://www.cnblogs.com/xlsxls/p/9930069.html