zoukankan      html  css  js  c++  java
  • python3 xpath数据获取案例

    import requests
    from retrying import retry
    from lxml import etree
    import json


    class DaCheng(object):
      """Scrape the professionals directory at dachengnet.com page by page
      and append each person's record to DaCheng.json."""

      def __init__(self):
        # Page-number placeholder is filled in by get_url_list().
        self.temp_url = "http://www.dachengnet.com/cn/professionals?currentPageNo={}&"
        # Browser-like UA so the site does not reject the scraper outright.
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36"}

      def get_url_list(self):
        """Return the URLs for pages 1..77 (78 is exclusive)."""
        return [self.temp_url.format(i) for i in range(1, 78)]

      @retry(stop_max_attempt_number=3)
      def _parse_url(self, url):
        """Fetch *url* and return an lxml element tree.

        Retried up to 3 times by @retry. Raises requests.HTTPError on a
        non-2xx status (assert was unsafe here: it is stripped under -O).
        """
        r = requests.get(url, headers=self.headers, timeout=3)
        r.raise_for_status()
        return etree.HTML(r.content)

      def parse_url(self, url):
        """Fetch with retries; return None when all attempts fail.

        Catches Exception (not a bare except) so KeyboardInterrupt and
        SystemExit still propagate.
        """
        try:
          return self._parse_url(url)
        except Exception:
          return None

      @staticmethod
      def _first_or_none(node, path, clean=False):
        """Return the first xpath text hit (optionally whitespace-cleaned), or None."""
        hits = node.xpath(path)
        if not hits:
          return None
        text = hits[0]
        if clean:
          # Original code chained two identical .replace(' ', '') calls;
          # presumably one targeted a non-breaking/full-width space that was
          # lost in transcription — TODO confirm against the live page.
          text = text.strip().replace(' ', '')
        return text

      def get_content_list(self, html):
        """Extract one dict per table row.

        Returns [] when *html* is None (a failed fetch), instead of crashing.
        Bug fix: the original returned INSIDE the row loop, so only the first
        row of every page was ever collected.
        """
        if html is None:
          return []
        content_list = []
        for tr in html.xpath("//tbody/tr"):
          item = {
            'Name': self._first_or_none(tr, './td[1]/a/text()'),
            'Email': self._first_or_none(tr, './td[2]/text()'),
            'Position': self._first_or_none(tr, './td[3]/text()', clean=True),
            'Location': self._first_or_none(tr, './td[4]/text()', clean=True),
          }
          content_list.append(item)
        return content_list

      def save_content_list(self, content_list):
        """Append each record to DaCheng.json as pretty-printed JSON.

        encoding='utf-8' is required because ensure_ascii=False writes raw
        Chinese characters, which breaks on non-UTF-8 default locales.
        """
        with open('DaCheng.json', 'a', encoding='utf-8') as f:
          for content in content_list:
            json.dump(content, f, ensure_ascii=False, indent=2)
            f.write(', ')
            print('保存成功')

      def run(self):
        """Crawl every page: build URLs, fetch, parse, save."""
        url_list = self.get_url_list()
        for url in url_list:
          html = self.parse_url(url)
          content_list = self.get_content_list(html)
          self.save_content_list(content_list)

    if __name__ == '__main__':
      # Bug fix: these two lines were not indented under the if, which is a
      # SyntaxError and would also run the crawl on every import.
      dacheng = DaCheng()
      dacheng.run()

  • 相关阅读:
    20190127-将一个文件拆分为多个新文件
    20190125-找到列表第二大的数以及自己写一个冒泡排序
    20190121-n个人围成一圈,凡报到3的人退出圈子,最后留下的是原来第几号的那位
    20190120-自定义实现split方法
    20190118-自定义实现replace方法
    20190118-利用Python实现Pig Latin游戏
    20190116-将特定数字插入一个已经排序好的序列并且不改变其排序规则
    20190112-自定义实现字符串的操作方法,如strip,upper,title,ljust,center,zfill,find,rfind等
    20190110-用笨办法找到二维矩阵的鞍点
    我想转行—程序员转行自媒体
  • 原文地址:https://www.cnblogs.com/x-pyue/p/7798819.html
Copyright © 2011-2022 走看看