  • Scraping Python job postings from Zhilian Zhaopin (智联招聘)

    The spider below pages through Zhilian's Beijing "python" search results,
    follows each non-campus posting to its detail page, and appends the parsed
    fields to a CSV file, an Excel file, and a MySQL table via pandas and
    SQLAlchemy.

    import os
    import time
    import traceback

    import requests
    import pandas as pd
    from lxml import html
    from sqlalchemy import create_engine
    from fake_useragent import UserAgent as UA  # UA().random returns a random User-Agent string
    class ZhaoPinSpider:
         def __init__(self):
             # example of a filled-in search URL:
             # http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E5%8C%97%E4%BA%AC&kw=python&sm=0&sg=466cc5c88f044d419cef734bd8713830&p=90
             # jl is the URL-encoded city (北京/Beijing), kw the keyword, {} the page number
             self.url = r"http://sou.zhaopin.com/jobs/searchresult.ashx?jl=%E5%8C%97%E4%BA%AC&kw=python&sm=0&sg=466cc5c88f044d419cef734bd8713830&p={}"
             self.headers = {'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
                             'Accept-Encoding': 'gzip, deflate',
                             'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
                             'Connection': 'Keep-Alive',
                             'User-Agent': UA().random}
             # scraped values are staged in the per-field lists below, then
             # assembled into allinfo_dic and flushed after each posting is saved
             self.allinfo_dic = {}
             self.job_name_list = []
             self.corporate_name_list = []
             self.benefits_list = []
             self.monthly_salary_list = []
             self.job_place_list = []
             self.published_date_list = []
             self.job_nature_list = []
             self.job_experience_list = []
             self.qualifications_list = []
             self.need_worker_num_list = []
             self.job_type_list = []
             self.job_description_list = []
             self.company_introduction_list = []
             self.company_scale_list = []
             self.company_nature_list = []
             self.company_industry_list = []
             self.company_url_list = []
             self.company_address_list = []
    
         def start(self):
             try:
                 req = requests.Session()  # one session reused across all requests
                 for i in range(1, 91):    # result pages 1-90
                     data = req.get(self.url.format(i), headers=self.headers).text
                     self.get_info(data, req)
                     time.sleep(1)         # be polite between result pages
             except Exception as e:
                 print(e)
         def get_info(self, source, req):
             etree = html.fromstring(source)
             # every job row on the result page is rendered as its own <table>;
             # the first table is the column-header row, so skip it
             TablesNodes = etree.xpath("//div[@class='newlist_list_content']//table")
             for index, TablesNode in enumerate(TablesNodes):
                 if index == 0:
                     continue
                 Nodes = TablesNode.xpath(".//tr[1]/td")
                 TitleNodes = Nodes[0]
                 url_nodes = TitleNodes.xpath("./div/a[@par]/@href")[0]
                 if "xiaoyuan" in url_nodes:
                     continue  # skip campus-recruitment ("xiaoyuan") postings
                 print(url_nodes)
                 self.get_next_page(url_nodes, req)
                 self.allinfo_dic.setdefault("job_name", self.job_name_list)
                 self.allinfo_dic.setdefault("corporate_name", self.corporate_name_list)
                 self.allinfo_dic.setdefault("benefits", self.benefits_list)
                 self.allinfo_dic.setdefault("monthly_salary", self.monthly_salary_list)
                 self.allinfo_dic.setdefault("job_place", self.job_place_list)
                 self.allinfo_dic.setdefault("published_date", self.published_date_list)
                 self.allinfo_dic.setdefault("job_nature", self.job_nature_list)
                 self.allinfo_dic.setdefault("job_experience", self.job_experience_list)
                 self.allinfo_dic.setdefault("qualifications", self.qualifications_list)
                 self.allinfo_dic.setdefault("need_worker_num", self.need_worker_num_list)
                 self.allinfo_dic.setdefault("job_type", self.job_type_list)
                 self.allinfo_dic.setdefault("job_description", self.job_description_list)
                 self.allinfo_dic.setdefault("company_introduction", self.company_introduction_list)
                 self.allinfo_dic.setdefault("company_scale", self.company_scale_list)
                 self.allinfo_dic.setdefault("company_nature", self.company_nature_list)
                 self.allinfo_dic.setdefault("company_industry", self.company_industry_list)
                 self.allinfo_dic.setdefault("company_url", self.company_url_list)
                 self.allinfo_dic.setdefault("company_address", self.company_address_list)
                 self.save_Data()
                 # flush the staging containers so the next posting starts clean
                 self.allinfo_dic.clear()
                 self.job_name_list.clear()
                 self.corporate_name_list.clear()
                 self.benefits_list.clear()
                 self.monthly_salary_list.clear()
                 self.job_place_list.clear()
                 self.published_date_list.clear()
                 self.job_nature_list.clear()
                 self.job_experience_list.clear()
                 self.qualifications_list.clear()
                 self.need_worker_num_list.clear()
                 self.job_type_list.clear()
                 self.job_description_list.clear()
                 self.company_introduction_list.clear()
                 self.company_scale_list.clear()
                 self.company_nature_list.clear()
                 self.company_industry_list.clear()
                 self.company_url_list.clear()
                 self.company_address_list.clear()
         def save_Data(self):
             df = pd.DataFrame(self.allinfo_dic)
             # append each posting to the CSV, writing the header row only once
             df.to_csv("zhaopininfo.csv", mode="a", index=False,
                       header=not os.path.exists("zhaopininfo.csv"))
             # the Excel file holds only the latest posting; the CSV and MySQL table accumulate
             df.to_excel("zhaopininfo.xlsx", index=False)
             df.to_sql("all_results", schema="zhaopin", con=self.get_engine(),
                       if_exists="append", index=False)

         def get_engine(self):
             # the "zhaopin" database must already exist (see the setup sketch below)
             return create_engine("mysql+pymysql://root:Aa1234@localhost:3306/zhaopin?charset=utf8mb4")
    
         def get_next_page(self, nexturl, req):
             # fetch the posting's detail page with the same session and UA headers
             data = req.get(nexturl, headers=self.headers).text
             self.get_page_info(data)
    
         def get_page_info(self, htmlstr):
             try:
                 root = html.fromstring(htmlstr)
                 job_name = root.xpath("//div[@class='top-fixed-box' and not(@id)]//div[@class='inner-left fl']/h1/text()")[0]
                 print(job_name)
                 corporate_name = root.xpath("//div[@class='top-fixed-box' and not(@id)]//div[@class='inner-left fl']/h2/a/text()")[0]
                 benefits = root.xpath("//div[@class='top-fixed-box' and not(@id)]//div[@class='inner-left fl']/div[@class='welfare-tab-box']")[0].text_content()
                 # summary panel: salary, place, date, job nature, experience,
                 # education, headcount, and job category, in page order
                 Nodes = root.xpath("//div[@class='terminalpage-left']//ul/li/strong")
                 monthly_salary = Nodes[0].text_content().strip()
                 job_place = Nodes[1].text_content().strip()
                 published_date = Nodes[2].text_content().strip()
                 job_nature = Nodes[3].text_content().strip()
                 job_experience = Nodes[4].text_content().strip()
                 qualifications = Nodes[5].text_content().strip()
                 need_worker_num = Nodes[6].text_content().strip()
                 job_type = Nodes[7].text_content().strip()

                 job_description = root.xpath("//div[@class='tab-inner-cont']")[0].text_content().strip()
                 company_introduction = root.xpath("//div[@class='tab-inner-cont']")[1].text_content().strip()

                 # company panel: scale, nature, industry, website, address
                 companyNodes = root.xpath("//ul[@class='terminal-ul clearfix terminal-company mt20']/li/strong")
                 company_scale = companyNodes[0].text_content().strip()
                 company_nature = companyNodes[1].text_content().strip()
                 company_industry = companyNodes[2].text_content().strip()
                 company_url = companyNodes[3].text_content().strip()
                 try:
                     company_address = companyNodes[4].text_content().strip()
                 except IndexError:
                     company_address = "地址未填写"  # "address not filled in"
                 self.job_name_list.append(job_name)
                 self.corporate_name_list.append(corporate_name)
                 self.benefits_list.append(benefits)
                 self.monthly_salary_list.append(monthly_salary)
                 self.job_place_list.append(job_place)
                 self.published_date_list.append(published_date)
                 self.job_nature_list.append(job_nature)
                 self.job_experience_list.append(job_experience)
                 self.qualifications_list.append(qualifications)
                 self.need_worker_num_list.append(need_worker_num)
                 self.job_type_list.append(job_type)
                 self.job_description_list.append(job_description)
                 self.company_introduction_list.append(company_introduction)
                 self.company_scale_list.append(company_scale)
                 self.company_nature_list.append(company_nature)
                 self.company_industry_list.append(company_industry)
                 self.company_url_list.append(company_url)
                 self.company_address_list.append(company_address)
             except Exception:
                 traceback.print_exc()
    
    
    if __name__ == "__main__":
        ZhaoPinSpider().start()
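
    The listing assumes a MySQL database named zhaopin already exists for
    df.to_sql to append into. Below is a minimal setup sketch, reusing the
    credentials from the connection string above (root:Aa1234 is just the
    example login; adjust it to your own server):

    from sqlalchemy import create_engine, text

    # connect to the server itself, with no database selected yet;
    # AUTOCOMMIT so the DDL statement takes effect immediately
    engine = create_engine("mysql+pymysql://root:Aa1234@localhost:3306/?charset=utf8mb4",
                           isolation_level="AUTOCOMMIT")
    with engine.connect() as conn:
        # utf8mb4 so the Chinese job text round-trips safely
        conn.execute(text("CREATE DATABASE IF NOT EXISTS zhaopin CHARACTER SET utf8mb4"))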
    
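    After a run, a quick sanity check (a sketch, assuming the run above has
    populated the all_results table) reads the accumulated rows back out:

    import pandas as pd
    from sqlalchemy import create_engine

    engine = create_engine("mysql+pymysql://root:Aa1234@localhost:3306/zhaopin?charset=utf8mb4")
    df = pd.read_sql_table("all_results", con=engine)
    print(df.shape)               # one row per scraped posting
    print(df["job_name"].head())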


  • Original article: https://www.cnblogs.com/c-x-a/p/8511886.html