  • Crawler request library: Selenium (Part 3)

    I. Introduction

    Selenium was originally an automated-testing tool. Crawlers use it mainly to work around the fact that requests cannot execute JavaScript.
    
    In essence, Selenium drives a real browser and fully simulates browser operations such as navigating, typing, clicking, and scrolling, in order to obtain the page after it has been rendered. Multiple browsers are supported:
    
    from selenium import webdriver
    browser=webdriver.Chrome()
    browser=webdriver.Firefox()
    browser=webdriver.PhantomJS()
    browser=webdriver.Safari()
    browser=webdriver.Edge() 

    II. Installation

    1. Headed (GUI) browser

    #Install: selenium + chromedriver
    pip3 install selenium
    Download chromedriver.exe and place it in the Scripts directory of your Python installation. Note that the latest version at the time of writing is 2.38, not 2.9.
    Chinese mirror: http://npm.taobao.org/mirrors/chromedriver/2.38/
    The latest versions are listed on the official site: https://sites.google.com/a/chromium.org/chromedriver/downloads
    
    #Verify the installation
    C:\Users\Administrator>python3
    Python 3.6.1 (v3.6.1:69c0db5, Mar 21 2017, 18:41:36) [MSC v.1900 64 bit (AMD64)] on win32
    Type "help", "copyright", "credits" or "license" for more information.
    >>> from selenium import webdriver
    >>> driver=webdriver.Chrome() # a browser window pops up
    >>> driver.get('https://www.baidu.com')
    >>> driver.page_source
    
    #Note:
    Selenium 3 defaults to the Firefox webdriver, and Firefox requires geckodriver.
    Download link: https://github.com/mozilla/geckodriver/releases
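
    If you prefer Firefox, the flow is identical once geckodriver is in place; a minimal sketch (it assumes geckodriver has already been downloaded and is on PATH):

    from selenium import webdriver

    browser = webdriver.Firefox()  # launches Firefox via geckodriver
    browser.get('https://www.baidu.com')
    print(browser.title)  # confirm the page loaded
    browser.close()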

    2. Headless browser

    Now that PhantomJS is no longer maintained, use Google Chrome in headless mode instead.

    #selenium: 3.12.0
    #webdriver: 2.38
    #chrome.exe: 65.0.3325.181 (official build) (32-bit)
    
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    chrome_options = Options()
    chrome_options.add_argument('window-size=1920x3000') # set the browser resolution
    chrome_options.add_argument('--disable-gpu') # Google's docs recommend this flag to work around a bug
    chrome_options.add_argument('--hide-scrollbars') # hide scrollbars, for some special pages
    chrome_options.add_argument('blink-settings=imagesEnabled=false') # skip loading images, for speed
    chrome_options.add_argument('--headless') # run without a visible window; on Linux without display support, startup fails if this flag is missing
    chrome_options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe" # manually point to the browser binary
    
    
    driver=webdriver.Chrome(chrome_options=chrome_options)
    driver.get('https://www.baidu.com')
    
    print('hao123' in driver.page_source)
    
    driver.close() # always close the browser to release its resources

    III. Basic usage

    from selenium import webdriver
    import time
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.chrome.options import Options
    chrome_options = Options()
    chrome_options.add_argument('window-size=1920x3000') # set the browser resolution
    chrome_options.add_argument('--disable-gpu') # Google's docs recommend this flag to work around a bug
    chrome_options.add_argument('--hide-scrollbars') # hide scrollbars, for some special pages
    chrome_options.add_argument('blink-settings=imagesEnabled=false') # skip loading images, for speed
    chrome_options.add_argument('--headless') # run without a visible window; on Linux without display support, startup fails if this flag is missing
    # chrome_options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe" # manually point to the browser binary
    
    
    bro = webdriver.Chrome(chrome_options=chrome_options)  # pass in the options configured above
    bro.get('http://www.baidu.com')
    
    # print(bro.page_source)
    # time.sleep(3)
    # grab the search input box
    inp = bro.find_element_by_id('kw')
    # type the query
    inp.send_keys('美女')
    inp.send_keys(Keys.ENTER)
    time.sleep(3)
    print(bro.page_source)
    bro.close()

    IV. Selectors

    1. Basic selectors

    #Official docs: http://selenium-python.readthedocs.io/locating-elements.html
    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    import time
    
    driver=webdriver.Chrome()
    driver.get('https://www.baidu.com')
    wait=WebDriverWait(driver,10)
    
    try:
        #=============== All methods ===================
        # 1. find_element_by_id                 find by id
        # 2. find_element_by_link_text          find a link by its text (the text of an <a> tag)
        # 3. find_element_by_partial_link_text  find a link by a fragment of its text (fuzzy match)
        # 4. find_element_by_tag_name           find by tag name
        # 5. find_element_by_class_name         find by class name
        # 6. find_element_by_name               find by the name attribute
        # 7. find_element_by_css_selector       find by CSS selector
        # 8. find_element_by_xpath              find by XPath
        # Notes:
        # 1. any of the above can be rewritten as find_element(By.ID,'kw'); see the sketch after this block
        # 2. the find_elements_by_xxx forms find all matches and return a list
    
        #=============== Example usage ===================
        # 1. find_element_by_id
        print(driver.find_element_by_id('kw'))
    
        # 2. find_element_by_link_text
        # login=driver.find_element_by_link_text('登录')
        # login.click()
    
        # 3. find_element_by_partial_link_text
        login=driver.find_elements_by_partial_link_text('')[0]
        login.click()
    
        # 4. find_element_by_tag_name
        print(driver.find_element_by_tag_name('a'))
    
        # 5. find_element_by_class_name
        button=wait.until(EC.element_to_be_clickable((By.CLASS_NAME,'tang-pass-footerBarULogin')))
        button.click()
    
        # 6. find_element_by_name
        input_user=wait.until(EC.presence_of_element_located((By.NAME,'userName')))
        input_pwd=wait.until(EC.presence_of_element_located((By.NAME,'password')))
        commit=wait.until(EC.element_to_be_clickable((By.ID,'TANGRAM__PSP_10__submit')))
    
        input_user.send_keys('18611453110')
        input_pwd.send_keys('xxxxxx')
        commit.click()
    
        # 7. find_element_by_css_selector
        driver.find_element_by_css_selector('#kw')
    
        # 8. find_element_by_xpath
    
        time.sleep(5)
    
    finally:
        driver.close()
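
    As noted above, every find_element_by_xxx helper can be rewritten in the find_element(By.XXX, ...) form; this sketch shows a few equivalent pairs (reusing Baidu's 'kw' search-box id from the examples above):

    from selenium import webdriver
    from selenium.webdriver.common.by import By

    driver = webdriver.Chrome()
    driver.get('https://www.baidu.com')
    try:
        print(driver.find_element(By.ID, 'kw'))             # same as find_element_by_id('kw')
        print(driver.find_element(By.CSS_SELECTOR, '#kw'))  # same as find_element_by_css_selector('#kw')
        print(len(driver.find_elements(By.TAG_NAME, 'a')))  # find_elements returns a list of all matches
    finally:
        driver.close()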

    2. XPath selectors

    doc='''
    <html>
     <head>
      <base href='http://example.com/' />
      <title>Example website</title>
     </head>
     <body>
      <div id='images'>
       <a href='image1.html'>Name: My image 1 <br /><img src='image1_thumb.jpg' /></a>
       <a href='image2.html'>Name: My image 2 <br /><img src='image2_thumb.jpg' /></a>
       <a href='image3.html'>Name: My image 3 <br /><img src='image3_thumb.jpg' /></a>
       <a href='image4.html'>Name: My image 4 <br /><img src='image4_thumb.jpg' /></a>
       <a href='image5.html' class='li li-item' name='items'>Name: My image 5 <br /><img src='image5_thumb.jpg' /></a>
       <a href='image6.html' name='items'><span><h5>test</h5></span>Name: My image 6 <br /><img src='image6_thumb.jpg' /></a>
      </div>
     </body>
    </html>
    '''
    from lxml import etree
    
    html = etree.HTML(doc)
    # 1. all nodes
    a = html.xpath('//*')
    
    # 2. a specific node
    a = html.xpath('//a')
    
    # 3. children and descendants
    a = html.xpath('//div/a')
    a = html.xpath('//body/a')  # no match: the <a> tags are not direct children of <body>
    a = html.xpath('//body//img')
    
    # 4. parent node
    # a = html.xpath('//body//a[1]/..')
    a = html.xpath('//body//a[@href="image1.html"]/parent::*')
    
    # 5. attribute matching
    a = html.xpath('//body//a[@href="image2.html"]')
    
    # 6. getting text
    a = html.xpath('//body//a[@href="image2.html"]/text()')
    
    # 7. getting attributes
    # a[1] indexes from 1, not 0
    a = html.xpath('//body//a[1]/@href')
    
    # 8. matching one value of a multi-valued attribute
    a = html.xpath('//body//a[@class="li"]')  # no match: the class is "li li-item", not exactly "li"
    a = html.xpath('//body//a[contains(@class,"li")]')
    a = html.xpath('//body//a[contains(@class,"li")]/text()')
    
    # 9. matching on multiple attributes
    a = html.xpath('//body//a[contains(@class,"li") or @name="items"]')
    a = html.xpath('//body//a[contains(@class,"li") or @name="items"]/text()')
    
    # 10. selecting by position
    a = html.xpath('//a[6]/text()')
    # the last one
    a = html.xpath('//a[last()]/text()')
    # the first four (position less than 5)
    a = html.xpath('//a[position()<5]/@href')
    # the fourth from the end (use last()-2 for the third from the end)
    a = html.xpath('//a[last()-3]/@href')
    
    # 11. selecting along node axes
    # ancestor: all ancestor nodes
    a = html.xpath('//img/ancestor::*')
    # only ancestors of a given type
    a = html.xpath('//img/ancestor::a')
    # attribute: attribute values
    a = html.xpath('//a[5]/attribute::*')
    # child: direct children
    a = html.xpath('//a/child::img')
    # descendant: all descendants
    a = html.xpath('//body/descendant::a')
    # following: all nodes after the current one
    a = html.xpath('//head/following::a')
    a = html.xpath('//head/following::a/@href')
    # following-sibling: later siblings of the current node
    a = html.xpath('//a[1]/following-sibling::*[5]/@name')
    
    print(a)
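
    The same XPath expressions work inside Selenium itself, either through find_element_by_xpath or the By.XPATH form; a minimal sketch against Baidu's homepage (again assuming its 'kw' search-box id):

    from selenium import webdriver
    from selenium.webdriver.common.by import By

    driver = webdriver.Chrome()
    driver.get('https://www.baidu.com')
    try:
        input_tag = driver.find_element(By.XPATH, '//*[@id="kw"]')  # the search box, located by XPath
        links = driver.find_elements(By.XPATH, '//a')               # every <a> on the page
        print(input_tag.tag_name, len(links))
    finally:
        driver.close()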

    3. Getting tag attributes

    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    
    browser=webdriver.Chrome()
    
    browser.get('https://www.amazon.cn/')
    
    wait=WebDriverWait(browser,10)
    wait.until(EC.presence_of_element_located((By.ID,'cc-lm-tcgShowImgContainer')))
    
    tag=browser.find_element(By.CSS_SELECTOR,'#cc-lm-tcgShowImgContainer img')
    
    # get a tag attribute
    print(tag.get_attribute('src'))
    
    # get the tag's internal id, location, name, and size (for reference)
    print(tag.id)
    print(tag.location)
    print(tag.tag_name)
    print(tag.size)
    
    browser.close()

    4. Waiting for elements to load

    #1. Selenium only simulates browser behavior, and the browser needs time to parse a page (executing its CSS and JS). Some elements only appear after a while, so to guarantee a lookup succeeds you must wait.
    
    #2. There are two kinds of waits:
    Implicit wait: set before browser.get('xxx'); applies to every element lookup.
    Explicit wait: set after browser.get('xxx'); applies only to a specific element.

    Implicit wait

    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    
    browser=webdriver.Chrome()
    
    #implicit wait: on any element lookup, wait up to 10 seconds for the element to appear
    browser.implicitly_wait(10)
    
    browser.get('https://www.baidu.com')
    
    input_tag=browser.find_element_by_id('kw')
    input_tag.send_keys('美女')
    input_tag.send_keys(Keys.ENTER)
    
    contents=browser.find_element_by_id('content_left') # without a wait, this lookup raises immediately if the element has not loaded yet
    print(contents)
    
    browser.close()

    Explicit wait

    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    
    browser=webdriver.Chrome()
    browser.get('https://www.baidu.com')
    
    
    input_tag=browser.find_element_by_id('kw')
    input_tag.send_keys('美女')
    input_tag.send_keys(Keys.ENTER)
    
    #explicit wait: explicitly wait for one particular element to be present
    wait=WebDriverWait(browser,10)
    wait.until(EC.presence_of_element_located((By.ID,'content_left')))
    
    contents=browser.find_element(By.CSS_SELECTOR,'#content_left')
    print(contents)
    
    browser.close()
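
    presence_of_element_located is only one of the conditions that expected_conditions offers; a short sketch of a few others that are often useful (it reuses Baidu's 'kw' input and 'su' search-button ids, as in the examples above):

    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait

    browser = webdriver.Chrome()
    browser.get('https://www.baidu.com')
    wait = WebDriverWait(browser, 10)
    try:
        wait.until(EC.visibility_of_element_located((By.ID, 'kw')))  # present and visible
        wait.until(EC.element_to_be_clickable((By.ID, 'su')))        # visible and enabled
        wait.until(EC.title_contains('百度'))                        # wait on the page title, not an element
    finally:
        browser.close()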

    5. Page interaction

    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    
    browser=webdriver.Chrome()
    browser.get('https://www.amazon.cn/')
    wait=WebDriverWait(browser,10)
    
    
    
    input_tag=wait.until(EC.presence_of_element_located((By.ID,'twotabsearchtextbox')))
    input_tag.send_keys('iphone 8')
    button=browser.find_element_by_css_selector('#nav-search > form > div.nav-right > div > input')
    button.click()
    
    import time
    time.sleep(3)
    
    input_tag=browser.find_element_by_id('twotabsearchtextbox')
    input_tag.clear() # clear the input box
    input_tag.send_keys('iphone7plus')
    button=browser.find_element_by_css_selector('#nav-search > form > div.nav-right > div > input')
    button.click()
    
    
    
    # browser.close()
    Click and clear
    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By  # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys  # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait  # wait for elements to load
    import time
    
    driver = webdriver.Chrome()
    driver.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
    wait=WebDriverWait(driver,3)
    # driver.implicitly_wait(3)  # or use an implicit wait instead
    
    try:
        driver.switch_to.frame('iframeResult') # switch into the iframeResult frame
        sourse=driver.find_element_by_id('draggable')
        target=driver.find_element_by_id('droppable')
    
        # Approach 1: queue the actions on a single chain and run them in order
        # actions=ActionChains(driver) # get an action-chain object
        # actions.drag_and_drop(sourse,target) # queue the drag-and-drop on the chain
        # actions.perform()
    
        # Approach 2: a fresh chain per step, moving a small offset each time
        ActionChains(driver).click_and_hold(sourse).perform()
        distance=target.location['x']-sourse.location['x']
    
        track=0
        while track < distance:
            ActionChains(driver).move_by_offset(xoffset=2,yoffset=0).perform()
            track+=2
    
        ActionChains(driver).release().perform()
    
        time.sleep(10)
    
    finally:
        driver.close()
    Action chains (for slider captchas)
    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    
    
    
    try:
        browser=webdriver.Chrome()
        browser.get('https://www.baidu.com')
        browser.execute_script('alert("hello world")') # pop up an alert box
    finally:
        browser.close()
    Executing JS in the page
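
    Besides popping an alert, execute_script is handy in crawlers for scrolling pages that lazy-load content, and for reading values back out of the page; a minimal sketch:

    import time
    from selenium import webdriver

    browser = webdriver.Chrome()
    browser.get('https://www.baidu.com')
    try:
        browser.execute_script('window.scrollTo(0, document.body.scrollHeight)')  # scroll to the bottom
        time.sleep(2)  # give lazy-loaded content a moment to appear
        print(browser.execute_script('return document.title'))  # read a value back from the page
    finally:
        browser.close()
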
    #simulate the browser's back and forward buttons
    import time
    from selenium import webdriver
    
    browser=webdriver.Chrome()
    browser.get('https://www.baidu.com')
    browser.get('https://www.taobao.com')
    browser.get('http://www.sina.com.cn/')
    
    browser.back()
    time.sleep(10)
    browser.forward()
    browser.close()
    Back and forward
    import time
    from selenium import webdriver
    
    browser=webdriver.Chrome()
    browser.get('https://www.baidu.com')
    browser.execute_script('window.open()')
    
    print(browser.window_handles) # list all the tab handles
    browser.switch_to.window(browser.window_handles[1])
    browser.get('https://www.taobao.com')
    time.sleep(10)
    browser.switch_to.window(browser.window_handles[0])
    browser.get('https://www.sina.com.cn')
    browser.close()
    Multiple tabs
    from selenium import webdriver
    from selenium.common.exceptions import TimeoutException,NoSuchElementException,NoSuchFrameException
    
    try:
        browser=webdriver.Chrome()
        browser.get('http://www.runoob.com/try/try.php?filename=jqueryui-api-droppable')
        browser.switch_to.frame('iframssseResult') # deliberately wrong frame name, raising NoSuchFrameException
    
    except TimeoutException as e:
        print(e)
    except NoSuchFrameException as e:
        print(e)
    finally:
        browser.close()
    Exception handling

    6. Cookie storage (important)

    When a crawler project runs against a site, you will want several sets of cookies to simulate several users; otherwise the crawler is easily detected and banned.

    #cookies
    from selenium import webdriver
    
    browser=webdriver.Chrome()
    browser.get('https://www.zhihu.com/explore')
    print(browser.get_cookies())
    browser.add_cookie({'k1':'xxx','k2':'yyy'})
    print(browser.get_cookies())
    
    # browser.delete_all_cookies()

    Log in to cnblogs through the driven browser, grab and save the cookies, then use them to access cnblogs without logging in again:

    import time
    from selenium import webdriver
    import json
    import requests
    
    bro = webdriver.Chrome()
    bro.get('https://account.cnblogs.com/signin?returnUrl=https%3A%2F%2Fwww.cnblogs.com%2F')
    
    time.sleep(30)  # leave time to log in manually in the opened browser
    bro.refresh()
    cookie = bro.get_cookies()
    print(cookie)
    with open('cookie.txt', 'w') as f:
        json.dump(cookie, f)
    
    
    
    time.sleep(1)
    with open('cookie.txt', 'r') as f:
        v = json.load(f)
    cookies = {}
    # pull the name and value out of each cookie, converting to the form requests expects
    for cookie in v:
        cookies[cookie['name']] = cookie['value']
    print(cookies)
    
    
    # request headers
    headers = {
        # 'authority': 'www.jd.com',
        # 'method': 'GET',
        # 'path': '/',
        # 'scheme': 'https',
        # 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        # 'accept-encoding': 'gzip, deflate, br',
        # 'accept-language': 'zh-CN,zh;q=0.9',
        # 'cache-control': 'max-age=0',
        # 'upgrade-insecure-requests': '1',
        'authority': 'i-beta.cnblogs.com',
        'method': 'GET',
        'path': '/',
        'scheme': 'https',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'max-age=0',
        'if-modified-since': 'Sun, 24 Nov 2019 06:14:53 GMT',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'none',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    
    }
    # send the request with the harvested cookies
    response = requests.get('https://i-beta.cnblogs.com/api/user', headers=headers, cookies=cookies)
    print('---------------------')
    response.encoding = response.apparent_encoding
    print(response.text)
    Getting cnblogs cookies
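
    The saved cookies can also be replayed into Selenium itself, so a fresh browser session starts out logged in without repeating the manual login; a minimal sketch (it assumes cookie.txt was written by the script above):

    import json
    import time
    from selenium import webdriver

    bro = webdriver.Chrome()
    bro.get('https://www.cnblogs.com/')  # cookies can only be added for the domain currently open
    with open('cookie.txt', 'r') as f:
        cookies = json.load(f)
    for cookie in cookies:
        bro.add_cookie(cookie)  # replay each saved cookie into the browser
    bro.refresh()  # reload; the session should now be logged in
    time.sleep(5)
    bro.close()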

    7. Projects

    Scraping Taobao (incomplete)

    Taobao products can only be searched after logging in, so every crawl requires a manual login; Taobao's security level is also high, and logging in triggers a verification code.

    from selenium import webdriver
    from selenium.webdriver.common.keys import Keys
    import time
    
    bro = webdriver.Chrome()
    bro.get('https://www.taobao.com/')
    bro.implicitly_wait(10)
    
    
    def get_goods(bro):
        print('----------------------------------------->')
        goods_list = bro.find_elements_by_class_name('J_MouserOnverReq')  # the product cards on the results page
        for goods in goods_list:
            url = goods.find_element_by_css_selector('.J_ClickStat').get_attribute('href')
            img_url = goods.find_element_by_css_selector('.J_ItemPic').get_attribute('src')
            price = goods.find_element_by_css_selector('.price strong').text
            num = goods.find_element_by_css_selector('.deal-cnt').text
            title = goods.find_element_by_css_selector('.J_ClickStat').text
            print('''
                    product link:  %s
                    product image: %s
                    product name:  %s
                    product price: %s
                    product sales: %s
    
                    ''' % (url, img_url, title, price, num))
        next_page = bro.find_element_by_partial_link_text('下一页')
        next_page.click()
        time.sleep(1)
        get_goods(bro)
    
    
    inp = bro.find_element_by_id('q')
    inp.send_keys('手机')  # type the product to search for
    inp.send_keys(Keys.ENTER)  # press Enter
    bro.find_element_by_class_name('grid')  # with the implicit wait, this blocks until the results grid appears
    
    
    try:
        get_goods(bro)
    except Exception as e:
        print(e)
    finally:
        print('program finished')
        bro.close()
    Scraping Taobao product pages

    JD can be scraped without logging in, which makes it comparatively straightforward.

    from selenium import webdriver
    from selenium.webdriver import ActionChains
    from selenium.webdriver.common.by import By # lookup strategies: By.ID, By.CSS_SELECTOR, ...
    from selenium.webdriver.common.keys import Keys # keyboard key operations
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait # wait for elements to load
    import time
    
    
    
    def get_goods(driver):
        try:
            goods=driver.find_elements_by_class_name('gl-item')
    
            for good in goods:
                detail_url=good.find_element_by_tag_name('a').get_attribute('href')
    
                p_name=good.find_element_by_css_selector('.p-name em').text.replace('\n','')
                price=good.find_element_by_css_selector('.p-price i').text
                p_commit=good.find_element_by_css_selector('.p-commit a').text
    
                msg = '''
                product : %s
                link    : %s
                price   : %s
                comments: %s
                ''' % (p_name,detail_url,price,p_commit)
    
                print(msg,end='\n\n')
    
            button=driver.find_element_by_partial_link_text('下一页')
            button.click()
            time.sleep(1)
            get_goods(driver)
        except Exception:
            pass
    
    def spider(url,keyword):
        driver = webdriver.Chrome()
        driver.get(url)
        driver.implicitly_wait(3)  # use an implicit wait
        try:
            input_tag=driver.find_element_by_id('key')
            input_tag.send_keys(keyword)
            input_tag.send_keys(Keys.ENTER)
            get_goods(driver)
        finally:
            driver.close()
    
    if __name__ == '__main__':
        spider('https://www.jd.com/',keyword='iPhone8手机')
    Scraping JD products

    8. Case study: cracking the Zhihu login to obtain cookies

    A detailed write-up is on the xiaoyuanqujing blog: https://www.cnblogs.com/xiaoyuanqujing/articles/11843045.html
    Zhihu login

