zoukankan      html  css  js  c++  java
  • 【python】python2.x 与 python3.x区别对照+缩进错误解决方法

    仅仅列出我用到的,不全。

    划重点:

      1. urllib2 用 urllib.request 代替

      2. urllib.urlencode 用 urllib.parse.urlencode 代替

      3. cookielib 用 http.cookiejar 代替

      4. print " "  用 print(" ") 代替

      5. urllib2.URLError 用 urllib.error.URLError 代替

      6. urllib2.HTTPError 用 urllib.error.HTTPError 代替

      7. except urllib2.URLError, e:  用  except urllib.error.URLError as e: 代替

    在python3.4.3自带的IDLE中写代码,经常出现缩进错误,很难查找。

    解决方案:拷贝到Notepad++里面,视图中显示空格和制表符,就可以明显看出问题在哪了。

    设置了header的网络请求,在Python2.x中的写法

    # Python 2.x example: POST to a login URL with a custom User-Agent header.
    # Kept in Python-2 form on purpose — it is the "before" half of the 2.x/3.x comparison.
    import urllib  
    import urllib2  
    
    url = 'http://www.server.com/login'
    user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'  
    values = {'username' : 'kzy',  'password' : '123' }  
    headers = { 'User-Agent' : user_agent }  
    # In Python 2, urlencode lives in urllib and its str result is passed straight to Request.
    data = urllib.urlencode(values)  
    # Presence of `data` makes this a POST; headers dict supplies User-Agent.
    request = urllib2.Request(url, data, headers)  
    response = urllib2.urlopen(request)  
    page = response.read() 

    在Python3.x中的写法

    import urllib.parse
    import urllib.request
    
    # Python 3.x version of the same request: urlencode moved to urllib.parse,
    # Request/urlopen moved to urllib.request.
    url = 'http://www.baidu.com'
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36'
    headers = {'User-Agent': user_agent}
    values = {'username': 'kzy', 'password': '123'}
    # urlencode returns str in Python 3; urlopen wants bytes, so the encoding
    # must be specified explicitly here.
    data = urllib.parse.urlencode(values).encode(encoding='UTF8')
    request = urllib.request.Request(url, data, headers)
    response = urllib.request.urlopen(request)
    page = response.read()

    我在学习静觅的爬虫教程,照着把里面的基础部分的代码都写了一遍。

    教程地址:http://cuiqingcai.com/1052.html

    里面原本的代码都是2.x的,我全部用3.x学着写了一遍。如下:

    # Python 3 rewrites of the Python-2 crawler-tutorial examples.
    # Every triple-quoted block below is a disabled (commented-out) example;
    # only the login-simulation code at the very bottom executes when run.
    import urllib.parse
    import urllib.request
    
    # Disabled example: basic GET with urlopen.
    """
    response = urllib.request.urlopen("http://www.baidu.com")
    print(response.read())
    """
    
    
    # Disabled example: POST request with a custom header and form data.
    """
    #设置了header和data的请求
    
    url = 'http://www.baidu.com'
    user_agent = 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.93 Safari/537.36'
    values = {'username':'kzy','password':'123'}
    
    headers = {'User-Agent':user_agent}
    
    data = urllib.parse.urlencode(values).encode(encoding='UTF8')
    
    request = urllib.request.Request(url, data, headers)
    response = urllib.request.urlopen(request)
    
    page = response.read()
    """
    
    
    
    # Disabled example: proxy setup via ProxyHandler + build_opener.
    """
    #设置代理  避免因为某个IP的访问次数过多导致的禁止访问
    enable_proxy = True
    proxy_handler = urllib.request.ProxyHandler({"http":'http://some-proxy.com:8080'})
    
    null_proxy_handler = urllib.request.ProxyHandler({})
    
    if enable_proxy:
        opener = urllib.request.build_opener(proxy_handler)
    else:
        opener = urllib.request.build_opener(null_proxy_handler)
    
    
    urllib.request.install_opener(opener)    
           
    """
    
    
    # Disabled example: urlopen with a timeout.
    """
    
    #设置Timeout
    response = urllib.request.urlopen('http://www.baidu.com', timeout = 10)
    """
    
    
    # Disabled example: HTTP PUT/DELETE by overriding get_method.
    """
    #使用http的 put或delete方法
    url = 'http://www.baidu.com'
    request = urllib.request.Request(url, data=data)
    request.get_method = lambda:'PUT' #or 'DELETE'
    response = urllib.request.urlopen(request)
    """
    
    
    # Disabled example: print sent/received packets for debugging (DebugLog).
    """
    #使用DebugLog 把收发包的内容在屏幕上打印出来 方便调试
    httpHandler = urllib.request.HTTPHandler(debuglevel=1)
    
    httpsHandler = urllib.request.HTTPSHandler(debuglevel=1)
    opener = urllib.request.build_opener(httpHandler, httpsHandler)
    
    urllib.request.install_opener(opener)
    response = urllib.request.urlopen('https://its.pku.edu.cn/netportal/netportal_UTF-8.jsp', timeout = 5)
    """
    
    
    # Disabled example: catching HTTPError before URLError.
    """
    #URLError异常处理
    
    from urllib.error import URLError, HTTPError
    request = urllib.request.Request('http://www.baidu.com')
    try:
        urllib.request.urlopen(request, timeout = 5)
    except HTTPError as e:
        print('Error code:', e.code)
    except URLError as e:
        
           print('Reason:', e.reason)
    """
    
    
    # Disabled example: single URLError handler, attribute-tested with hasattr.
    """
    #URLError异常处理 属性判断
    request = urllib.request.Request('https://its.pku.edu.cn/netportal/netportal_UTF-8.jsp')
    try:
        urllib.request.urlopen(request, timeout = 5)
    except urllib.error.URLError as e:
        if hasattr(e, "code"):     #hasattr 判断变量是否有某个属性
            print(e.code)
        if hasattr(e, "reason"):        
            print(e.reason) 
    else:
        print("OK")
    """
    
    
    # Disabled example: capture cookies into an in-memory CookieJar.
    """
    #获取cookie保存到变量
    import http.cookiejar
    #声明一个CookieJar对象实例来保存cookie
    cookie = http.cookiejar.CookieJar()
    #利用HTTPCookieProcessor对象来创建cookie处理器
    handler = urllib.request.HTTPCookieProcessor(cookie)
    #通过handler来构建opener
    opener = urllib.request.build_opener(handler)
    #此处的open方法同urlopen
    response = opener.open('https://its.pku.edu.cn/netportal/netportal_UTF-8.jsp')
    for item in cookie:
        print('Name = '+item.name)
        print('Value = '+item.value)
    """
    
    
    # Disabled example: persist cookies to a file with MozillaCookieJar.save().
    """
    
    #获取cookie保存到文件
    
    import http.cookiejar
    
    #设置保存的文件
    
    filename = 'cookie.txt'
    
    #声明一个MozillaCookieJar对象实例来保存cookie,之后写入文件
    cookie = http.cookiejar.MozillaCookieJar(filename)
    
    #创建cookie处理器
    
    handler = urllib.request.HTTPCookieProcessor(cookie)
    #构建opener
    
    opener = urllib.request.build_opener(handler)
    
    
    
    response = opener.open("https://its.pku.edu.cn/netportal/netportal_UTF-8.jsp")
    
    #保存到cookie文件
    
    cookie.save(ignore_discard=True,ignore_expires=True)
    """
    
    # Disabled example: load cookies back from the file and reuse them.
    """
    #从文件中获取cookie并访问
    
    import http.cookiejar
    
    #创建MozillaCookieJar实例对象
    
    cookie = http.cookiejar.MozillaCookieJar()
    
    #从文件中读取cookie内容到变量
    
    cookie.load('cookie.txt',ignore_discard=True,ignore_expires=True)
    
    #创建请求的request
    req = urllib.request.Request('https://its.pku.edu.cn/netportal/netportal_UTF-8.jsp')
    #创建opener
    
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
    response = opener.open(req)
    print(response.read())
    """
    
    # Live code: simulate a login (author notes the login was NOT successful).
    # Opens the login URL with POSTed credentials, saves the received cookies,
    # then reuses the cookie-aware opener for a second page.
    import http.cookiejar
    
    filename = 'cookie.txt'
    cookie = http.cookiejar.MozillaCookieJar(filename)
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie))
    # NOTE(review): field names 'stuid'/'pwd' must match the real login form —
    # the original author was unsure how to discover them (inspect the form HTML).
    postdata = urllib.parse.urlencode({'stuid':'******','pwd':'******'}).encode(encoding='UTF8') # how to know the field names are stuid and pwd? (inspect the login form)
    loginUrl = 'http://xxxxxx.com'
    result = opener.open(loginUrl, postdata)
    cookie.save(ignore_discard=True, ignore_expires=True)
    gradeUrl='http://xxxxxx.com'
    result = opener.open(gradeUrl)
    print(result.read())
  • 相关阅读:
    用栈实现队列
    “非常规”的漏洞挖掘思路与技巧-请求参数加密,js接口- 漏洞定级评分的标准与关注点-违规测试标准详解
    【linux 文件管理】2-文件目录命令
    EHC
    kali linux高级渗透测试第七课
    maltego CE社区版-Domain与DNS name
    name servers-域名服务器
    【linux 文件管理】1-文件目录结构
    web应用安全自学指南
    kali linux高级渗透测试第六课
  • 原文地址:https://www.cnblogs.com/dplearning/p/4854746.html
Copyright © 2011-2022 走看看