什么是Urllib?
Python内置的HTTP请求库
urllib.request 请求模块
urllib.error 异常处理模块
urllib.parse url解析模块
urllib.robotparser robots.txt解析模块
相比 Python2 的变化
Python2中的urllib2在Python3中被统一移动到了urllib.request中
python2
import urllib2
response = urllib2.urlopen('http://www.cnblogs.com/0bug')
Python3
import urllib.request
response = urllib.request.urlopen('http://www.cnblogs.com/0bug/')
urlopen()
不加data是以GET方式发送,加data是以POST发送
|
1
2
3
4
5
|
# Plain GET: urlopen() without data=; decode the byte response into text.
import urllib.request

resp = urllib.request.urlopen('http://www.cnblogs.com/0bug')
page = resp.read().decode('utf-8')
print(page)
结果加data发送POST请求
|
1
2
3
4
5
6
|
# POST: passing data= (url-encoded bytes) switches urlopen() to POST.
import urllib.parse
import urllib.request

payload = bytes(urllib.parse.urlencode({'hello': '0bug'}), encoding='utf-8')
resp = urllib.request.urlopen('http://httpbin.org/post', data=payload)
print(resp.read())
结果

timeout 超时时间
|
1
2
3
4
|
# timeout=: urlopen() raises urllib.error.URLError when the server is too slow.
import urllib.request

resp = urllib.request.urlopen('http://www.cnblogs.com/0bug', timeout=0.01)
print(resp.read())
结果|
1
2
3
4
5
6
7
8
|
# Catch the timeout: the URLError's .reason is a socket.timeout instance.
import socket
import urllib.error
import urllib.request

try:
    resp = urllib.request.urlopen('http://www.cnblogs.com/0bug', timeout=0.01)
except urllib.error.URLError as exc:
    if isinstance(exc.reason, socket.timeout):
        print('请求超时')
结果响应
1.响应类型
|
1
2
3
4
|
# The object returned by urlopen() is an http.client.HTTPResponse.
import urllib.request

resp = urllib.request.urlopen('http://www.cnblogs.com/0bug')
print(type(resp))
结果2.状态码、响应头
|
1
2
3
4
5
6
|
# Status code, all headers, and a single header by name.
import urllib.request

resp = urllib.request.urlopen('http://www.cnblogs.com/0bug')
print(resp.status)
print(resp.getheaders())
print(resp.getheader('Content-Type'))
结果3.响应体
响应体是字节流,需要decode('utf-8')
|
1
2
3
4
5
|
# The body is a byte stream; decode('utf-8') turns it into text.
import urllib.request

resp = urllib.request.urlopen('http://www.cnblogs.com/0bug')
text = resp.read().decode('utf-8')
print(text)
Request
|
1
2
3
4
5
|
# Wrap the URL in a Request object before handing it to urlopen().
import urllib.request

req = urllib.request.Request('http://www.cnblogs.com/0bug')
resp = urllib.request.urlopen(req)
print(resp.read().decode('utf-8'))
结果添加请求头信息
|
1
2
3
4
5
6
7
8
9
10
11
12
|
# POST with custom headers supplied to the Request constructor.
from urllib import request, parse

url = 'http://httpbin.org/post'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
    'Host': 'httpbin.org',
}
form = {'name': '0bug'}
body = bytes(parse.urlencode(form), encoding='utf-8')
req = request.Request(url=url, data=body, headers=headers, method='POST')
resp = request.urlopen(req)
print(resp.read().decode('utf-8'))
结果add_header
|
1
2
3
4
5
6
7
8
9
10
|
# Same POST, but the header is attached afterwards via add_header().
from urllib import request, parse

url = 'http://httpbin.org/post'
form = {'name': '0bug'}
body = bytes(parse.urlencode(form), encoding='utf-8')
req = request.Request(url=url, data=body, method='POST')
req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36')
resp = request.urlopen(req)
print(resp.read().decode('utf-8'))
Handler
代理:
|
1
2
3
4
5
6
7
8
9
|
# Route traffic through proxies with ProxyHandler + build_opener().
# NOTE: 'http代理'/'https代理' are placeholders — substitute real proxy URLs.
import urllib.request

proxies = urllib.request.ProxyHandler({
    'http': 'http代理',
    'https': 'https代理',
})
opener = urllib.request.build_opener(proxies)
resp = opener.open('http://www.cnblogs.com/0bug')
print(resp.read())
Cookie
|
1
2
3
4
5
6
7
8
|
# Collect the cookies a response sets into an in-memory CookieJar.
import http.cookiejar
import urllib.request

jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
resp = opener.open('http://www.baidu.com')
for cookie in jar:
    print(cookie.name + "=" + cookie.value)
结果Cookie保存为文件
|
1
2
3
4
5
6
7
8
|
# Persist cookies to disk in Mozilla (Netscape) format.
import http.cookiejar
import urllib.request

filename = 'cookie.txt'
jar = http.cookiejar.MozillaCookieJar(filename)
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
opener.open('http://www.baidu.com')
jar.save(ignore_discard=True, ignore_expires=True)
cookie.txt另一种方式存
|
1
2
3
4
5
6
7
8
|
# Alternative on-disk format: libwww-perl (LWP).
import http.cookiejar
import urllib.request

filename = 'cookie.txt'
jar = http.cookiejar.LWPCookieJar(filename)
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
opener.open('http://www.baidu.com')
jar.save(ignore_discard=True, ignore_expires=True)
cookie.txt

用什么格式保存，就应该用什么格式读取
|
1
2
3
4
5
6
7
8
|
# Load cookies back from disk — use the same jar class that wrote the file.
import http.cookiejar
import urllib.request

jar = http.cookiejar.LWPCookieJar()
jar.load('cookie.txt', ignore_discard=True, ignore_expires=True)
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
resp = opener.open('http://www.baidu.com')
print(resp.read().decode('utf-8'))
异常处理
|
1
2
3
4
5
6
|
# URLError covers every urllib failure; .reason describes the cause.
from urllib import request, error

try:
    resp = request.urlopen('http://www.cnblogs.com/0bug/xxxx')
except error.URLError as exc:
    print(exc.reason)
结果|
1
2
3
4
5
6
7
8
9
10
|
# Catch HTTPError (a URLError subclass) first, then fall back to URLError.
# Fix: the sep='\n' string literal had been split across two lines by a
# literal newline inside the quotes — restored as an escape sequence.
from urllib import request, error

try:
    response = request.urlopen('http://www.cnblogs.com/0bug/xxxx')
except error.HTTPError as e:
    # HTTPError additionally carries the status code and response headers.
    print(e.reason, e.code, e.headers, sep='\n')
except error.URLError as e:
    print(e.reason)
else:
    print('Request Successfully')
结果|
1
2
3
4
5
6
7
8
9
10
|
# Inspect the type of e.reason to recognise a timeout specifically.
import socket
import urllib.error
import urllib.request

try:
    resp = urllib.request.urlopen('http://www.cnblogs.com/0bug/xxxx', timeout=0.001)
except urllib.error.URLError as exc:
    print(type(exc.reason))
    if isinstance(exc.reason, socket.timeout):
        print('请求超时')
结果URL解析
|
1
2
3
4
5
|
# urlparse() splits a URL into scheme/netloc/path/params/query/fragment.
from urllib.parse import urlparse

parsed = urlparse('www.baidu.com/index.html;user?id=5#comment')
print(type(parsed))
print(parsed)
结果|
1
2
3
4
|
# scheme= supplies a default, used only when the URL itself names none.
from urllib.parse import urlparse

parsed = urlparse('www.baidu.com/index.html;user?id=5#comment', scheme='https')
print(parsed)
结果|
1
2
3
4
|
# A scheme already present in the URL wins over the scheme= default.
from urllib.parse import urlparse

parsed = urlparse('http://www.baidu.com/index.html;user?id=5#comment', scheme='https')
print(parsed)
结果|
1
2
3
4
|
# allow_fragments=False: the '#...' part stays inside the query.
from urllib.parse import urlparse

parsed = urlparse('http://www.badiu.com/index.html;user?id=5#comment', allow_fragments=False)
print(parsed)
结果|
1
2
3
4
|
# With no query present, the un-split fragment sticks to the path instead.
from urllib.parse import urlparse

parsed = urlparse('http://www.badiu.com/index.html#comment', allow_fragments=False)
print(parsed)
结果urlunparse
|
1
2
3
4
|
# urlunparse() rebuilds a URL from a 6-element iterable
# (scheme, netloc, path, params, query, fragment).
from urllib.parse import urlunparse

parts = ['http', 'www.baidu.com', 'index.html', 'user', 'id=6', 'comment']
print(urlunparse(parts))
结果urljoin
|
1
2
3
4
5
6
7
8
9
10
|
# urljoin(base, url): components present in the second argument override
# the corresponding components of the base.
from urllib.parse import urljoin

cases = [
    ('http://www.baidu.com', 'ABC.html'),
    ('http://www.baidu.com', 'https://www.cnblogs.com/0bug'),
    ('http://www.baidu.com/0bug', 'https://www.cnblogs.com/0bug'),
    ('http://www.baidu.com/0bug', 'https://www.cnblogs.com/0bug?q=2'),
    ('http://www.baidu.com/0bug?q=2', 'https://www.cnblogs.com/0bug'),
    ('http://www.baidu.com', '?q=2#comment'),
    ('www.baidu.com', '?q=2#comment'),
    ('www.baidu.com#comment', '?q=2'),
]
for base, url in cases:
    print(urljoin(base, url))
结果urlencode
|
1
2
3
4
5
6
7
8
9
|
# urlencode() serialises a mapping into a query string.
from urllib.parse import urlencode

query = {
    'name': '0bug',
    'age': 25,
}
base_url = 'http://www.badiu.com?'
print(base_url + urlencode(query))