python爬虫入门requests模块
2023-09-14 09:07:13 时间
Requests
Python标准库中提供了 urllib、urllib2、httplib 等模块以供发送 HTTP 请求使用,
但是,它的 API 太渣了。它是为另一个时代、另一个互联网所创建的。
它需要巨量的工作,甚至包括各种方法覆盖,来完成最简单的任务。
Requests 是使用 Apache2 Licensed 许可证的 基于Python开发的HTTP 库,
其在Python内置模块的基础上进行了高度的封装,从而使得Pythoner进行网络请求时,
变得美好了许多,使用Requests可以轻而易举地完成浏览器可以完成的任何操作。
简介
安装:
pip install requests
requests常用属性:
response = requests.get(url)
response.text
response.content
response.encoding
response.apparent_encoding
response.status_code
301 永久重定向
302 临时重定向
response.cookies.get_dict()
1、GET请求
1、无参数实例
# GET request without query parameters.
import requests

ret = requests.get('https://github.com/timeline.json')
# print() replaces the Python 2 print statement (a SyntaxError in Python 3,
# and inconsistent with the print() calls used later in this article).
print(ret.url)
print(ret.text)
2、有参数实例
# GET request with query parameters passed via `params`.
import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.get("http://httpbin.org/get", params=payload)
# Python 3 print() — the original Python 2 print statement would not run on Python 3.
print(ret.url)   # the payload is URL-encoded into the query string
print(ret.text)
2、POST请求
1、基本POST实例
# Basic POST request: `data` is sent as a form-encoded body.
import requests

payload = {'key1': 'value1', 'key2': 'value2'}
ret = requests.post("http://httpbin.org/post", data=payload)
# Python 3 print() — fixes the Python 2 print statement from the original snippet.
print(ret.text)
2、发送请求头和数据实例
# POST with custom headers and a JSON-serialized body.
import requests
import json

url = 'https://api.github.com/some/endpoint'
payload = {'some': 'data'}
headers = {'content-type': 'application/json'}
# json.dumps() turns the dict into a JSON string for the request body;
# requests.post(url, json=payload) would do the same serialization implicitly.
ret = requests.post(url, data=json.dumps(payload), headers=headers)
# Python 3 print() — fixes the Python 2 print statements from the original snippet.
print(ret.text)
print(ret.cookies)
3、其他请求
# Signatures of the convenience wrappers (illustrative — `url` is a placeholder):
requests.get(url, params=None, **kwargs)
requests.post(url, data=None, json=None, **kwargs)
requests.put(url, data=None, **kwargs)
requests.head(url, **kwargs)
requests.delete(url, **kwargs)
requests.patch(url, data=None, **kwargs)
requests.options(url, **kwargs)
# All of the methods above are built on top of this one:
requests.request(method, url, **kwargs)
4、更多参数
def request(method, url, **kwargs):
    """Constructs and sends a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
    :param json: (optional) json data to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How long to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) whether the SSL cert will be verified. A CA_BUNDLE path can also be provided. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

        >>> import requests
        >>> req = requests.request('GET', 'http://httpbin.org/get')
        <Response [200]>
    """
def param_method_url():
    """Demonstrate the `method` and `url` arguments of requests.request()."""
    requests.request(method='get', url='http://127.0.0.1:8000/test/')
    requests.request(method='post', url='http://127.0.0.1:8000/test/')
def param_param():
    """Demonstrate the `params` argument (data sent in the URL query string)."""
    # - can be a dict
    # - can be a string
    # - can be bytes (ASCII range only)
    requests.request(method='get',
                     url='http://127.0.0.1:8000/test/',
                     params={'k1': 'v1', 'k2': '水电费'})
    requests.request(method='get',
                     url='http://127.0.0.1:8000/test/',
                     params="k1=v1&k2=水电费&k3=v3&k3=vv3")
    requests.request(method='get',
                     url='http://127.0.0.1:8000/test/',
                     params=bytes("k1=v1&k2=k2&k3=v3&k3=vv3", encoding='utf8'))
    # Error case: byte params containing non-ASCII characters are not accepted.
    requests.request(method='get',
                     url='http://127.0.0.1:8000/test/',
                     params=bytes("k1=v1&k2=水电费&k3=v3&k3=vv3", encoding='utf8'))
def param_data():
    """Demonstrate the `data` argument (request body)."""
    # Can be a dict
    # Can be a string
    # Can be bytes
    # Can be a file object
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': '水电费'})
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data="k1=v1; k2=v2; k3=v3; k3=v4")
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data="k1=v1;k2=v2;k3=v3;k3=v4",
                     headers={'Content-Type': 'application/x-www-form-urlencoded'})
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data=open('data_file.py', mode='r', encoding='utf-8'),
                     # The file content is: k1=v1;k2=v2;k3=v3;k3=v4
                     headers={'Content-Type': 'application/x-www-form-urlencoded'})
def param_json():
    """Demonstrate the `json` argument."""
    # The given data is serialized to a string via json.dumps(...),
    # sent in the request body, and Content-Type is set to
    # {'Content-Type': 'application/json'}
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水电费'})
def param_headers():
    """Demonstrate sending custom request headers to the server."""
    # Note: the explicit Content-Type here overrides the application/json
    # header that the `json` argument would otherwise set.
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     json={'k1': 'v1', 'k2': '水电费'},
                     headers={'Content-Type': 'application/x-www-form-urlencoded'}
                     )
def param_cookies():
    """Demonstrate sending cookies to the server."""
    # Plain dict form:
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies={'cook1': 'value1'},
                     )
    # A CookieJar also works (the dict form is a wrapper built on top of it).
    from http.cookiejar import CookieJar
    from http.cookiejar import Cookie

    obj = CookieJar()
    obj.set_cookie(Cookie(version=0, name='c1', value='v1', port=None, domain='', path='/', secure=False, expires=None,
                          discard=True, comment=None, comment_url=None, rest={'HttpOnly': None}, rfc2109=False,
                          port_specified=False, domain_specified=False, domain_initial_dot=False, path_specified=False)
                   )
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     data={'k1': 'v1', 'k2': 'v2'},
                     cookies=obj)
def param_files():
    """Upload files in several forms via the `files` argument."""
    # Plain file object: field name 'f1', filename taken from the handle.
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     files={'f1': open('readme', 'rb')})
    # 2-tuple: custom filename plus a file object.
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     files={'f1': ('test.txt', open('readme', 'rb'))})
    # 2-tuple with an in-memory string as the file body.
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     files={'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf")})
    # 4-tuple: filename, body, content type, and extra per-part headers.
    requests.request(method='POST',
                     url='http://127.0.0.1:8000/test/',
                     files={'f1': ('test.txt', "hahsfaksfa9kasdjflaksdjf",
                                   'application/text', {'k1': '0'})})
def param_auth():
    """Demonstrate the `auth` argument (Basic and Digest HTTP auth)."""
    from requests.auth import HTTPBasicAuth, HTTPDigestAuth

    ret = requests.get('https://api.github.com/user',
                       auth=HTTPBasicAuth('wupeiqi', 'sdfasdfasdf'))
    print(ret.text)
    ret = requests.get('http://192.168.1.1',
                       auth=HTTPBasicAuth('admin', 'admin'))
    # Override the decoding used by .text (this router's pages are GBK-encoded).
    ret.encoding = 'gbk'
    print(ret.text)
    ret = requests.get('http://httpbin.org/digest-auth/auth/user/pass',
                       auth=HTTPDigestAuth('user', 'pass'))
    print(ret)
def param_timeout():
    """Demonstrate the `timeout` argument."""
    # A single number limits both the connect and the read phase.
    resp = requests.get('http://google.com/', timeout=1)
    print(resp)
    # A (connect, read) tuple sets a separate limit for each phase.
    resp = requests.get('http://google.com/', timeout=(5, 1))
    print(resp)
def param_allow_redirects():
    """Demonstrate `allow_redirects` (here: do not follow redirects)."""
    ret = requests.get('http://127.0.0.1:8000/test/', allow_redirects=False)
    print(ret.text)
def param_proxies():
    """Demonstrate the `proxies` argument and proxy authentication."""
    # Scheme-keyed form: one proxy per protocol.
    # NOTE(review): this dict is immediately overwritten below and never used —
    # it is kept only to illustrate the scheme-keyed format.
    proxies = {
        "http": "61.172.249.96:80",
        "https": "http://61.185.219.126:3128",
    }
    # Host-keyed form: a proxy for requests to one specific host.
    proxies = {'http://10.20.1.128': 'http://10.10.1.10:5323'}
    ret = requests.get("http://www.proxy360.cn/Proxy", proxies=proxies)
    print(ret.headers)
    # Proxy that itself requires authentication:
    from requests.auth import HTTPProxyAuth

    proxyDict = {
        'http': '77.75.105.165',
        'https': '77.75.105.165'
    }
    auth = HTTPProxyAuth('username', 'mypassword')
    r = requests.get("http://www.google.com", proxies=proxyDict, auth=auth)
    print(r.text)
def param_stream():
    """Demonstrate the `stream` argument (deferred body download)."""
    ret = requests.get('http://127.0.0.1:8000/test/', stream=True)
    print(ret.content)
    ret.close()
    # closing() guarantees the connection is released even on error.
    from contextlib import closing
    with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
        # Process the response here.
        for i in r.iter_content():
            print(i)
def requests_session():
    """Demonstrate requests.Session: cookies persist across calls automatically."""
    import requests

    session = requests.Session()
    # 1. Visit any page first to obtain the initial cookie.
    i1 = session.get(url="http://dig.chouti.com/help/service")
    # 2. Log in; the previous cookie is sent automatically and the backend
    #    authorizes the 'gpsd' value inside it.
    i2 = session.post(
        url="http://dig.chouti.com/login",
        data={
            'phone': "8615131255089",
            'password': "xxxxxx",
            'oneMonth': ""
        }
    )
    # 3. The authorized session cookie lets this vote request succeed.
    i3 = session.post(
        url="http://dig.chouti.com/link/vote?linksId=8589623",
    )
    print(i3.text)
参考:
相关文章
- Python子进程 (subprocess包)
- [python爬虫] 百度贴吧
- 用Python写简单的爬虫
- 使用python调用shell命令示例代码
- Python Django HttpResponse响应对象
- Python爬虫第一战 爬取小说
- 零基础学Python-爬虫-5、下载网络视频
- 〖Python零基础入门篇(58)〗- Python中的虚拟环境
- Python爬虫基础讲解:数据持久化——json 及 CSV模块简介
- Python爬虫基础讲解:数据持久化——文件操作 及 Excel
- Python爬虫基础讲解:chrome开发者工具及网络面板
- 【Python成长之路】Python爬虫 --requests库爬取网站乱码(xe4xb8xb0xe5xa)的解决方法【华为云分享】
- 手把手教你用 Python 和 Flask 创建REST API
- python爬虫模块之调度模块
- python爬虫模块之HTML下载模块
- python爬虫模块之URL管理器模块
- python爬虫:scrapy-redis实现分布式爬虫
- Python爬虫:splash+requests简单示例
- Python编程:PyThink数据库交互模块提高爬虫编写速度
- python 站点爬虫 下载在线盗墓笔记小说到本地的脚本
- 手把手教你测试技能:python 3.X搭建robotframework框架
- python基础===8道基础知识题
- python 爬虫之requests模块设置代理
- 从零开始,学会Python爬虫不再难!!! -- (14)Scrapy框架丨蓄力计划
- requests 模块python爬虫 官网2高级教程