requests请求代码部分

刚了解到爬虫的一些相关知识,之前弄了一个框架,现在把代码发出来

import requests
import json

# Demo script exercising the basic features of the `requests` library
# against httpbin.org: plain GET, query params, timeouts, custom headers,
# POST (form and JSON bodies), file upload, cookies, sessions, and proxies.
url = 'http://httpbin.org/get'
url1 = 'http://httpbin.org/post'
url2 = 'http://httpbin.org/cookies'

# Plain GET request.
r = requests.get(url)
print("the first:")
print(r.url)
print("\n")

# GET with query-string parameters (`params` is URL-encoded for you).
payload = {
    'the': 'second'
}
d = requests.get(url, params=payload)
print('the second:%s' % [d.text])
print(d.text)
print("\n")
print("\n")

## Timeout — NOTE: requests timeouts are in SECONDS, not milliseconds.
## The original value of 1000 (≈17 minutes) was almost certainly a
## seconds/ms confusion; 5 s is a sane connect/read limit.
url5 = 'http://www.baidu.com'
t = requests.get(url5, timeout=5)
print(t.text)
print("\n")
print("\n")

## Custom request headers.
headers = {
    'hello': 'world'
}
e = requests.get(url, headers=headers)
print(e.text)
print("\n")
print("\n")

## POST requests:
## `data=dict` sends a form-encoded body; `data=json.dumps(...)` sends a
## raw JSON string (modern requests also supports `json=data` directly).
data = {
    'hello': 'world'
}
j = requests.post(url1, data=data)
h = requests.post(url1, data=json.dumps(data))
print(j.text)
print(h.text)

## File upload — use a context manager so the file handle is closed
## (the original leaked the handle opened with open('test.txt','rb')).
with open('test.txt', 'rb') as fh:
    files = {
        'file': fh
    }
    b = requests.post(url1, files=files)
print(b.text)

## Sending cookies with a request:
cookies = dict(data='working')
z = requests.get(url2, cookies=cookies)
print(z.text)

## Persistent session — cookies set on the session carry across requests.
s = requests.Session()
s.get('http://httpbin.org/cookies/set/sessioncookie/123456789')
r = s.get('http://httpbin.org/cookies')
print(r.text)

## Proxies — two fixes here:
##  * the original IP '152.148.1.416' is invalid (octet 416 > 255);
##  * the target URL is https, so an 'https' entry is required or the
##    proxy is silently ignored.  Replace with a real proxy to test.
proxies = {
    'http': 'http://152.148.1.41:8080',
    'https': 'http://152.148.1.41:8080',
}
n = requests.post('https://nuocheng.github.io/', proxies=proxies)
# Report the status of the proxied request itself — the original printed
# r.status_code, but `r` was reassigned to the session response above.
print(n.status_code)