『壹』 python 多线程 访问网站

# Originally written for Python 2; cleaned up to run on Python 3.
# coding=utf-8
import os
import re
import requests
import sys
import threading
import time

# NOTE(review): the original Python 2 lines
#     reload(sys); sys.setdefaultencoding('utf-8')
# do not exist in Python 3 and are unnecessary there (str is Unicode),
# so they are dropped.

class Archives(object):
    """Download a single URL and save the HTML body under the ``htmls`` directory."""

    def __init__(self, url):
        self.url = url  # the page to download

    def save_html(self, text):
        """Write *text* to ``htmls/<unix-timestamp>_<last-path-segment-of-url>``.

        Creates the ``htmls`` directory on first use.
        """
        fn = '{}_{}'.format(int(time.time()), self.url.split('/')[-1])
        dirname = 'htmls'
        if not os.path.exists(dirname):
            os.mkdir(dirname)
        with open(os.path.join(dirname, fn), 'w') as f:
            f.write(text)

    def get_htmls(self):
        """Fetch ``self.url`` and persist the body; failures are printed, not raised."""
        try:
            r = requests.get(self.url)
            r.raise_for_status()
            # use the sniffed encoding so non-UTF-8 pages decode correctly
            r.encoding = r.apparent_encoding
            # BUG FIX: the original printed the global name `url` (NameError
            # inside the class) — it must be self.url.
            print('get html from', self.url)
            self.save_html(r.text)
        except Exception as e:
            # best-effort crawler: report the failure and keep going
            print('爬取失败', e)

    def main(self):
        """Run the download on a worker thread and wait for it to finish."""
        # BUG FIX: the original used target=self.get_htmls() — the parentheses
        # invoke the method immediately on the *main* thread and pass its
        # return value (None) as the target. Pass the bound method itself.
        thread = threading.Thread(target=self.get_htmls)
        thread.start()
        thread.join()

if __name__ == '__main__':
    start = time.time()
    # URL list file: first CLI argument, defaulting to urls.txt (one URL per line)
    fn = sys.argv[1] if len(sys.argv) > 1 else 'urls.txt'
    with open(fn) as f:
        s = f.readlines()
    # set() de-duplicates the URL lines before fetching
    for url in set(s):
        a = Archives(url.strip())
        a.main()
    end = time.time()
    print(end - start)  # total elapsed seconds

『贰』 python3.6怎么访问网页

使用Python访问网页主要有三种方式: urllib, urllib2, httplib
urllib比较简单,功能相对也比较弱,httplib简单强大,但好像不支持session
(注意: urllib2 和 httplib 是 Python 2 的模块名;在 Python 3.6 中请使用 urllib.request 和 http.client,下面的示例按 Python 2 写法给出。)
1. 最简单的页面访问
res=urllib2.urlopen(url)
print res.read()
2. 加上要get或post的数据
data={"name":"hank", "passwd":"hjz"}
urllib2.urlopen(url, urllib.urlencode(data))
3. 加上http头
header={"User-Agent": "Mozilla-Firefox5.0"}
req = urllib2.Request(url, urllib.urlencode(data), header)  # urlopen 本身不接受 headers 参数,需先构造 Request
urllib2.urlopen(req)
使用opener和handler
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
4. 加上session
cj = cookielib.CookieJar()
cjhandler=urllib2.HTTPCookieProcessor(cj)
opener = urllib2.build_opener(cjhandler)
urllib2.install_opener(opener)
5. 加上Basic认证
password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
top_level_url = "http://www.163.com/"
password_mgr.add_password(None, top_level_url, username, password)
handler = urllib2.HTTPBasicAuthHandler(password_mgr)
opener = urllib2.build_opener(handler)
urllib2.install_opener(opener)
6. 使用代理
proxy_support = urllib2.ProxyHandler({"http":"http://1.2.3.4:3128/"})
opener = urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
7. 设置超时
socket.setdefaulttimeout(5)

『叁』 Python批量判断网站是否能访问

# coding:utf-8
# author: www.chenhaifei.com
# Check whether a single site responds, by HTTP status code.
# (Cleaned up: the original used typographic quotes, which are a syntax
# error in Python, plus the Python-2-only reload(sys)/setdefaultencoding
# hack, which is unnecessary and unavailable in Python 3 and was dropped.)
import requests            # HTTP client
import time, random        # pacing between requests
import sys

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
}
url = 'http://www.chenhaifei.com/'  # the URL to check
# allow_redirects=False: report the original response's status code,
# not the status code of the page reached after following redirects.
cont = requests.get(url, allow_redirects=False).status_code
print(cont)
time.sleep(0.5)  # be polite: pause before the next check
上面是单个判断url状态码的,你可以把想要检测的url放在一个txt里面,这样就可以循环检测了。