# Use the requests library to fetch proxy pages and BeautifulSoup to parse them and filter out IP addresses.
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from threading import Thread
# Browser-like User-Agent header so the proxy site does not reject the scraper.
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:55.0) Gecko/20100101 Firefox/55.0'}
# Scrape proxy IPs and persist them to a text file.
def get_ip():
    """Scrape proxy "ip:port" pairs from xicidaili.com pages 1-9 and write
    them to get_ip.txt, one pair per line.

    Side effects: performs HTTP GETs, overwrites get_ip.txt in the current
    directory, and prints 'done' on completion.
    """
    # Context manager guarantees the file is closed even if a request fails.
    with open('get_ip.txt', 'w') as write_ip:
        for page in range(1, 10):
            url = 'http://www.xicidaili.com/nn/%s' % page
            r = requests.get(url, headers=headers, timeout=5)
            # Parse the listing page with BeautifulSoup using the lxml parser.
            soup = BeautifulSoup(r.content, 'lxml')
            trs = soup.find('table', id='ip_list').find_all('tr')
            # trs[0] is the table header row — skip it.
            for tr in trs[1:]:
                tds = tr.find_all('td')
                ip = tds[1].text.strip()
                port = tds[2].text.strip()
                write_ip.write('%s\n' % (ip + ':' + port))
    print('done')
# Guard the entry point so importing this module does not trigger scraping.
if __name__ == '__main__':
    get_ip()