The re module
- re.S (DOTALL): makes . also match newline characters, so a pattern can span multiple lines
- re.M (MULTILINE): makes ^ and $ match at the start and end of every line, not just of the whole string (both flags are demonstrated in the sketch below)
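A quick demonstration of the difference between the two flags (a minimal sketch; the sample string is made up):

import re

text = '<div>\nhello\n</div>'

# without re.S, . stops at newlines, so the pattern cannot span lines
print(re.findall('<div>.*</div>', text))        # []
# with re.S, . matches newlines too, so the whole block matches
print(re.findall('<div>.*</div>', text, re.S))  # ['<div>\nhello\n</div>']
# with re.M, ^ and $ match at every line boundary
print(re.findall(r'^hello$', text, re.M))       # ['hello']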
Use the re module to extract image URLs and download every image from Qiushibaike
Basic version
import requests
import re
import os

if not os.path.exists('image'):
    os.mkdir('image')

def get_page(number):
    '''
    Download every image on the given page.
    :param number: page number to crawl
    :return: None
    '''
    if number == 1:
        url = 'https://www.qiushibaike.com/pic/'
    else:
        url = 'https://www.qiushibaike.com/pic/page/' + str(number)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    }
    page_text = requests.get(url=url, headers=headers).text
    # re.S lets .*? span the line breaks inside each <div class="thumb"> block
    img_list = re.findall('<div class="thumb">.*?<img src="(.*?)".*?>.*?</div>', page_text, re.S)
    for _url in img_list:
        img_url = 'https:' + _url  # the src values are protocol-relative (//...)
        response = requests.get(url=img_url, headers=headers)
        filename = img_url.split('/')[-1]
        file_path = 'image/%s' % filename
        with open(file_path, 'wb') as f:
            f.write(response.content)
        print('Crawling page %s: %s' % (number, filename))

for i in range(1, 35):
    get_page(i)
Multithreaded version
import requests
import re
import os
from concurrent.futures import ThreadPoolExecutor

if not os.path.exists('image'):
    os.mkdir('image')

def get_page(number):
    '''
    Download every image on the given page.
    :param number: page number to crawl
    :return: None
    '''
    if number == 1:
        url = 'https://www.qiushibaike.com/pic/'
    else:
        url = 'https://www.qiushibaike.com/pic/' + 'page/' + str(number)
    print(url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
    }
    page_text = requests.get(url=url, headers=headers).text
    img_list = re.findall('<div class="thumb">.*?<img src="(.*?)".*?>.*?</div>', page_text, re.S)
    for _url in img_list:
        img_url = 'https:' + _url
        response = requests.get(url=img_url, headers=headers)
        filename = img_url.split('/')[-1]
        file_path = 'image/%s' % filename
        with open(file_path, 'wb') as f:
            f.write(response.content)
        print('Crawling page %s: %s' % (number, filename))

if __name__ == '__main__':
    # 5 worker threads; each submitted task crawls one full page
    pool = ThreadPoolExecutor(5)
    for i in range(1, 36):
        pool.submit(get_page, i)
    pool.shutdown()  # block until all submitted tasks finish
    print('Done crawling')
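ThreadPoolExecutor also works as a context manager, which shuts the pool down and waits for every task on exit; a minimal sketch of the same main block in that style:

if __name__ == '__main__':
    with ThreadPoolExecutor(5) as pool:
        for i in range(1, 36):
            pool.submit(get_page, i)
    # reaching this line means every page task has completed
    print('Done crawling')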
xpath
How xpath is used in a crawler
- Install the library
- Import it
- Create an etree object and use it to parse out the target data
  - Local file: tree = etree.parse('path to local file')
    - tree.xpath('xpath expression')
  - Network: tree = etree.HTML('page data returned by the request')
    - tree.xpath('xpath expression')
# Install
pip3 install lxml
# Import
from lxml import etree
Common xpath expressions (each of these is exercised in the runnable sketch after this list)
Locating by attribute:
# find the div tag whose class attribute is "song"
//div[@class="song"]
Locating by hierarchy & index:
# find the a tag that is a direct child of the second li under the ul directly under the div whose class is "tang"
//div[@class="tang"]/ul/li[2]/a
Logical operators:
# find a tags whose href is empty and whose class is "du"
//a[@href="" and @class="du"]
Fuzzy matching:
//div[contains(@class, "ng")]     # class contains "ng"
//div[starts-with(@class, "ta")]  # class starts with "ta"
Extracting text:
# /text() returns only the text stored directly under a tag
# //text() returns that text plus the text of all of its descendant tags
//div[@class="song"]/p[1]/text()  # only the text stored directly in the tag
//div[@class="tang"]//text()      # text stored anywhere under the tag
Extracting attributes:
//div[@class="tang"]//li[2]/a/@href
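These expressions can be tried out against a small in-memory document; a minimal sketch (the HTML below is made up to match the class names used above):

from lxml import etree

html = '''
<html><body>
  <div class="song"><p>first</p><p>second</p></div>
  <div class="tang">
    <ul>
      <li><a href="http://a.example.com">one</a></li>
      <li><a href="http://b.example.com">two</a></li>
    </ul>
  </div>
</body></html>
'''

tree = etree.HTML(html)
print(tree.xpath('//div[@class="song"]/p[1]/text()'))        # ['first']
print(tree.xpath('//div[@class="tang"]/ul/li[2]/a/text()'))  # ['two']
print(tree.xpath('//div[@class="tang"]//li[2]/a/@href'))     # ['http://b.example.com']
print(tree.xpath('//div[starts-with(@class, "ta")]//text()'))  # includes whitespace-only text nodes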
A minimal xpath example
from lxml import etree
import requests

url = 'https://www.qiushibaike.com/pic/'
page_text = requests.get(url=url).text
tree = etree.HTML(page_text)
url_list = tree.xpath('//div[@class="article block untagged mb15"]/div[2]/a/img/@src')
print(url_list)
The xpath plugin
- xpath plugin: lets you evaluate xpath expressions directly against the page in the browser
- Install: Chrome -> More tools -> Extensions -> enable Developer mode -> drag the xpath plugin onto the page
- Shortcut
  - toggle on/off: ctrl + shift + x
Element objects returned by xpath can call xpath() themselves, so you can parse just the part of the document that the element represents (note the ./ prefix in the expressions below).
# Use xpath to extract the jokes and their titles from the joke site ishuo.cn
import requests
from lxml import etree

url = 'https://ishuo.cn/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
page_text = requests.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@id="list"]/ul/li')
with open('段子.txt', 'w', encoding='utf-8') as f:
    for li in li_list:
        # ./ evaluates the expression relative to this li element
        content = li.xpath('./div[@class="content"]/text()')[0]
        title = li.xpath('./div[@class="info"]/a/text()')[0]
        f.write("#####################\n" + title + ":\n" + content + "\n\n")
print('Data written successfully')
Parsing with BeautifulSoup
BeautifulSoup is a Python library for parsing HTML that is simple, convenient, and efficient.
- Core idea: convert the html document into a BeautifulSoup object, then use that object's attributes and methods to locate the content you want in the document.
- Import
  from bs4 import BeautifulSoup
- Creating the BeautifulSoup object (both cases are shown in the sketch below):
  - If the html document comes from a local file: BeautifulSoup(open('local html file'), 'lxml')
  - If the html comes from the network: BeautifulSoup('page data returned by the request', 'lxml')
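Both creation paths in runnable form (a minimal sketch; 'local.html' is a hypothetical file name):

import requests
from bs4 import BeautifulSoup

# local file: pass the open file object itself, not the string 'open(...)'
local_soup = BeautifulSoup(open('local.html', encoding='utf-8'), 'lxml')

# network: pass the text of the response
page_text = requests.get('https://example.com').text
net_soup = BeautifulSoup(page_text, 'lxml')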
Attributes and methods
- Look up by tag name:
  soup.a  # only finds the first matching tag
- Get attributes
  soup.a.attrs          # all of a's attributes and values, as a dict
  soup.a.attrs['href']  # get the href attribute
  soup.a['href']        # shorthand for the same thing
- Get content (each roughly equivalent to xpath's text())
  soup.a.string
  soup.a.text
  soup.a.get_text()
  # if the tag contains other tags, string returns None, while the other two still return the text content (demonstrated in the sketch below)
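The difference shows up as soon as the tag contains another tag (a minimal sketch):

from bs4 import BeautifulSoup

soup = BeautifulSoup('<a href="/x">hello <b>world</b></a>', 'lxml')
print(soup.a.string)      # None -- the <a> tag has a child tag
print(soup.a.text)        # 'hello world'
print(soup.a.get_text())  # 'hello world'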
- find: returns the first tag that matches
  soup.find('a')  # first match
  soup.find('a', title="xxx")
  soup.find('a', alt="xxx")
  soup.find('a', class_="xxx")
  soup.find('a', id="xxx")
- find_all: returns every tag that matches
  soup.find_all('a')
  soup.find_all(['a', 'b'])    # all a and b tags
  soup.find_all('a', limit=2)  # only the first two matches
- Select content with CSS selectors
  select: soup.select('#feng')
  Common selectors: tag selector (a), class selector (.), id selector (#), hierarchy selector
  select always returns a list, so index into it to get a specific element (see the sketch below)
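A short demonstration of select and the list it returns (a minimal sketch with made-up HTML):

from bs4 import BeautifulSoup

html = '<div id="feng"><ul><li><a class="du" href="/1">one</a></li></ul></div>'
soup = BeautifulSoup(html, 'lxml')

print(soup.select('#feng'))  # id selector -- always returns a list
print(soup.select('.du'))    # class selector
print(soup.select('div > ul > li > a')[0]['href'])  # hierarchy selector -> '/1'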
Using BeautifulSoup to crawl the complete Romance of the Three Kingdoms (三国演义)
import requests
from bs4 import BeautifulSoup

url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
page_text = requests.get(url=url, headers=headers).text

def get_content(url, fileobj):
    '''
    Crawl one chapter page and append its paragraphs to fileobj.
    :param url: chapter page url
    :param fileobj: open file object to write into
    :return: None
    '''
    content_page = requests.get(url=url, headers=headers).text
    content_soup = BeautifulSoup(content_page, 'lxml')
    p_list = content_soup.select('.chapter_content > p')
    for p in p_list:
        fileobj.write('\n' + p.text + '\n\n')

soup = BeautifulSoup(page_text, 'lxml')
a_list = soup.select('.book-mulu > ul > li > a')  # chapter links in the table of contents
with open('三国演义.txt', 'w', encoding='utf-8') as f:
    for a in a_list:
        f.write('\n' + a.text)
        content_url = 'http://www.shicimingju.com' + a['href']
        get_content(content_url, f)
        print('Crawled', a.text)