[TOC]
---
#### 1. Image Downloading
```Python
# Baidu Images: http://image.baidu.com/
# Sogou Images: https://pic.sogou.com/
```
```Python
# Image scraping workflow (a minimal sketch follows below):
1). Find the image download URL: inspect Elements and capture traffic in the Network panel
2). Visit the URL in a browser to verify it
3). Write code to extract the URL
4). Request the URL and get the binary stream
5). Write the binary stream to a file
```
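A minimal sketch of steps 4) and 5), assuming a direct image URL has already been found and verified (the URL and filename here are placeholders):
```Python
import requests

# Placeholder URL -- substitute one verified in the browser (step 2)
img_url = 'https://example.com/some_image.jpg'
headers = {'User-Agent': 'Mozilla/5.0'}

# Step 4): request the URL; .content is the response body as a binary stream
content = requests.get(url=img_url, headers=headers).content

# Step 5): write the binary stream to a file in 'wb' (write-binary) mode
with open('./some_image.jpg', 'wb') as f:
    f.write(content)
```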
```Python
# Baidu Images:
import time
import requests
from lxml import etree
from selenium import webdriver

# Instantiate a browser object (Selenium 3.x style; the driver sits in the current directory)
browser = webdriver.Chrome('./chromedriver.exe')

# Open the page and drive its elements to get search results
browser.get('http://image.baidu.com/')
input_tag = browser.find_element_by_id('kw')
input_tag.send_keys('乔碧萝')
search_button = browser.find_element_by_class_name('s_search')
search_button.click()

# Scroll down via JS so lazy-loaded images render into the page source
js = 'window.scrollTo(0, document.body.scrollHeight)'
for times in range(3):
    browser.execute_script(js)
    time.sleep(3)
html = browser.page_source

# Parse the source and extract the image URLs
tree = etree.HTML(html)
url_list = tree.xpath('//div[@id="imgid"]/div/ul/li/@data-objurl')
for img_url in url_list:
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    }
    # Skip tokenized URLs; only then fetch the binary stream and write it out
    if 'token' not in img_url:
        content = requests.get(url=img_url, headers=headers).content
        with open('./baidupics/%s' % img_url.split('/')[-1], 'wb') as f:
            f.write(content)
```
```Python
# Sogou Images:
import requests
import re

url = 'http://pic.sogou.com/pics?'
params = {
    'query': '韩美娟'
}
res = requests.get(url=url, params=params).text

# Pull the image URLs out of the raw response with a regex
url_list = re.findall(r',"(https://i\d+piccdn\.sogoucdn\.com/.*?)"]', res)
for img_url in url_list:
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    }
    print(img_url)
    content = requests.get(url=img_url, headers=headers).content
    # The URL's last path segment has no extension, so append .jpg exactly once
    name = img_url.split('/')[-1] + '.jpg'
    with open('./sougoupics/%s' % name, 'wb') as f:
        f.write(content)
```
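Both scripts write into `./baidupics/` and `./sougoupics/` but never create those folders, and `open()` raises `FileNotFoundError` when the directory is missing. A small guard worth running first:
```Python
import os

# Create the output directories up front if they do not exist yet
for folder in ('./baidupics', './sougoupics'):
    os.makedirs(folder, exist_ok=True)
```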
---
#### 2. JS Dynamic Rendering
```Python
1).Scraping with Selenium: a testing framework that fully simulates a human operating the browser, *** page_source ***
2).Basic syntax:
from selenium import webdriver
# Instantiate a browser object:
browser = webdriver.Chrome('path to the browser driver')  # in the current directory: './chromedriver.exe'
# Visit the corresponding url:
browser.get(url)
# Locate page elements:
find_element_by_id(): by the id attribute
find_element_by_name(): name is the tag's name attribute
find_element_by_class_name(): by the class attribute
find_element_by_xpath(): locate via an XPath expression
find_element_by_css_selector(): via a CSS selector
# Example: grab the input box whose id is kw
input_tag = browser.find_element_by_id('kw')
# Type into it:
input_tag.clear()
input_tag.send_keys('乔碧萝殿下')
# Click a button:
button.click()
# Execute JS code:
js = 'window.scrollTo(0, document.body.scrollHeight)'
for i in range(3):
    browser.execute_script(js)
# Get the HTML source: remember, no parentheses *****
html = browser.page_source  # str
# Data parsing:
1).XPath extraction
2).Regex extraction: writing the pattern + using the re module
3).BeautifulSoup: CSS selectors --> (node selectors, method selectors, CSS selectors)
# Media types: video, images, archives, installers
1).Find the download link
2).Request it: with requests, response.content --> binary stream
   with the scrapy framework, response.body --> binary stream
3).Write the file:
with open('./jdkfj/name', 'wb') as f:
    f.write(res.content)  # or response.body under scrapy
```
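A self-contained sketch stitching the calls above together (Selenium 3.x API, matching the notes; the headless flag and the Baidu element id are assumptions carried over from section 1):
```Python
from selenium import webdriver

# Headless mode is an extra assumption -- the notes drive a visible browser,
# but headless is common when only page_source is needed
options = webdriver.ChromeOptions()
options.add_argument('--headless')
browser = webdriver.Chrome('./chromedriver.exe', options=options)

browser.get('http://image.baidu.com/')
input_tag = browser.find_element_by_id('kw')   # locate by id
input_tag.clear()                              # clear any prefilled text
input_tag.send_keys('乔碧萝殿下')                # type the query

# Scroll to the bottom so lazy-loaded content renders
js = 'window.scrollTo(0, document.body.scrollHeight)'
browser.execute_script(js)

html = browser.page_source                     # property: no parentheses
browser.quit()                                 # release the driver process
print(len(html))
```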
---
#### 3. Data Parsing
```Python
1.XPath
# Workflow
from lxml import etree
# Instantiate an etree object
tree = etree.HTML(res.text)
# Apply an XPath expression to extract data
li_list = tree.xpath('XPath expression')  # xpath() always returns its results in a list
# Nested extraction (relative paths ./ and .//, see below)
for li in li_list:
    li.xpath('XPath expression')
# Basic syntax:
./  : matches direct children of the current node
.// : matches at any depth below the current node
nodeName: locate by node name
nodename[@attributename="value"]: locate by attribute
single attribute with multiple values: contains --> div[contains(@class, "item")]
multiple attributes: and --> div[@class="item" and @name="divtag"]
@attributename: extract the attribute's value
text(): extract the text content
# Positional selection (worked demo below):
1).By index (indexing starts at 1): res.xpath('//div/ul/li[1]/text()') locates the first li tag
2).last(): the last one; second to last: last()-1
   res.xpath('//div/ul/li[last()]')    # locate the last li
   res.xpath('//div/ul/li[last()-1]')  # locate the second-to-last li
3).position():
   res.xpath('//div/ul/li[position()<4]')  # the first three li tags
# Aside -- attributes of a requests response object:
res.text     --> text
res.json()   --> basic Python data types --> dict
res.content  --> binary stream
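# --- Worked XPath demo; the HTML snippet is invented for illustration: ---
from lxml import etree
html = '<div><ul><li class="item">a</li><li class="item">b</li><li>c</li></ul></div>'
tree = etree.HTML(html)
print(tree.xpath('//li[contains(@class, "item")]/text()'))  # ['a', 'b']
print(tree.xpath('//ul/li[last()]/text()'))                 # ['c']
print(tree.xpath('//ul/li[position()<3]/text()'))           # ['a', 'b']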
2.BS4 basics:
# Workflow:
from bs4 import BeautifulSoup
# Instantiate a soup object
soup = BeautifulSoup(res.text, 'lxml')
# Locate nodes
soup.select('CSS selector')
# CSS selector syntax:
id: #
class: .
soup.select('div > ul > li')  # direct-child selector, one level per >
soup.select('div li')         # descendant selector, any depth
# Get a node's attributes or text (worked demo below):
tag.string: direct text only --> returns None when the tag contains other tags besides its direct text
tag.get_text(): all text
tag['attributename']: attribute value (a multi-valued attribute such as class comes back as a list)
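# --- Worked BS4 demo; the HTML snippet is invented for illustration: ---
from bs4 import BeautifulSoup
html = '<div id="box"><a class="link big" href="/a">hi<span>!</span></a></div>'
soup = BeautifulSoup(html, 'lxml')
a = soup.select('#box > a')[0]  # select() returns a list
print(a.string)                 # None -- <a> holds a <span> besides its text
print(a.get_text())             # 'hi!'
print(a['class'])               # ['link', 'big'] -- multi-valued attribute -> list
print(a['href'])                # '/a'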
3.Regex & the re module
Grouping & non-greedy matching: () --> 'dfkjd(kdf.*?dfdf)dfdf'
<a href="https://www.baidu.com/kdjfkdjf.jpg">this is an a tag</a> -->
    '<a href="(https://www.baidu.com/.*?\.jpg)">'
Quantifiers:
+    : match 1 or more times
*    : match 0 or more times
{m}  : match exactly m times
{m,n}: match m to n times
{m,} : at least m times
{,n} : at most n times
The re module:
re.findall('regex pattern', res.text) --> returns a list
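# --- Worked regex demo; the href is the sample URL from above: ---
import re
html = '<a href="https://www.baidu.com/kdjfkdjf.jpg">this is an a tag</a>'
# The group () captures just the URL; .*? keeps the match non-greedy
print(re.findall(r'<a href="(https://www.baidu.com/.*?\.jpg)">', html))
# ['https://www.baidu.com/kdjfkdjf.jpg']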
```
---
#### 4. Persistent Storage
```Python
1.txt
############# Write to a txt file ###############
if title and joke and comment:
    with open('qbtxt.txt', 'a', encoding='utf-8') as txtfile:
        txtfile.write('&'.join([title[0], joke[0], comment[0]]))
        txtfile.write('\n')
        txtfile.write('********************************************\n')
2.json
############# Write to a json file ################
import json
dic = {'title': title[0], 'joke': joke[0], 'comment': comment[0]}
with open('jsnfile.json', 'a', encoding='utf-8') as jsonfile:
    jsonfile.write(json.dumps(dic, indent=4, ensure_ascii=False))
    jsonfile.write(',' + '\n')
3.csv
############# Write to a CSV file ##################
import csv
# newline='' keeps the csv module from inserting blank rows on Windows
with open('csvfile.csv', 'a', encoding='utf-8', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=' ')
    writer.writerow([title[0], joke[0], comment[0]])
############# scrapy framework ##################
FEED_URI = 'file:///home/eli/Desktop/qtw.csv'
FEED_FORMAT = 'csv'
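# Note: newer Scrapy versions (>= 2.1) deprecate the two settings above in
# favor of the FEEDS dict, e.g.:
# FEEDS = {'qtw.csv': {'format': 'csv'}}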
4.mongodb
import pymongo
# Pipeline class
class MongoPipeline(object):
    # __init__ is the initializer; __new__ is the constructor that allocates the memory
    def __init__(self, mongo_uri, mongo_db):
        self.mongo_uri = mongo_uri
        self.mongo_db = mongo_db
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            mongo_uri=crawler.settings.get('MONGO_URI'),
            mongo_db=crawler.settings.get('MONGO_DB')
        )
    def open_spider(self, spider):
        self.client = pymongo.MongoClient(self.mongo_uri)
        self.db = self.client[self.mongo_db]
    def process_item(self, item, spider):
        self.db['news'].insert_one(dict(item))
        # A project may have several pipeline classes; if later pipelines still
        # need to store the data, process_item must return item
        return item
    def close_spider(self, spider):
        self.client.close()
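# To activate the pipeline, register it in settings.py (the project and module
# names below are placeholders):
# ITEM_PIPELINES = {'myproject.pipelines.MongoPipeline': 300}
# MONGO_URI = 'mongodb://localhost:27017'
# MONGO_DB = 'scrapydb'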
5.mysql
import pymysql
class MysqlPipeline(object):
    def __init__(self, host, database, user, password, port):
        self.host = host
        self.database = database
        self.user = user
        self.password = password
        self.port = port
    @classmethod
    def from_crawler(cls, crawler):
        return cls(
            host=crawler.settings.get('MYSQL_HOST'),
            database=crawler.settings.get('MYSQL_DATABASE'),
            user=crawler.settings.get('MYSQL_USER'),
            password=crawler.settings.get('MYSQL_PASSWORD'),
            port=crawler.settings.get('MYSQL_PORT')
        )
    def open_spider(self, spider):
        # pymysql expects charset 'utf8'/'utf8mb4', not 'utf-8'
        self.db = pymysql.connect(host=self.host, user=self.user, password=self.password,
                                  database=self.database, charset='utf8mb4', port=self.port)
        self.cursor = self.db.cursor()
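    # The notes stop here; a minimal completion sketch (the table name 'news'
    # is an assumption for illustration):
    def process_item(self, item, spider):
        data = dict(item)
        keys = ', '.join(data.keys())
        values = ', '.join(['%s'] * len(data))
        sql = 'INSERT INTO news (%s) VALUES (%s)' % (keys, values)
        self.cursor.execute(sql, tuple(data.values()))
        self.db.commit()
        return item
    def close_spider(self, spider):
        self.db.close()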
```
---