# Python爬取豆瓣指定书籍的短评 (scrape the short comments of a given Douban book)
#!/usr/bin/python
# coding=utf-8
import re
import sys
import time
import random
import urllib
import urllib2
import MySQLdb
# 爬取豆瓣评论
class Douban:
# 构造函数
def __init__(self, url, name):
# 采集的地址
#self.url = \'https://book.douban.com/subject/26356948/comments/hot\'
self.url = url
# 存储的文件名
self.filename = str(name) + \'.txt\'
# 数据库配置
self.database = {
\'host\': \'127.0.0.1\',
\'username\': \'root\',
\'password\': \'root\',
\'database\': \'douban\',
\'charset\': \'utf8\',
\'table\': str(name),
}
# HTTP请求超时
self.http_timeout = 10
# 请求计数器
self.request_counter = 0
# 错误代码计数器
self.error_counter = 0
# 数据库第一次错误
self.db_error_first = True
# 替换换行符
def replaceSpace(self, string):
string = string.replace(\'\r\n\', \' \')
string = string.replace(\'\n\', \' \')
string = string.replace(\'\r\', \' \')
return string
# 清洗单页评论,返回为单条评论的列表
def getCommentsPage(self, url):
user_agent = \'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36\'
headers = {\'User-Agent\': user_agent}
request = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(request, timeout=self.http_timeout)
comments_page_raw = response.read()
reg = r\'.*?<li.*?class="comment-item".*?>(.*?)</li>.*?\'
comments_page = re.findall(reg, comments_page_raw, re.I | re.M | re.S)
return comments_page
except urllib2.HTTPError, e:
errmsg = \'HTTP Error: \' + e.code + \',\' + e.reason
print unicode(errmsg, \'utf-8\')
if e.code == 403 or e.code == \'403\':
# 服务器拒绝服务,等等...等等... 等120秒
print unicode(\'服务器拒绝服务,等等...等等... 等120秒\', \'utf-8\')
time.sleep(120)
response = urllib2.urlopen(request, timeout=self.http_timeout)
comments_page_raw = response.read()
reg = r\'.*?<li.*?class="comment-item".*?>(.*?)</li>.*?\'
comments_page = re.findall(reg, comments_page_raw, re.I | re.M | re.S)
return comments_page
elif e.code == 404 or e.code == \'404\':
# not found... 已爬取完毕,结束程序
print unicode(\'not found... 已爬取完毕,结束程序\', \'utf-8\')
# 结束程序
sys.exit(0)
else:
print unicode(\'HTTP 未知错误\', \'utf-8\')
# 错误次数累加
self.error_counter += 1
# 判断是否进行中断
if self.error_counter == 20:
info = \'错误次数已达%d次,结束程序\' % self.error_counter
print unicode(info, \'utf-8\')
sys.exit(1)
# 清洗单条评论,返回单条评论的昵称、头像、内容...的列表
def getCommentsItem(self, comment_raw):
# 昵称, 豆瓣主页,头像, 点赞, 星级评价, 日期, 评价内容
reg = r\'<div.*?<a.*?title="(.*?)" href="(.*?)".*?<img src="(.*?)".*?class="vote-count">(.*?)</span>.*?<span.*?title="(.*?)"></span>.*?<span>(.*?)</span>.*?<p class="comment-content">(.*?)</p>.*?</div>\'
# 匹配
comment = re.findall(reg, comment_raw, re.I | re.M | re.S)
# 返回数据
return comment
# 操作单页内容
def getPageContent(self, url):
# 获取单页评论的列表
comments = self.getCommentsPage(url)
# 迭代出单条评论
for comment in comments:
# 清洗单条评论
contents = self.getCommentsItem(comment)
for content in contents:
# 存到文件
self.saveFile(content)
# 存到数据库
self.saveDatabase(content)
# 测试输出
self.testPrint(content)
# 测试输出
def testPrint(self, content):
print content[0]
print content[1]
print content[2]
print content[3]
print content[4]
print content[5]
print self.replaceSpace(content[6])
print \'\n\'
# 保存到数据库
def saveDatabase(self, content):
# 打开数据库连接
db = MySQLdb.connect(
self.database[\'host\'],
self.database[\'username\'],
self.database[\'password\'],
self.database[\'database\'],
charset=self.database[\'charset\'],
)
# 获取游标
cursor = db.cursor()
# SQL语句
sql = "INSERT INTO %s (username, homepage, header_img, like_num, star_rating, publish_date, comment_content) VALUES (\'%s\', \'%s\', \'%s\', \'%s\', \'%s\', \'%s\', \'%s\');" % (
self.database[\'table\'], content[0], content[1], content[2], content[3], content[4], content[5],
self.replaceSpace(content[6]))
# print sql
# 添加数据
try:
# 执行SQL语句
cursor.execute(sql)
# 提交到数据库执行
db.commit()
except:
# 回滚
db.rollback()
# 判断数据库是否是第一次错误
if self.db_error_first:
print \'Failed to find database table, Trying to create database [%s].\n\n\' % self.database[\'table\']
time.sleep(2)
# 创建数据表的SQL语句
create_table = \'\'\'CREATE TABLE IF NOT EXISTS %s (
id INT(6) NOT NULL PRIMARY KEY AUTO_INCREMENT COMMENT \'评论id\',
username VARCHAR(50) NOT NULL DEFAULT \'\' COMMENT \'用户名\',
homepage VARCHAR(200) NOT NULL DEFAULT \'\' COMMENT \'豆瓣主页\',
header_img VARCHAR(200) NOT NULL DEFAULT \'\' COMMENT \'头像\',
like_num INT(6) NOT NULL DEFAULT \'0\' COMMENT \'点赞量\',
star_rating VARCHAR(10) NOT NULL DEFAULT \'\' COMMENT \'星级评价\',
publish_date VARCHAR(12) NOT NULL DEFAULT \'\' COMMENT \'发表日期\',
comment_content VARCHAR(2000) NOT NULL DEFAULT \'\' COMMENT \'评价内容\'
)DEFAULT CHARSET=utf8;\'\'\' % self.database[\'table\']
# 执行SQL语句
cursor.execute(create_table)
db.commit()
# 设定数据库错误的标志
self.db_error_first = False
# 执行异常前的SQL语句
cursor.execute(sql)
db.commit()
else:
# 数据库非第一次放生错误
print \'Database Error\'
# 关闭数据库连接
db.close()
# 保存到文件
def saveFile(self, content):
# 一行数据(即一条评论)
# item = content[5] + \'\t\' + content[4] + \'\t\' + content[3] + \'\t\' + content[0] + \'\t\t\' + content[1] + \'\t\t\' + content[2] + \'\t\t\' + content[6] + \'\r\n\'
item = self.replaceSpace(content[6]) + \'\r\n\'
# 尝试去将文件存储到文件
try:
# 打开文件
file = open(self.filename, \'ab+\')
# 写入
file.write(item)
# 关闭文件
file.close()
except:
# 发生错误
print \'File Error\'
# 入口函数
def start(self, first=1, last=10):
# 按设定页码,采集数据
for page in range(first, last + 1):
# 时间戳
timestrap = str(int(time.time())) + \'000\'
# get参数
getdata = {\'p\': page, \'_\': timestrap}
# URL编码的url
url = self.url + \'?\' + urllib.urlencode(getdata)
# 开始采集的提示信息
info_start = "----- ----- ----- [INFO] 正在获取第%d页 ----- ----- ----- \n%s\n" % (page, url)
print unicode(info_start, \'utf-8\')
# 休眠2秒
time.sleep(2)
# 获取第page页的内容
self.getPageContent(url)
# 结束采集的提示信息
info_end = "----- ----- ----- [INFO] 第%d页已加载完毕 ----- ----- ----- \n" % page
print unicode(info_end, \'utf-8\')
# 采集次数自增
self.request_counter += 1
# 判断采集次数,并根据条件休眠
if self.request_counter % 50 == 0:
# 特定情况下,休息60秒
print unicode(\'\n每采集50页,休息60秒...\n\', \'utf-8\')
# 休眠
time.sleep(60)
else:
# 普通情况下,休息随机的时间
_time = 5 + int(random.uniform(0, 6))
print unicode(\'sleep %ds\n\n\n\' % _time, \'utf-8\')
# 休眠
time.sleep(_time)
# --- Script configuration ------------------------------------------------
# Other books: point `url` at that book's /comments/hot page and pick a new
# `name` (it becomes both the output filename and the MySQL table), e.g.
# url = 'https://book.douban.com/subject/26356948/comments/hot'; name = 'baiduren'
# 芳华 ("Youth") short-comments page
url = 'https://book.douban.com/subject/27010212/comments/hot'
name = 'fanghua'

# Guard so importing this module does not start a 333-page crawl
# (the original ran unconditionally at import time).
if __name__ == '__main__':
    # Build the scraper and crawl pages 1..333
    bookSpyder = Douban(url, name)
    bookSpyder.start(1, 333)