##第一步 导包
from bs4 import BeautifulSoup
import requests
import sys
##准备
class downloder(object):
    """Download a novel from biqukan.com chapter by chapter.

    Scrapes the chapter index of one book, then fetches each chapter page
    and appends its text to a local file.

    NOTE: the class name keeps the original (misspelled) identifier
    ``downloder`` for backward compatibility with existing callers.
    """

    def __init__(self):
        # Site root (used to absolutize relative chapter hrefs) and the
        # index page of the target novel.
        self.server = 'http://www.biqukan.com'
        self.target = 'http://www.biqukan.com/1_1094/'
        self.names = []  # chapter titles, in index order
        self.urls = []   # absolute chapter URLs, parallel to self.names
        self.nums = 0    # number of chapters found

    def get_download_url(self):
        """Fetch the index page and populate self.names / self.urls / self.nums.

        The first 15 anchors inside the ``listmain`` div are site chrome
        ("latest chapters" shortcuts, etc.), so they are skipped.
        """
        req = requests.get(url=self.target)
        html = req.text
        # Pass an explicit parser: bare BeautifulSoup(html) guesses one,
        # emits a warning, and can behave differently across environments.
        div_bf = BeautifulSoup(html, 'html.parser')
        div = div_bf.find_all('div', class_='listmain')
        a_bf = BeautifulSoup(str(div[0]), 'html.parser')
        anchors = a_bf.find_all('a')
        chapters = anchors[15:]  # skip the 15 non-chapter links
        self.nums = len(chapters)
        for each in chapters:
            self.names.append(each.string)
            self.urls.append(self.server + each.get('href'))

    def writer(self, name, path, text):
        """Append one chapter to *path*: title line, body, blank separator."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            # Original used writelines() on plain strings; write() is the
            # intended call and behaves identically for a single str.
            f.write(text)
            f.write('\n\n')

    def get_contents(self, target):
        """Download one chapter page at *target* and return its cleaned text.

        The site indents paragraphs with runs of 8 non-breaking spaces
        (``\\xa0``); each run is replaced with a blank line.
        """
        req = requests.get(url=target)
        html = req.text
        bf = BeautifulSoup(html, 'html.parser')
        texts = bf.find_all('div', class_='showtxt')
        return texts[0].text.replace('\xa0' * 8, '\n\n')
if __name__ == '__main__':
    # Build the chapter index first, then pull every chapter into one file.
    dl = downloder()
    dl.get_download_url()
    print('开始下载')
    # names and urls are parallel lists, so walk them in lockstep.
    for title, link in zip(dl.names, dl.urls):
        dl.writer(title, '用点.txt', dl.get_contents(link))
    print("下载完成")
# 参考华哥的内容... 还有好多不懂
# (Based on Hua's tutorial — still much left to understand.)
# http://cuijiahua.com/blog/2017/10/spider_tutorial_1.html
# 相关文章 (related articles):
# - python小说爬虫 2021-08-30
# - Python爬虫——小说 2021-08-30
# - 爬虫之小说爬取 2021-05-26
# - Python爬虫-爬取17K小说 2021-08-30
# - Python 爬虫 之 垃圾爬虫爬了点垃圾小说。。 2021-08-30
# - python爬虫,爬起点小说网小说 2021-06-25
# - 爬虫 爬小说 2022-12-23