# coding:utf-8
import requests
import json
from bs4 import BeautifulSoup

# Scrape http://seputu.com/: collect every section ("mulu") heading and its
# chapter links, then dump the result to qiye.json as UTF-8 JSON.

# Desktop Chrome User-Agent so the site serves the normal HTML page.
user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'

headers = {'User-Agent': user_agent}

r = requests.get("http://seputu.com/", headers=headers)
# Force UTF-8 decoding of the response body. Passing from_encoding= to
# BeautifulSoup has no effect when the input is already str (r.text), so the
# encoding must be fixed on the Response itself.
r.encoding = 'utf-8'

soup = BeautifulSoup(r.text, 'html.parser')

content = []  # list of {'title': ..., 'content': [chapter dicts]}

for mulu in soup.find_all(class_="mulu"):
    h2 = mulu.find('h2')
    if h2 is not None:  # sections without a heading are skipped
        h2_title = h2.string  # section title
        chapters = []  # renamed from `list` to avoid shadowing the builtin
        # Every <a> inside the section's "box" is one chapter link.
        for a in mulu.find(class_='box').find_all('a'):
            href = a.get('href')
            box_title = a.get('title')
            chapters.append({'href': href, 'box_title': box_title})
        content.append({'title': h2_title, 'content': chapters})

# json.dump writes str, so the file must be opened in text mode ('wb' raises
# TypeError on Python 3). ensure_ascii=False keeps the Chinese titles readable
# in the output file instead of \uXXXX escapes.
with open('qiye.json', 'w', encoding='utf-8') as fp:
    json.dump(content, fp, ensure_ascii=False, indent=4)

 

相关文章:

  • 2022-12-23
  • 2022-12-23
  • 2021-05-07
  • 2021-04-02
  • 2021-08-07
  • 2021-11-25
  • 2021-09-14
  • 2022-12-23
猜你喜欢
  • 2021-07-10
  • 2022-12-23
  • 2021-10-28
  • 2022-12-23
  • 2022-12-23
  • 2022-12-23
  • 2022-12-23
相关资源
相似解决方案