nuochengze
import scrapy


class BaiduSpider(scrapy.Spider):
    """Demo spider: fetch the Baidu homepage and dump the cookies it sets.

    Shows how to pull cookies out of a Scrapy response by feeding the
    response/request pair into a ``CookieJar`` and flattening the jar's
    nested storage into a plain ``{name: value}`` dict.
    """

    name = 'baidu'
    allowed_domains = ['www.baidu.com']
    start_urls = ['http://www.baidu.com/']

    def parse(self, response):
        """Print the CookieJar and a flat dict of cookies from *response*."""
        # Obtain the cookies by importing CookieJar (local import: only
        # needed for this demonstration).
        from scrapy.http.cookies import CookieJar

        cookie_jar = CookieJar()
        # Populate the jar from this response and the request that produced it.
        cookie_jar.extract_cookies(response, response.request)
        print(cookie_jar)  # <scrapy.http.cookies.CookieJar object at 0x7f8888a0f940>

        # The jar stores cookies nested as {domain: {path: {name: Cookie}}};
        # flatten to {name: value}.  NOTE(review): this reads the private
        # `_cookies` attribute inherited from http.cookiejar.CookieJar —
        # works, but is not part of the public API.
        cookie_dict = {}
        for paths in cookie_jar._cookies.values():
            for cookies_by_name in paths.values():
                for cookie_name, cookie in cookies_by_name.items():
                    cookie_dict[cookie_name] = cookie.value
        print("cookie_dict>>>", cookie_dict)
        # cookie_dict>>> {'BDSVRTM': '0', 'BD_HOME': '1', 'H_PS_PSSID': '32293_1465_31669_32380_32359_31254_32046_32116_26350'}

分类:

技术点:

相关文章:

  • 2021-12-13
  • 2021-11-16
  • 2022-02-10
  • 2022-01-20
  • 2021-11-28
  • 2022-02-02
  • 2021-11-26
猜你喜欢
  • 2021-12-13
  • 2022-02-21
  • 2022-12-23
  • 2021-12-19
  • 2021-05-18
  • 2022-12-23
相关资源
相似解决方案