1. Key points

""""
Using pipelines:
    1. Use a yield generator in the spider:
        list_li = response.xpath("//div[@class='swiper-wrapper']//li")
        #print(list_li)
        for li in list_li:
            #print(li.extract_first())
            item = {}
            item["name"] = li.xpath(".//h3/text()").extract_first()
            item["content"] = li.xpath(".//p[@class='teacherBrief']/text()").extract_first()
            #item["content"] = li.xpath(".//p[@class='teacherIntroduction']/text()").extract_first()
            #print(item)
            yield item  # pass the data to the pipelines
            
    2. Print the item in the pipelines:
            class MyspiderPipeline(object):
                """
                First pipeline; the process_item method name must not be changed
                """
                def process_item(self, item, spider):
                    item["hello"] = "world"
                    print(item)
                    return item

            class MyspiderPipeline1(object):
                """
                Second pipeline
                """
                def process_item(self, item, spider):
                    print(item)
                    return item

    
    3. Enable the pipelines in the settings file:
        ITEM_PIPELINES = {
            # pipelines run in ascending order of these values,
            # i.e. 300 runs first, then 301
            'myspider.pipelines.MyspiderPipeline': 300,
            'myspider.pipelines.MyspiderPipeline1': 301,
        }
"""

2. In spider.py, yield item passes each item to pipelines.py

JulyeduSpider.py:
# -*- coding: utf-8 -*-
import scrapy
import logging

logger = logging.getLogger(__name__)

class JulyeduSpider(scrapy.Spider):
    name = 'julyedu'
    allowed_domains = ['julyedu.com']
    start_urls = ['http://julyedu.com/']

    # the parse method name must not be changed
    def parse(self, response):
        """
        Crawl the list of instructors on julyedu.com
        :param response:
        :return:
        """
        list_li = response.xpath("//div[@class='swiper-wrapper']//li")
        #print(list_li)
        for li in list_li:
            # create a fresh dict per instructor; building one dict outside
            # the loop would yield the same object every time, so later
            # iterations would overwrite earlier items
            item = {}
            item["name"] = li.xpath(".//h3/text()").extract_first()
            item["content"] = li.xpath(".//p[@class='teacherBrief']/text()").extract_first()
            #item["content"] = li.xpath(".//p[@class='teacherIntroduction']/text()").extract_first()
            # pass the data to the pipelines; yield here accepts only the four
            # types Request, BaseItem, dict and None
            logger.warning(item)  # log the item
            yield item
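
The comment above notes that yield also accepts a Request, so the same parse method could follow links, and items yielded from the callback flow through the same pipelines. A minimal sketch, assuming a hypothetical per-teacher detail link; the XPath, the parse_detail name, and the detail field are all illustrative, not part of the original spider:

            # inside the for loop above -- hypothetical follow-up request
            detail_url = li.xpath(".//a/@href").extract_first()
            if detail_url:
                # urljoin turns a relative href into an absolute URL
                yield scrapy.Request(response.urljoin(detail_url),
                                     callback=self.parse_detail)

    def parse_detail(self, response):
        # hypothetical callback; items yielded here also reach the pipelines
        yield {"detail": response.xpath("//title/text()").extract_first()}

Run the spider with scrapy crawl julyedu; every yielded dict then passes through each enabled pipeline's process_item in priority order.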

3. Edit pipelines.py, where each item can be operated on

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

class MyspiderPipeline(object):
    """
    First pipeline; the process_item method name must not be changed
    """
    def process_item(self, item, spider):
        """
        Handle data differently depending on which spider sent it
        :param item: the value passed over from the spider
        :param spider: the spider instance that yielded the item
        :return:
        """
        if spider.name == "julyedu":
            #print(item)
            return item
        else:
            return item

class MyspiderPipeline1(object):
    """
    Second pipeline
    """
    def process_item(self, item, spider):
        #print(item)
        return item
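
As a concrete example of operating on items, a pipeline can filter and persist them. A minimal sketch, assuming you want to drop incomplete records and write the rest to a JSON-lines file; JsonWriterPipeline and teachers.jl are hypothetical names, while open_spider/close_spider are standard optional pipeline hooks and DropItem is Scrapy's built-in way to discard an item:

    import json

    from scrapy.exceptions import DropItem

    class JsonWriterPipeline(object):
        """
        Hypothetical third pipeline: drop items without a name,
        write the rest to a JSON-lines file
        """
        def open_spider(self, spider):
            # called once when the spider opens
            self.file = open('teachers.jl', 'w', encoding='utf-8')

        def close_spider(self, spider):
            # called once when the spider closes
            self.file.close()

        def process_item(self, item, spider):
            if not item.get("name"):
                # DropItem stops the item from reaching later pipelines
                raise DropItem("missing name: %r" % item)
            self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
            return item

To enable it, register 'myspider.pipelines.JsonWriterPipeline' with its own priority in ITEM_PIPELINES, just like the two pipelines above.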
