
- Framework: a project template that comes with many built-in features and is highly general-purpose.

Environment setup

        Linux:

        pip3 install scrapy

Windows:

      a. pip3 install wheel

      b. Download a Twisted wheel from http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted

      c. In the download directory, run: pip3 install Twisted-17.1.0-cp35-cp35m-win_amd64.whl (pick the wheel that matches your Python version and architecture)

      d. pip3 install pywin32

      e. pip3 install scrapy
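
After the install finishes it may be worth confirming that the scrapy command is on the PATH before creating a project:

      scrapy version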

On the command line:

Create a project: scrapy startproject proName

      cd into the project: cd proName

      Create a spider file: scrapy genspider spiderName www.xxx.com (this generates a skeleton like the one shown below)

      Run the project: scrapy crawl spiderName
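
For reference, scrapy genspider spiderName www.xxx.com produces a spider skeleton roughly like the following (the exact template varies slightly between Scrapy versions); the wangyi.py file in the next section started from such a skeleton and was then filled in:

# -*- coding: utf-8 -*-
import scrapy


class SpidernameSpider(scrapy.Spider):
    name = 'spiderName'
    allowed_domains = ['www.xxx.com']
    start_urls = ['http://www.xxx.com/']

    def parse(self, response):
        pass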

 

Crawling the data (in the spider file that sits at the same level as __init__.py, here wangyi.py):

# -*- coding: utf-8 -*-
from wangyiPro.items import WangyiproItem
import scrapy
import re


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xxx.com']
    start_urls = ['https://temp.163.com/special/00804KVA/cm_guonei.js?callback=data_callback']  # URL the initial request is sent to

    def layer_parse(self, response):
        item = response.meta['item']  # take the item passed along via meta
        p = response.xpath('//*[@id="endText"]/p/text() | //*[@id="endText"]/p/b/b/text() | //*[@id="endText"]/p/b/text()')
        item['content'] = ''.join(p.extract())  # join the extracted text nodes into one string
        yield item  # hand the item to the pipelines for persistence

    def parse(self, response):
        ret = response.text
        ex = '"title":"(.*?)",.*?"docurl":"(.*?)",'  # regex: capture the title and the article URL
        news_list = re.findall(ex, ret, re.S)  # list of (title, docurl) tuples
        for i in news_list:
            item = WangyiproItem()  # instantiate an item object
            name = i[0].encode('iso-8859-1').decode('gbk')  # fix mojibake in the Chinese title
            name_url = i[1]
            item['name'] = name
            item['name_url'] = name_url
            yield scrapy.Request(url=name_url, callback=self.layer_parse, meta={'item': item})  # callback handles the detail page, meta passes the item to it
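
response.meta works on any Scrapy version; on Scrapy 1.7 and later the same hand-off can also be written with cb_kwargs, which delivers the item to the callback as a named argument. A minimal sketch, assuming a recent Scrapy (the XPath is shortened here):

    def parse(self, response):
        ...
        yield scrapy.Request(url=name_url, callback=self.layer_parse, cb_kwargs={'item': item})

    def layer_parse(self, response, item):  # 'item' arrives as a keyword argument
        item['content'] = ''.join(response.xpath('//*[@id="endText"]//text()').extract())
        yield item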

Defining the item (in items.py):

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class WangyiproItem(scrapy.Item):
    # define the fields for your item here like:
    name = scrapy.Field()      # every field must be declared as scrapy.Field()
    content = scrapy.Field()
    name_url = scrapy.Field()
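
An Item behaves like a dict whose keys are restricted to the declared fields; a quick sketch of what that means in practice (the values below are made up):

item = WangyiproItem()
item['name'] = 'some title'                       # assigning a declared field works
item['name_url'] = 'http://example.com/article'
# item['author'] = 'x'                            # raises KeyError: the field was never declared
print(dict(item))                                 # {'name': 'some title', 'name_url': 'http://example.com/article'}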

Configuring the settings file (settings.py):

# -*- coding: utf-8 -*-


# Scrapy settings for xioahuaPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html


BOT_NAME = 'xioahuaPro'


SPIDER_MODULES = ['xioahuaPro.spiders']
NEWSPIDER_MODULE = 'xioahuaPro.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'xioahuaPro (+http://www.yourdomain.com)'
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'


# Obey robots.txt rules
ROBOTSTXT_OBEY = False   # robots.txt is an anti-crawl measure; set False so Scrapy ignores it


# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32


# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16


# Disable cookies (enabled by default)
#COOKIES_ENABLED = False


# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False


# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}


# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'xioahuaPro.middlewares.XioahuaproSpiderMiddleware': 543,
#}


# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'xioahuaPro.middlewares.XioahuaproDownloaderMiddleware': 543,
#}


# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}


# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# The number (e.g. 300) is the priority: the smaller the value, the earlier the pipeline runs
ITEM_PIPELINES = {
    'xioahuaPro.pipelines.XioahuaproPipeline': 300,  # local file storage (runs first)
    # 'xioahuaPro.pipelines.MysqlPipeline': 301,
    # 'xioahuaPro.pipelines.RedisPipeline': 302,
}


# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False


# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'


LOG_LEVEL = 'ERROR'      # only log error-level messages
LOG_FILE = './log.txt'   # write the log to this file instead of the console
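
With LOG_LEVEL = 'ERROR' plus LOG_FILE set, anything below error level is suppressed and whatever is logged goes to ./log.txt rather than the console, so when a crawl finishes silently without yielding data, that file is usually the first place to look.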

Persisting items in pipelines (in pipelines.py):

# -*- coding: utf-8 -*-
# Define your item pipelines here
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
# Purpose: persist the parsed data to some storage backend.
import json
import pymysql
from redis import Redis


# store on the local disk
class XioahuaproPipeline(object):
    fp = None

    def open_spider(self, spider):
        print('Spider started!')
        self.fp = open('./xiaohua.txt', 'w', encoding='utf-8')

    # Implements the persistence step.
    # The item parameter receives the item object submitted by the spider file,
    # and this method is called once for every item received (i.e. many times).
    def process_item(self, item, spider):
        name = item['name']
        img_url = item['img_url']
        self.fp.write(name + ':' + img_url + '\n')
        # The return value passes the item on to the next pipeline in line.
        return item

    def close_spider(self, spider):
        print('Spider finished!')
        self.fp.close()


# store in a MySQL database
class MysqlPipeline(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        # If the table cannot store Chinese, fix the charset: alter table tableName convert to charset utf8;
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root', password='123', db='test')
        print(self.conn)

    def process_item(self, item, spider):
        self.cursor = self.conn.cursor()
        try:
            # parameterized SQL, so quotes inside the data cannot break the statement
            self.cursor.execute('insert into xiahua values (%s, %s)', (item['name'], item['img_url']))
            self.conn.commit()
        except Exception as e:
            print(e)
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()


# store in a Redis database
class RedisPipeline(object):
    conn = None

    def open_spider(self, spider):
        self.conn = Redis(host='127.0.0.1', port=6379)
        print(self.conn)

    def process_item(self, item, spider):
        dic = {
            'name': item['name'],
            'img_url': item['img_url']
        }
        print(dic)
        # recent redis-py versions only accept strings/bytes, so serialize the dict first
        self.conn.lpush('xiaohua', json.dumps(dic))  # save to the Redis list
        return item

    def close_spider(self, spider):
        pass  # the Redis connection needs no explicit close
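
For a quick look at the scraped data it is also possible to skip custom pipelines entirely and let Scrapy's built-in feed export dump the items to a file, e.g.:

scrapy crawl wangyi -o news.json

If the Chinese text shows up as \uXXXX escapes in the JSON, adding FEED_EXPORT_ENCODING = 'utf-8' to settings.py should help on reasonably recent Scrapy versions.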
