Some pages embed content in frames. With Selenium + PhantomJS, the content inside an iframe is not loaded into the main document automatically: an iframe is effectively a second page loaded inside the page, so you have to switch into it with Selenium's switch_to.frame() method (the official docs mention switch_to_frame(), but the IDE flags it as deprecated in favor of switch_to.frame()).
For example:
driver.switch_to.frame('g_iframe')
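A minimal sketch of entering and leaving an iframe (the page URL below is hypothetical; 'g_iframe' is the frame name/id from the example above):

from selenium import webdriver

driver = webdriver.PhantomJS()
driver.get('http://example.com/page-with-iframe')  # hypothetical page that embeds the iframe
driver.switch_to.frame('g_iframe')  # subsequent DOM lookups now run inside the iframe
inner_html = driver.execute_script("return document.documentElement.outerHTML")
driver.switch_to.default_content()  # switch back to the top-level document
driver.quit()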
1. Introduction
This example uses Selenium + PhantomJS to crawl news from lmtw.com (流媒体网, http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard=%E7%94%B5%E8%A7%86): given a set of keywords, it scrapes the matching news items.
Given keywords: 数字 (digital); 融合 (convergence); 电视 (TV)
The scraped fields are (the per-keyword search URLs are built as sketched after this list):
1. News title
2. News link
3. News date
4. News source
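As a minimal sketch (mirroring the full code in section 4), the per-keyword search URLs are built by percent-encoding each UTF-8 keyword with urllib.quote; the keyboard= query parameter carries the keyword (Python 2):

# -*- coding: utf-8 -*-
import urllib

websearch_url = 'http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard='
keyword_list = ['数字', '融合', '电视']

start_urls = [websearch_url + urllib.quote(kw) for kw in keyword_list]
for url in start_urls:
    print url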
2. Website information

3. Data scraping
Based on the website structure shown above, the data is scraped as follows:
1. First, scrape the news list
Scraping code: Elements = doc('h2[class="r"]')
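For context, doc is a PyQuery object built from the PhantomJS-rendered page. A minimal sketch of how it is obtained, following the full code in section 4 (the result list is rendered inside an iframe named 'iframepage'):

from selenium import webdriver
from pyquery import PyQuery as pq

search_url = 'http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard=%E7%94%B5%E8%A7%86'
driver = webdriver.PhantomJS()
driver.get(search_url)
driver.switch_to.frame('iframepage')  # the search results live inside this iframe
selenium_html = driver.execute_script("return document.documentElement.outerHTML")
doc = pq(selenium_html)
Elements = doc('h2[class="r"]')  # one h2.r element per news item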
2. Scrape the title
Scraping code: title = element.find('a').text().encode('utf8').strip()
3. Scrape the link
Scraping code: url = element.find('a').attr('href')
4. Scrape the date
Scraping code: dateElements = doc('span[class="a"]')
5. Scrape the source (here dochtml is a PyQuery object built from the article detail page, not the search results page)
Scraping code: strSources = dochtml('div[class="from"]').text().encode('utf8').strip()
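Putting steps 2 through 5 together: each h2.r title is paired positionally with the span.a element that holds its date, and the source is read from the article detail page itself. A condensed sketch continuing from the previous one (driver, pq and Elements as above):

dateElements = doc('span[class="a"]')  # dates run parallel to the h2.r titles
for index, element in enumerate(Elements.items()):
    date = dateElements[index].text
    title = element.find('a').text().encode('utf8').strip()
    url = element.find('a').attr('href')

    driver.get(url)  # open the article page to read its source
    dochtml = pq(driver.execute_script("return document.documentElement.outerHTML"))
    strSources = dochtml('div[class="from"]').text().encode('utf8').strip()
    # '来源:' is 9 bytes in UTF-8 (two CJK characters plus a full-width colon), so +9 skips the label
    txtsource = strSources[strSources.find('来源:') + 9:]
    source = txtsource[0:txtsource.find(' ')]  # keep the text up to the first space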
4. Complete code
# coding=utf-8
import os
import re
from selenium import webdriver
import selenium.webdriver.support.ui as ui
import time
import datetime
from pyquery import PyQuery as pq
import LogFile
import mongoDB
import urllib


class lmtwSpider(object):
    def __init__(self, driver, log, keyword_list, websearch_url, valid, filter):
        '''
        :param driver: webdriver instance
        :param log: logger
        :param keyword_list: list of keywords
        :param websearch_url: search URL
        :param valid: scraping window: 0 = today's items; 1 = yesterday's (the code also handles 2 = the day before yesterday)
        :param filter: filter term; any news item whose title contains it is dropped
        '''
        self.log = log
        self.driver = driver
        self.webSearchUrl_list = websearch_url.split(';')
        self.keyword_list = keyword_list
        self.db = mongoDB.mongoDbBase()
        self.valid = valid
        self.start_urls = []
        # filter term
        self.filter = filter
        for keyword in keyword_list:
            url = websearch_url + urllib.quote(keyword)
            self.start_urls.append(url)

    def Compare_to_days(self, leftdate, rightdate):
        '''
        Compare two date strings; returns how many days leftdate is ahead of rightdate.
        :param leftdate: format: 2017-04-15
        :param rightdate: format: 2017-04-15
        :return: number of days
        '''
        l_time = time.mktime(time.strptime(leftdate, '%Y-%m-%d'))
        r_time = time.mktime(time.strptime(rightdate, '%Y-%m-%d'))
        result = int(l_time - r_time) / 86400
        return result

    def date_isValid(self, strDateText):
        '''
        Check whether a date string falls inside the scraping window set by self.valid.
        :param strDateText: e.g. '2017-06-20 10:22 '
        :return: (True, date string) if valid; (False, '') otherwise
        '''
        currentDate = time.strftime('%Y-%m-%d')
        datePattern = re.compile(r'\d{4}-\d{2}-\d{2}')
        strDate = re.findall(datePattern, strDateText)
        if len(strDate) == 1:
            if self.valid == 0 and self.Compare_to_days(currentDate, strDate[0]) == 0:
                return True, currentDate
            elif self.valid == 1 and self.Compare_to_days(currentDate, strDate[0]) == 1:
                return True, str(datetime.datetime.now() - datetime.timedelta(days=1))[0:10]
            elif self.valid == 2 and self.Compare_to_days(currentDate, strDate[0]) == 2:
                return True, str(datetime.datetime.now() - datetime.timedelta(days=2))[0:10]
        return False, ''

    def log_print(self, msg):
        '''
        Console logging helper.
        :param msg: log message
        '''
        print '%s: %s' % (time.strftime('%Y-%m-%d %H-%M-%S'), msg)

    def scrapy_date(self):
        try:
            strsplit = '------------------------------------------------------------------------------------'
            keywordIndex = 0
            for link in self.start_urls:
                self.driver.get(link)
                # the result list is rendered inside this iframe
                self.driver.switch_to.frame('iframepage')
                keyword = self.keyword_list[keywordIndex]
                keywordIndex += 1
                selenium_html = self.driver.execute_script("return document.documentElement.outerHTML")
                doc = pq(selenium_html)
                infoList = []

                self.log.WriteLog(strsplit)
                self.log_print(strsplit)
                Elements = doc('h2[class="r"]')
                dateElements = doc('span[class="a"]')
                index = 0
                for element in Elements.items():
                    date = dateElements[index].text
                    index += 1
                    flag, strDate = self.date_isValid(date)
                    if flag:
                        title = element.find('a').text().encode('utf8').strip()
                        # skip titles containing the filter term
                        if title.find(self.filter) > -1:
                            continue
                        if title.find(keyword) > -1:
                            url = element.find('a').attr('href')
                            dictM = {'title': title, 'date': strDate,
                                     'url': url, 'keyword': keyword, 'introduction': title}
                            infoList.append(dictM)

                if len(infoList) > 0:
                    for item in infoList:
                        item['sourceType'] = 1
                        url = item['url']
                        self.driver.get(url)
                        # self.driver.switch_to.frame('iframepage')
                        htext = self.driver.execute_script("return document.documentElement.outerHTML")
                        dochtml = pq(htext)
                        strSources = dochtml('div[class="from"]').text().encode('utf8').strip()
                        # '来源:' is 9 bytes in UTF-8, so +9 skips past the label
                        txtsource = strSources[strSources.find('来源:') + 9:]
                        item['source'] = txtsource[0:txtsource.find(' ')]

                        self.log.WriteLog('title:%s' % item['title'])
                        self.log.WriteLog('url:%s' % item['url'])
                        self.log.WriteLog('date:%s' % item['date'])
                        self.log.WriteLog('source:%s' % item['source'])
                        self.log.WriteLog('kword:%s' % item['keyword'])
                        self.log.WriteLog(strsplit)

                        self.log_print('title:%s' % item['title'])
                        self.log_print('url:%s' % item['url'])
                        self.log_print('date:%s' % item['date'])
                        self.log_print('source:%s' % item['source'])
                        self.log_print('kword:%s' % item['keyword'])
                        self.log_print(strsplit)
                    self.db.SaveInformations(infoList)
        except Exception, e:
            self.log.WriteLog('lmtwSpider:' + e.message)
        finally:
            pass
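A minimal usage sketch. LogFile and mongoDB are the author's own helper modules; the stand-in logger below only assumes the WriteLog(msg) method the spider actually calls, and running the spider still requires the author's mongoDB module (its constructor instantiates mongoDB.mongoDbBase()). The filter term is hypothetical:

# -*- coding: utf-8 -*-
from selenium import webdriver

class ConsoleLog(object):
    # stand-in for the author's LogFile module: anything with WriteLog(msg) works
    def WriteLog(self, msg):
        print msg

driver = webdriver.PhantomJS()
websearch_url = 'http://www.lmtw.com/search.php?show=title%2Ckeyboard%2Cwriter&searchget=1&keyboard='
keywords = ['数字', '融合', '电视']

# valid=0 keeps only today's items; filter='广告' (hypothetical) drops titles containing it
spider = lmtwSpider(driver, ConsoleLog(), keywords, websearch_url, valid=0, filter='广告')
spider.scrapy_date()
driver.quit()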