淘宝估计是爬虫界一直想要去尝试的网页,小彬自学爬虫已有一段时间,在掌握selenium后就一直想要本着学习交流的态度和淘宝程序员叔叔切磋一下,嘻嘻。

唉,没想到这一切磋就耗费了我一天时间,不过还好,最后也小有成就,并且这次收获也不少,尤其对于网页的反蜘蛛机制。
我先梳理下遇到的困难:

1·网页登入,淘宝账号登入需要滑动验证码;支付宝账号登入,却找不到密码输入框标签。唉,愁死个人了
2·成功登入后,下一页标签居然是变动的,这谁能想得到呀。唉,等我发现这个问题的时候已经下午了
3·页面加载问题,加载不完全,获取到的数据就不全,甚至连下一页的标签都找不到

解决方案:
1·要是选择淘宝账号登入的话,那就拿fiddler替换js吧,其他的小彬也没啥子办法;要是支付宝账号登入的话,,我,,扫码登行不行,实在找不到输入框标签呀
2·标签变动还好说啦,找到规律就好了。
3·页面加载那就设置时间等待吧,只要时间充足,一个页面你等一个小时也没问题。还需要考虑到网络问题,有时报错不是因为匹配不对,就是网不好

(小彬还是一个小小白,要是有大神看帖莫喷,毕竟从学Python到现在就没人带过,全靠自己摸索。对了,在爬取到十几页的时候被淘宝发现了,,,,,我也没再继续修改程序,获取到的数据不是太多)

数据分析:
数据分析小彬暂且还没学习,只能做些简单的分析:排名,云图之类的,勿怪


import time
import random
from selenium import webdriver
from urllib.parse import quote
from pyquery import PyQuery as PQ
from selenium.webdriver.common.by import By
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# Driver setup: point Selenium at a locally-installed 360Chrome
# (Chromium-based) binary instead of the default Chrome install.
__browser_url = r'D:\软件安装地址\360Chrome\Chrome\Application\360chrome.exe'
chrome_options = Options()
chrome_options.binary_location = __browser_url
driver = webdriver.Chrome(chrome_options=chrome_options)

# Window / wait setup: open the Alipay login page that trust-logs into Taobao.
wait = WebDriverWait(driver, 10)
driver.get(
    'https://auth.alipay.com/login/index.htm?loginScene=7&goto=https%3A%2F%2Fauth.alipay.com%2Flogin%2Ftaobao_trust_login.htm%3Ftarget%3Dhttps%253A%252F%252Flogin.taobao.com%252Fmember%252Falipay_sign_dispatcher.jhtml%253Ftg%253D&params=VFBMX3JlZGlyZWN0X3VybD0%3D')
driver.maximize_window()  # maximize the browser window
driver.implicitly_wait(3)
# Switch the login page to the second tab (account/password login).
driver.find_element_by_xpath('//*[@id="J-loginMethod-tabs"]/li[2]').click()
time.sleep(1)

# Type the account name; the 15-second sleep leaves time to finish the
# password / captcha / QR step manually in the opened browser window,
# since the password input could not be located programmatically.
driver.find_element_by_xpath('//*[@id="J-input-user"]').send_keys('XXXXXXXX')
time.sleep(15)
driver.find_element_by_class_name('ui-button').click()

# Search Taobao for the product ("篮球鞋" = basketball shoes).
driver.find_element_by_xpath('//*[@id="q"]').send_keys("篮球鞋")
driver.find_element_by_xpath('//*[@id="J_TSearchForm"]/div[1]/button').click()
wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#q')))  # wait until the page has fully loaded


def get_texts():
    """Parse the current search-result page and append each item's
    price, deal count, title, shop name and location to Taobao.txt.

    Reads the page source from the module-level ``driver``; one repr'd
    list is written per product card.
    """
    html = driver.page_source
    doc = PQ(html)
    # One entry per product card in the result list.
    # (Renamed from ``all`` — that shadowed the builtin.)
    items = doc('#mainsrp-itemlist .items .item').items()
    # Open the output file once per page instead of once per item.
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'a', encoding='utf-8') as f:
        for item in items:
            record = [
                item.find('.price').text(),
                item.find('.deal-cnt').text(),
                item.find('.title').text(),
                item.find('.shop').text(),
                item.find('.location').text()]
            print(record)
            f.write(str(record) + '\n')


def change_pages():
    """Scrape up to 100 result pages: save the current page's items,
    then click the "next page" control and repeat.
    """
    for i in range(1, 101):
        # Wait for the search box (page skeleton), plus a grace period so
        # the lazily-loaded item list is fully rendered before parsing.
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#q')))
        time.sleep(2)

        # Save the page we are currently on BEFORE navigating away.
        # The original clicked first and scraped after, so the very first
        # result page was never saved.
        get_texts()

        # The "next page" <li> index inside the pager shifts as more page
        # numbers become visible, so it is selected by current page number.
        if i in [1, 2, 3]:
            target = driver.find_element_by_xpath('//*[@id="mainsrp-pager"]/div/div/div/ul/li[8]/a')
        elif i in [4]:
            target = driver.find_element_by_xpath('//*[@id="mainsrp-pager"]/div/div/div/ul/li[9]/a')
        elif i in [5]:
            target = driver.find_element_by_xpath('//*[@id="mainsrp-pager"]/div/div/div/ul/li[10]/a')
        else:
            target = driver.find_element_by_xpath('//*[@id="mainsrp-pager"]/div/div/div/ul/li[11]/a')

        time.sleep(1)
        # Scroll the pager into view so the click is not intercepted.
        driver.execute_script("arguments[0].scrollIntoView();", target)
        time.sleep(3)
        target.click()

# Entry point. NOTE(review): login and search already ran at import time
# above, so the guard only protects the pagination loop.
if __name__ == '__main__':
    change_pages()

数据分析代码:

因为把 zip 配对结果转成字典时,字典会按键去重(重复的键只保留最后一个值),所以排名分析结果并不准确,正在寻找解决方法


import wordcloud, collections, jieba


def ana_prices():
    """Summarize prices saved in Taobao.txt: total, count and mean.

    Each line of Taobao.txt is the repr of a list whose first element is
    a price string such as '¥159.00'.
    """
    import ast  # local import: safe literal parsing without touching file-level imports

    prices = []
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f:
        for line in f:
            # literal_eval is safe on file content, unlike eval;
            # [1:] drops the leading currency symbol '¥'.
            prices.append(float(ast.literal_eval(line)[0][1:]))
    if not prices:
        # Guard: the original divided by len(prices) and crashed on an
        # empty data file.
        print("总价格:0", "\n" + "总数量:0")
        return
    print("总价格:" + str(sum(prices)), "\n" + "总数量:" + str(len(prices)),
          "\n" + "平均价格:" + str(sum(prices) / len(prices)))


def ana_area():
    """Rank seller locations and render a word cloud of them.

    Reads Taobao.txt (one repr'd record per line, index 4 = location),
    mirrors the locations into Areas.txt, prints the top-10 counts, and
    writes a word-cloud PNG to the desktop.
    """
    import ast  # local import: safe literal parsing without touching file-level imports

    # Seller-location ranking. The output file is opened once for the whole
    # loop — the original reopened it for every input line.
    areas = []
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f, \
            open('C://Users/Administrator/Desktop/Areas.txt', 'a', encoding='utf-8') as out:
        for line in f:
            area = ast.literal_eval(line)[4]
            areas.append(area)
            out.write(area)

    print("拥有篮球鞋店家最多的前十省份:")
    print(collections.Counter(areas).most_common(10))

    # Word-cloud rendering (context manager replaces manual open/close).
    with open('C://Users/Administrator/Desktop/Areas.txt', "r", encoding='utf-8') as f:
        t = f.read()
    ls = jieba.lcut(t)
    txt = " ".join(ls)
    # The font file must exist and be a valid TTF, otherwise WordCloud raises.
    w = wordcloud.WordCloud(font_path='simfang.ttf', width=1000, height=700,
                            background_color="white", )
    w.generate(txt)
    w.to_file("C://Users/Administrator/Desktop/" + "grwordcloud.png")


def price_number():
    """Print the ten highest deal counts with their price strings.

    Record layout per line: [price, deal_cnt, title, shop, location],
    where deal_cnt looks like '6000人付款'.
    """
    import ast  # local import: safe literal parsing without touching file-level imports

    prices = []
    raw_counts = []
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f:
        for line in f:
            record = ast.literal_eval(line)  # safe replacement for eval
            prices.append(record[0])
            raw_counts.append(record[1][:-3])  # strip the trailing '人付款'
    counts = []
    for raw in raw_counts:
        # NOTE(review): the original keeps only the first 4 chars of 5-char
        # counts — presumably to drop a '万' suffix; the x10000 scale is NOT
        # applied, so such values stay understated. Preserved as-is.
        if len(raw) == 5:
            raw = raw[0:4]
        counts.append(ast.literal_eval(raw))
    # Pair counts with prices WITHOUT building a dict: dict(zip(...))
    # silently dropped every record whose deal count repeated — the
    # inaccuracy the author notes in the post.
    print("购买量前十与其对应价格:")
    print(sorted(zip(counts, prices), key=lambda x: x[0], reverse=True)[0:10])


def number_area():
    """Print seller locations paired with deal counts, sorted by count.

    NOTE(review): the original left ``def number_area():`` commented out,
    so this body ran at import time and the ``number_area()`` call in the
    ``__main__`` guard raised NameError; restoring the def fixes both.
    """
    import ast  # local import: safe literal parsing without touching file-level imports

    numbers = []
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f:
        for line in f:
            numbers.append(ast.literal_eval(line)[1][:-3])  # strip '人付款'
    number = []
    for i in numbers:
        # 5-char counts presumably carry a '万' suffix; only the first four
        # characters are kept (scale not applied — see price_number).
        if len(i) == 5:
            i = i[0:4]
        number.append(ast.literal_eval(i))

    areas = []
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f:
        for line in f:
            areas.append(ast.literal_eval(line)[4])

    print(areas)
    print(number)
    # dict(zip(...)) keeps only the LAST count seen per area; kept as-is to
    # match the original output, though a Counter-style sum would be truer.
    c = dict(zip(areas, number))
    print("购买地前十与其对应购买数量:")
    print(sorted(c.items(), key=lambda x: x[1], reverse=True))


def number_shop():
    """Print the top-10 shops by deal count.

    Caveat (preserved from the original): dict(zip(shops, counts)) keeps
    only the last count seen per shop name, so duplicated names collapse.
    """
    import ast  # local import: safe literal parsing without touching file-level imports

    shops = []
    raw_counts = []
    # Single pass over the file — the original read it twice.
    with open('C://Users/Administrator/Desktop/Taobao.txt', 'r', encoding='utf-8') as f:
        for line in f:
            record = ast.literal_eval(line)  # safe replacement for eval
            shops.append(record[3])
            raw_counts.append(record[1][:-3])  # strip the trailing '人付款'
    counts = []
    for raw in raw_counts:
        # Presumably a '万'-suffixed value; scale not applied (see price_number).
        if len(raw) == 5:
            raw = raw[0:4]
        counts.append(ast.literal_eval(raw))
    c = dict(zip(shops, counts))
    print("购买店前十与其对应购买数量:")
    print(sorted(c.items(), key=lambda x: x[1], reverse=True)[0:10])

# Entry point for the analysis script.
if __name__ == '__main__':
    ana_prices()
    ana_area()
    price_number()
    # NOTE(review): ``def number_area():`` is commented out above, so this
    # call raises NameError — restore the function definition.
    number_area()
    number_shop()

结果展示:

(要是店名有侵权可以联系我删掉哦)


总价格:563562.77 
总数量:796 
平均价格:707.9934296482412

拥有篮球鞋店家最多的前十省份:
[('上海', 226), ('福建 泉州', 107), ('江苏 苏州', 48), ('美国', 30), ('河南 郑州', 29), ('浙江 杭州', 29), ('福建 厦门', 24), ('福建 莆田', 24), ('广东 广州', 23), ('福建 福州', 23)]

购买量前十与其对应价格:
[(6000, '¥159.00'), (5500, '¥409.00'), (5000, '¥219.00'), (4901, '¥138.00'), (4514, '¥179.00'), (4439, '¥168.00'), (3967, '¥158.00'), (3965, '¥219.00'), (3902, '¥288.00'), (3845, '¥329.00')]

购买地前十与其对应购买数量:
[('福建 漳州', 2612), ('湖北 襄阳', 1750), ('江苏 镇江', 990), ('湖南 长沙', 810), ('福建 福州', 468), ('福建 厦门', 424), ('福建 泉州', 370), ('江西 南昌', 344), ('天津', 313), ('山东 烟台', 304)]

购买店前十与其对应购买数量:
[('豪杰运动户外专营店', 6000), ('鹰伍旗舰店', 4901), ('微笑先生旗舰店', 4439), ('乔丹元翔专卖店', 3967), ('喜得宝电子商务', 3902), ('施杨杰1', 2741), ('熠峰运动专营店', 2612), ('何泉娟', 2609), ('匹克美淘淘专卖店', 2512), ('乔丹萌芽草专卖店', 2454)]

进阶4·淘宝商品爬取与分析(selenium)
进阶4·淘宝商品爬取与分析(selenium)

相关文章: