demo1 wc累加案例

  • pom.xml
  • 注意运行时报错java.lang.ClassNotFoundException: backtype.storm.topology.IRichSpout。注释掉作用域
  • 注意运行时报错java.lang.NoSuchMethodError: com.lmax.disruptor.RingBuffer.。指定lmax版本
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>0.9.3</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>com.lmax</groupId>
      <artifactId>disruptor</artifactId>
      <version>3.2.0</version>
    </dependency>
  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.util.List;
import java.util.Map;

public class WcSpout extends BaseRichSpout {
    private Map map;                                   // topology configuration handed over by Storm
    private TopologyContext topologyContext;           // runtime context (task ids, component info)
    private SpoutOutputCollector spoutOutputCollector; // used to emit tuples downstream
    int i = 0;                                         // running counter; its value is the tuple payload

    /** One-time initialization: stash the references Storm gives us for use in nextTuple(). */
    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.map = map;
        this.topologyContext = topologyContext;
        this.spoutOutputCollector = spoutOutputCollector;
    }

    /** Called repeatedly by Storm; emits an ever-increasing integer to the bolt, roughly once per second. */
    @Override
    public void nextTuple() {
        i++;
        List<Object> tuple = new Values(i);
        spoutOutputCollector.emit(tuple);
        System.err.println("spout-------------"+i);
        try {
            Thread.sleep(1000); // throttle emission to ~1 tuple/sec
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of swallowing it, so the
            // framework can shut this task down cleanly.
            Thread.currentThread().interrupt();
        }
    }

    /** Declares the single output field ("num") carried by each emitted tuple. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("num"));
    }
}
  • WcBolt.java
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;
import java.util.Map;

public class WcBolt extends BaseRichBolt {
    private Map map;
    private TopologyContext topologyContext;
    private OutputCollector outputCollector;
    int sum = 0; // running total of every "num" value received so far

    /** One-time setup: keep the references handed to us by the Storm framework. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
        this.topologyContext = topologyContext;
        this.map = map;
    }

    /** Receives each tuple emitted by the spout and accumulates its "num" field. */
    @Override
    public void execute(Tuple tuple) {
        Integer value = tuple.getIntegerByField("num");
        sum += value;
        System.out.println("sum-----------" + sum);
    }

    /** This bolt is a sink; it declares no output stream. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
  • 测试类Test.java
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;

public class Test {
    public static void main(String[] args) {
        // Wire the spout into the bolt with a shuffle grouping.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("wcSpout", new WcSpout());
        builder.setBolt("wcBolt", new WcBolt()).shuffleGrouping("wcSpout");

        // Run the topology inside an in-process local cluster.
        new LocalCluster().submitTopology("wc", new Config(), builder.createTopology());
    }
}
  • 测试结果
    21 Storm案例

demo2 WordCount单词统计案例

  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

import java.util.List;
import java.util.Map;
import java.util.Random;

public class WcSpout extends BaseRichSpout {

    private SpoutOutputCollector spoutOutputCollector; // handed to us in open()
    // Canned sentences used to simulate an endless stream of text.
    String[] text = {
            "hello welcome hadoop",
            "hello hadoop",
            "welcome storm"
    };
    Random r = new Random();

    /** Initialization hook: remember the collector for later emits. */
    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.spoutOutputCollector = spoutOutputCollector;
    }

    /** Emits one randomly chosen sentence per second, simulating an infinite article. */
    @Override
    public void nextTuple() {
        String sentence = text[r.nextInt(text.length)];
        List<Object> line = new Values(sentence);
        spoutOutputCollector.emit(line);
        System.err.println("spout---------" + line);
        Utils.sleep(1000);
    }

    /** Each emitted tuple carries a single field named "line". */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("line"));
    }
}
  • WcSplitBolt.java切分单词数据
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.List;
import java.util.Map;

public class WcSplitBolt extends BaseRichBolt {
    private OutputCollector outputCollector;

    /** Keep the collector so execute() can forward individual words. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
    }

    /** Splits each incoming line on spaces and emits every word as its own tuple. */
    @Override
    public void execute(Tuple tuple) {
        String line = tuple.getStringByField("line");
        for (String word : line.split(" ")) {
            outputCollector.emit(new Values(word));
        }
    }

    /** Downstream bolts receive a single field named "w" (one word). */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("w"));
    }
}
  • WcCountBolt
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

import java.util.HashMap;
import java.util.Map;

public class WcCountBolt extends BaseRichBolt {
    // Per-task tally of word -> occurrence count. Each parallel task keeps its
    // own map, which is why the topology must route with fieldsGrouping("w")
    // so the same word always reaches the same task.
    private Map<String, Integer> map = new HashMap<>();

    /** Nothing to set up: this bolt keeps only local state and prints to stdout. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
    }

    /** Increments the counter for the received word and logs the new total. */
    @Override
    public void execute(Tuple tuple) {
        String w = tuple.getStringByField("w");
        // Single get() instead of containsKey()+get(): one hash lookup, same result.
        Integer previous = map.get(w);
        int count = (previous == null) ? 1 : previous + 1;
        map.put(w, count);
        System.out.println(w + "-------" + count);
    }

    /** Terminal bolt: declares no output fields. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
  • Test.java测试类
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class Test {
    public static void main(String[] args) {
        TopologyBuilder tb = new TopologyBuilder();
        tb.setSpout("WcSpout", new WcSpout());
        //tb.setBolt("WcSplitBolt", new WcSplitBolt()).shuffleGrouping("WcSpout");
        //tb.setBolt("WcCountBolt", new WcCountBolt()).shuffleGrouping("WcSplitBolt");
        //多线程情况下,分发策略需要调整,下面的策略相同的key交给相同的bolt
        tb.setBolt("WcSplitBolt", new WcSplitBolt(), 3).shuffleGrouping("WcSpout");
        tb.setBolt("WcCountBolt", new WcCountBolt(),3).fieldsGrouping("WcSplitBolt",new Fields("w"));

        Config conf = new Config();
        conf.setDebug(false);
        conf.setMessageTimeoutSecs(30);
        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology(args[0], conf, tb.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        } else {
            LocalCluster lc = new LocalCluster();
            lc.submitTopology("wc", conf, tb.createTopology());
        }

    }
}
  • 测试结果
    21 Storm案例

数据流分组(即数据分发策略)

  • 1.Shuffle Grouping 
    • 随机分组,随机派发stream里面的tuple,保证每个bolt task接收到的tuple数目大致相同。
    • 轮询,平均分配 
  • 2.Fields Grouping
    • 按字段分组,比如,按"user-id"这个字段来分组,那么具有同样"user-id"的 tuple 会被分到相同的Bolt里的一个task, 而不同的"user-id"则可能会被分配到不同的task。 
  • 3.All Grouping
    • 广播发送,对于每一个tuple,所有的bolts都会收到 
  • 4.Global Grouping
    • 全局分组,把tuple分配给task id最低的task 。
  • 5.None Grouping
    • 不分组,这个分组的意思是说stream不关心到底怎样分组。目前这种分组和Shuffle grouping是一样的效果。 有一点不同的是storm会把使用none grouping的这个bolt放到这个bolt的订阅者同一个线程里面去执行(未来Storm如果可能的话会这样设计)。 
  • 6.Direct Grouping
    • 指向型分组, 这是一种比较特别的分组方法,用这种分组意味着消息(tuple)的发送者指定由消息接收者的哪个task处理这个消息。只有被声明为 Direct Stream 的消息流可以声明这种分组方法。而且这种消息tuple必须使用 emitDirect 方法来发射。消息处理者可以通过 TopologyContext 来获取处理它的消息的task的id (OutputCollector.emit方法也会返回task的id)  
  • 7.Local or shuffle grouping
    • 本地或随机分组。如果目标bolt有一个或者多个task与源bolt的task在同一个工作进程中,tuple将会被随机发送给这些同进程中的tasks。否则,和普通的Shuffle Grouping行为一致
  • 8.customGrouping
    • 自定义,相当于mapreduce那里自己去实现一个partition一样。

demo3 数据流分组案例

  • 数据源track.log
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:52
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:53
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:51
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017
  • pom.xml
  • 注意运行时报错java.lang.ClassNotFoundException: backtype.storm.topology.IRichSpout。注释掉作用域
  • 注意运行时报错java.lang.NoSuchMethodError: com.lmax.disruptor.RingBuffer.。指定lmax版本
    <dependency>
      <groupId>org.apache.storm</groupId>
      <artifactId>storm-core</artifactId>
      <version>0.9.3</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>com.lmax</groupId>
      <artifactId>disruptor</artifactId>
      <version>3.2.0</version>
    </dependency>
  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import java.util.List;
import java.util.Map;

public class WcSpout extends BaseRichSpout {
    private Map map;                                   // topology configuration handed over by Storm
    private TopologyContext topologyContext;           // runtime context (task ids, component info)
    private SpoutOutputCollector spoutOutputCollector; // used to emit tuples downstream
    int i = 0;                                         // running counter; its value is the tuple payload

    /** One-time initialization: stash the references Storm gives us for use in nextTuple(). */
    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.map = map;
        this.topologyContext = topologyContext;
        this.spoutOutputCollector = spoutOutputCollector;
    }

    /** Called repeatedly by Storm; emits an ever-increasing integer to the bolt, roughly once per second. */
    @Override
    public void nextTuple() {
        i++;
        List<Object> tuple = new Values(i);
        spoutOutputCollector.emit(tuple);
        System.err.println("spout-------------"+i);
        try {
            Thread.sleep(1000); // throttle emission to ~1 tuple/sec
        } catch (InterruptedException e) {
            // Fix: restore the interrupt flag instead of swallowing it, so the
            // framework can shut this task down cleanly.
            Thread.currentThread().interrupt();
        }
    }

    /** Declares the single output field ("num") carried by each emitted tuple. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("num"));
    }
}
  • WcBolt.java
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;
import java.util.Map;

public class WcBolt extends BaseRichBolt {
    private Map map;
    private TopologyContext topologyContext;
    private OutputCollector outputCollector;
    int sum = 0; // running total of every "num" value received so far

    /** One-time setup: keep the references handed to us by the Storm framework. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
        this.topologyContext = topologyContext;
        this.map = map;
    }

    /** Receives each tuple emitted by the spout and accumulates its "num" field. */
    @Override
    public void execute(Tuple tuple) {
        Integer value = tuple.getIntegerByField("num");
        sum += value;
        System.out.println("sum-----------" + sum);
    }

    /** This bolt is a sink; it declares no output stream. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
  • 测试类Test.java
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.topology.TopologyBuilder;

public class Test {
    public static void main(String[] args) {
        // Wire the spout into the bolt with a shuffle grouping.
        TopologyBuilder builder = new TopologyBuilder();
        builder.setSpout("wcSpout", new WcSpout());
        builder.setBolt("wcBolt", new WcBolt()).shuffleGrouping("wcSpout");

        // Run the topology inside an in-process local cluster.
        new LocalCluster().submitTopology("wc", new Config(), builder.createTopology());
    }
}
  • 测试结果
    21 Storm案例

demo2 WordCount单词统计案例

  • WcSpout.java
import backtype.storm.spout.SpoutOutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichSpout;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Values;
import backtype.storm.utils.Utils;

import java.util.List;
import java.util.Map;
import java.util.Random;

public class WcSpout extends BaseRichSpout {

    private SpoutOutputCollector spoutOutputCollector; // handed to us in open()
    // Canned sentences used to simulate an endless stream of text.
    String[] text = {
            "hello welcome hadoop",
            "hello hadoop",
            "welcome storm"
    };
    Random r = new Random();

    /** Initialization hook: remember the collector for later emits. */
    @Override
    public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
        this.spoutOutputCollector = spoutOutputCollector;
    }

    /** Emits one randomly chosen sentence per second, simulating an infinite article. */
    @Override
    public void nextTuple() {
        String sentence = text[r.nextInt(text.length)];
        List<Object> line = new Values(sentence);
        spoutOutputCollector.emit(line);
        System.err.println("spout---------" + line);
        Utils.sleep(1000);
    }

    /** Each emitted tuple carries a single field named "line". */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("line"));
    }
}
  • WcSplitBolt.java切分单词数据
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Fields;
import backtype.storm.tuple.Tuple;
import backtype.storm.tuple.Values;

import java.util.List;
import java.util.Map;

public class WcSplitBolt extends BaseRichBolt {
    private OutputCollector outputCollector;

    /** Keep the collector so execute() can forward individual words. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
        this.outputCollector = outputCollector;
    }

    /** Splits each incoming line on spaces and emits every word as its own tuple. */
    @Override
    public void execute(Tuple tuple) {
        String line = tuple.getStringByField("line");
        for (String word : line.split(" ")) {
            outputCollector.emit(new Values(word));
        }
    }

    /** Downstream bolts receive a single field named "w" (one word). */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
        outputFieldsDeclarer.declare(new Fields("w"));
    }
}
  • WcCountBolt
import backtype.storm.task.OutputCollector;
import backtype.storm.task.TopologyContext;
import backtype.storm.topology.OutputFieldsDeclarer;
import backtype.storm.topology.base.BaseRichBolt;
import backtype.storm.tuple.Tuple;

import java.util.HashMap;
import java.util.Map;

public class WcCountBolt extends BaseRichBolt {
    // Per-task tally of word -> occurrence count. Each parallel task keeps its
    // own map, which is why the topology must route with fieldsGrouping("w")
    // so the same word always reaches the same task.
    private Map<String, Integer> map = new HashMap<>();

    /** Nothing to set up: this bolt keeps only local state and prints to stdout. */
    @Override
    public void prepare(Map map, TopologyContext topologyContext, OutputCollector outputCollector) {
    }

    /** Increments the counter for the received word and logs the new total. */
    @Override
    public void execute(Tuple tuple) {
        String w = tuple.getStringByField("w");
        // Single get() instead of containsKey()+get(): one hash lookup, same result.
        Integer previous = map.get(w);
        int count = (previous == null) ? 1 : previous + 1;
        map.put(w, count);
        System.out.println(w + "-------" + count);
    }

    /** Terminal bolt: declares no output fields. */
    @Override
    public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
    }
}
  • Test.java测试类
import backtype.storm.Config;
import backtype.storm.LocalCluster;
import backtype.storm.StormSubmitter;
import backtype.storm.generated.AlreadyAliveException;
import backtype.storm.generated.InvalidTopologyException;
import backtype.storm.topology.TopologyBuilder;
import backtype.storm.tuple.Fields;

public class Test {
    public static void main(String[] args) {
        TopologyBuilder tb = new TopologyBuilder();
        tb.setSpout("WcSpout", new WcSpout());
        //tb.setBolt("WcSplitBolt", new WcSplitBolt()).shuffleGrouping("WcSpout");
        //tb.setBolt("WcCountBolt", new WcCountBolt()).shuffleGrouping("WcSplitBolt");
        //多线程情况下,分发策略需要调整,下面的策略相同的key交给相同的bolt
        tb.setBolt("WcSplitBolt", new WcSplitBolt(), 3).shuffleGrouping("WcSpout");
        tb.setBolt("WcCountBolt", new WcCountBolt(),3).fieldsGrouping("WcSplitBolt",new Fields("w"));

        Config conf = new Config();
        conf.setDebug(false);
        conf.setMessageTimeoutSecs(30);
        if (args.length > 0) {
            try {
                StormSubmitter.submitTopology(args[0], conf, tb.createTopology());
            } catch (AlreadyAliveException e) {
                e.printStackTrace();
            } catch (InvalidTopologyException e) {
                e.printStackTrace();
            }
        } else {
            LocalCluster lc = new LocalCluster();
            lc.submitTopology("wc", conf, tb.createTopology());
        }

    }
}
  • 测试结果
    21 Storm案例

数据流分组(即数据分发策略)

  • 1.Shuffle Grouping 
    • 随机分组,随机派发stream里面的tuple,保证每个bolt task接收到的tuple数目大致相同。
    • 轮询,平均分配 
  • 2.Fields Grouping
    • 按字段分组,比如,按"user-id"这个字段来分组,那么具有同样"user-id"的 tuple 会被分到相同的Bolt里的一个task, 而不同的"user-id"则可能会被分配到不同的task。 
  • 3.All Grouping
    • 广播发送,对于每一个tuple,所有的bolts都会收到 
  • 4.Global Grouping
    • 全局分组,把tuple分配给task id最低的task 。
  • 5.None Grouping
    • 不分组,这个分组的意思是说stream不关心到底怎样分组。目前这种分组和Shuffle grouping是一样的效果。 有一点不同的是storm会把使用none grouping的这个bolt放到这个bolt的订阅者同一个线程里面去执行(未来Storm如果可能的话会这样设计)。 
  • 6.Direct Grouping
    • 指向型分组, 这是一种比较特别的分组方法,用这种分组意味着消息(tuple)的发送者指定由消息接收者的哪个task处理这个消息。只有被声明为 Direct Stream 的消息流可以声明这种分组方法。而且这种消息tuple必须使用 emitDirect 方法来发射。消息处理者可以通过 TopologyContext 来获取处理它的消息的task的id (OutputCollector.emit方法也会返回task的id)  
  • 7.Local or shuffle grouping
    • 本地或随机分组。如果目标bolt有一个或者多个task与源bolt的task在同一个工作进程中,tuple将会被随机发送给这些同进程中的tasks。否则,和普通的Shuffle Grouping行为一致
  • 8.customGrouping
    • 自定义,相当于mapreduce那里自己去实现一个partition一样。

demo3 数据流分组案例

  • 数据源track.log
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:51
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:52
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:53
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 09:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 12:40:49
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 11:40:49
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:51
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:53
www.taobao.com  VVVYH6Y4V4SFXZ56JIPDPB4V678 2017-02-21 08:40:50
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 12:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 11:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:50
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017-02-21 08:40:53
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 08:40:52
www.taobao.com  CYYH6Y2345GHI899OFG4V9U567  2017-02-21 08:40:51
www.taobao.com  ABYH6Y4V4SCVXTG6DPB4VH9U123 2017-02-21 10:40:49
www.taobao.com  BBYH61456FGHHJ7JL89RG5VV9UYU7   2017-02-21 09:40:49
www.taobao.com  XXYH6YCGFJYERTT834R52FDXV9U34   2017

相关文章:

  • 2022-01-13
  • 2021-10-29
  • 2021-12-23
  • 2021-06-16
  • 2021-08-19
  • 2022-12-23
  • 2022-01-20
猜你喜欢
  • 2021-05-29
  • 2021-05-04
  • 2021-10-18
  • 2021-07-01
  • 2022-03-09
  • 2022-12-23
相关资源
相似解决方案