MongoDB sharding test:

1: Basics
1> OS: CentOS 6.6 (x86_64)

2> Node addresses
node-1  192.168.1.31
node-2  192.168.1.32
node-3  192.168.1.33

3> Download URL
curl -O https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel70-3.2.0.tgz
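Note: the rhel70 tarball above targets RHEL/CentOS 7. For the CentOS 6.6 system listed above, the matching build would be the rhel62 variant (same naming scheme, assumed available for 3.2.0):
curl -O https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel62-3.2.0.tgz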


2: Installation (run the same steps on every node)
tar zxvf mongodb-linux-x86_64-rhel70-3.2.0.tgz
/bin/mv mongodb-linux-x86_64-rhel70-3.2.0 /usr/local/mongodb
useradd mongodb -s /sbin/nologin
mkdir -p /data/mongodb

#Create the following directories: mongos, config, shard1, shard2, shard3.
#mongos stores no data, so it only needs a log directory.

#Create the mongos log directory
mkdir -p /data/mongodb/mongos/log

#Create the config server data and log directories
mkdir -p /data/mongodb/config/data
mkdir -p /data/mongodb/config/log

#Create the shard1 data and log directories
mkdir -p /data/mongodb/shard1/data
mkdir -p /data/mongodb/shard1/log

#Create the shard2 data and log directories
mkdir -p /data/mongodb/shard2/data
mkdir -p /data/mongodb/shard2/log

#Create the shard3 data and log directories
mkdir -p /data/mongodb/shard3/data
mkdir -p /data/mongodb/shard3/log
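The mongodb user created above does not yet own any of these directories. If you intend to run the mongod processes as that user rather than as root (this walkthrough starts them as root), hand over the data tree first:
chown -R mongodb:mongodb /data/mongodb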


#Port assignments:
#mongos==>20000
#config ==>21000
#shard1==>22001
#shard2==>22002
#shard3==>22003

 
#6 Start the config servers (run on every node):
/usr/local/mongodb/bin/mongod --configsvr --dbpath /data/mongodb/config/data --port 21000 --logpath /data/mongodb/config/log/config.log --fork
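A quick sanity check that each config server answers (run on each node; printjson makes the --eval result visible):
/usr/local/mongodb/bin/mongo 127.0.0.1:21000 --eval 'printjson(db.adminCommand({ ping: 1 }))'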

#7 Start the mongos routers (run on every node):
/usr/local/mongodb/bin/mongos  --configdb 192.168.1.31:21000,192.168.1.32:21000,192.168.1.33:21000  --port 20000   --logpath /data/mongodb/mongos/log/mongos.log --fork
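Optionally confirm that mongos is up and can reach its config servers before moving on:
/usr/local/mongodb/bin/mongo 127.0.0.1:20000 --eval 'sh.status()'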
 
#8 Start each shard's replica-set members:

#On every node, start the shard1 mongod (replica set shard1)
numactl --interleave=all /usr/local/mongodb/bin/mongod --shardsvr --replSet shard1 --port 22001 --dbpath /data/mongodb/shard1/data  \
--logpath /data/mongodb/shard1/log/shard1.log --fork --nojournal  --oplogSize 10

--nojournal is added here to turn journaling off: the test environment has no need to pre-allocate large journal files. Likewise, --oplogSize is set low to keep the local database small. The oplog is a fixed-size capped collection that lives in the "local" database and records replica-set operations. Note that these settings are for testing only!
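Once the replica sets have been initiated in step 9, you can confirm the tiny 10 MB oplog from any shard member:
/usr/local/mongodb/bin/mongo 127.0.0.1:22001 --eval 'db.printReplicationInfo()'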

#On every node, start the shard2 mongod (replica set shard2)
numactl --interleave=all /usr/local/mongodb/bin/mongod  --shardsvr --replSet shard2 --port 22002 --dbpath /data/mongodb/shard2/data  --logpath /data/mongodb/shard2/log/shard2.log --fork --nojournal  --oplogSize 10
 
#On every node, start the shard3 mongod (replica set shard3)
numactl --interleave=all /usr/local/mongodb/bin/mongod --shardsvr --replSet shard3 --port 22003 --dbpath /data/mongodb/shard3/data  --logpath /data/mongodb/shard3/log/shard3.log --fork --nojournal  --oplogSize 10

9 Configure the replica sets (from any one machine, run all three initiation blocks):
#------------------------------------------------------------------
/usr/local/mongodb/bin/mongo  127.0.0.1:22001   # run on the first machine

use admin
config = { _id:"shard1", members:[
                     {_id:0,host:"192.168.1.31:22001"},
                     {_id:1,host:"192.168.1.32:22001",arbiterOnly: true },
                     {_id:2,host:"192.168.1.33:22001"}
                ]
         }


rs.initiate(config);
#------------------------------------------------------------------
/usr/local/mongodb/bin/mongo  127.0.0.1:22002   # run on the second machine

use admin
config = { _id:"shard2", members:[
                     {_id:0,host:"192.168.1.31:22002" },
                     {_id:1,host:"192.168.1.32:22002" },
                     {_id:2,host:"192.168.1.33:22002",arbiterOnly: true }
                ]
         }


rs.initiate(config);
#------------------------------------------------------------------
/usr/local/mongodb/bin/mongo  127.0.0.1:22003   # run on the third machine

use admin
config = { _id:"shard3", members:[
                     {_id:0,host:"192.168.1.31:22003",arbiterOnly: true },
                     {_id:1,host:"192.168.1.32:22003"},
                     {_id:2,host:"192.168.1.33:22003"}
                ]
         }


rs.initiate(config);
#------------------------------------------------------------------
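After each rs.initiate(), give the election a few seconds and then check the member states; each set should settle into one PRIMARY, one SECONDARY, and one ARBITER:
/usr/local/mongodb/bin/mongo 127.0.0.1:22001 --eval 'rs.status().members.forEach(function(m){ print(m.name + "  " + m.stateStr); })'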

10 Add the shards (run from one node; the configuration propagates through the config servers)
#连接到mongos
/usr/local/mongodb/bin/mongo  127.0.0.1:20000
use admin
 
#Wire the router to the shard replica sets. If addshard reports an error, drop this machine's own instance from the member list and retry:
db.runCommand( { addshard : "shard1/192.168.1.31:22001,192.168.1.33:22001,192.168.1.32:22001",allowLocal:true});
db.runCommand( { addshard : "shard2/192.168.1.31:22002,192.168.1.33:22002,192.168.1.32:22002",allowLocal:true});
db.runCommand( { addshard : "shard3/192.168.1.31:22003,192.168.1.33:22003,192.168.1.32:22003",allowLocal:true});

PS:
If a shard is a single server, add it with db.runCommand( { addshard : "<hostname>[:<port>]" } );
if a shard is a replica set, use the form db.runCommand( { addshard : "replicaSetName/<hostname1>[:<port>][,<hostname2>[:<port>],...]" } );
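The sh.addShard() shell helper wraps the same command; with the names used in this test, for example:
mongos> sh.addShard("shard1/192.168.1.31:22001")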

#Check the shard server configuration
mongos> db.runCommand( { listshards : 1 } );
{
     "shards" : [
          {
               "_id" : "shard1",
               "host" : "shard1/192.168.1.192:22001,192.168.1.195:22001,192.168.1.196:22001"
          },
          {
               "_id" : "shard2",
               "host" : "shard2/192.168.1.192:22002,192.168.1.195:22002,192.168.1.196:22002"
          },
          {
               "_id" : "shard3",
               "host" : "shard3/192.168.1.192:22003,192.168.1.195:22003,192.168.1.196:22003"
          }
     ],
     "ok" : 1
}

11 Enable automatic sharding for inserted data. Connect to mongos and turn on sharding for the chosen database and collection.
#Enable sharding for the zhangluya database. Enabling it at the database level does not split any data by itself; each collection must also be sharded.
db.runCommand( { enablesharding :"zhangluya"});
 
#Specify the collection to shard within the database, and its shard key
db.runCommand({"shardcollection":"zhangluya.test1", "key":{"_id":"hashed"}});
 
#Here the test1 collection in zhangluya is sharded on a hashed _id, so documents are distributed automatically across shard1/shard2/shard3. Sharding must be enabled explicitly like this because not every MongoDB database or collection needs to be sharded.
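To confirm the collection is registered as sharded and on which key, you can read the metadata straight from the config database on mongos:
mongos> use config
mongos> db.collections.find({ _id: "zhangluya.test1" }).pretty()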


12 Test the sharding result
/usr/local/mongodb/bin/mongo  127.0.0.1:20000
use zhangluya

#Insert test data
for(i=0;i<10000000;i++){ db.test1.insert({"Uid":i,"Name":"jesse","Age":13,"Date":new Date()}); }
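Inserting ten million documents one at a time is very slow over the wire. A batched sketch using insertMany (available in the 3.2 shell) loads the same data far faster:
for (var i = 0; i < 10000000; i += 1000) {
    var batch = [];
    for (var j = i; j < i + 1000; j++) {
        batch.push({ Uid: j, Name: "jesse", Age: 13, Date: new Date() });
    }
    db.test1.insertMany(batch);   // one round trip per 1000 documents
}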

#Check the shard distribution:
db.test1.stats();

Sample output (captured from a smaller earlier run against a collection test_share.zly sharded on Uid, note its Uid_1 index; with a monotonically increasing range key almost all inserts land on one shard, which is exactly the skew a hashed _id key avoids):

mongos> db.zly.stats()
{
     "sharded" : true,
     "paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
     "userFlags" : 1,
     "capped" : false,
     "ns" : "test_share.zly",
     "count" : 10000,
     "numExtents" : 7,
     "size" : 1120000,
     "storageSize" : 2809856,
     "totalIndexSize" : 654080,
     "indexSizes" : {
          "Uid_1" : 302512,
          "_id_" : 351568
     },
     "avgObjSize" : 112,
     "nindexes" : 2,
     "nchunks" : 3,
     "shards" : {
          "shard1" : {
               "ns" : "test_share.zly",
               "count" : 8,
               "size" : 896,
               "avgObjSize" : 112,
               "numExtents" : 1,
               "storageSize" : 8192,
               "lastExtentSize" : 8192,
               "paddingFactor" : 1,
               "paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
               "userFlags" : 1,
               "capped" : false,
               "nindexes" : 2,
               "totalIndexSize" : 16352,
               "indexSizes" : {
                    "_id_" : 8176,
                    "Uid_1" : 8176
               },
               "ok" : 1,
               "$gleStats" : {
                    "lastOpTime" : Timestamp(0, 0),
                    "electionId" : ObjectId("55ae1204c1d7c7aa4293335f")
               }
          },
          "shard2" : {
               "ns" : "test_share.zly",
               "count" : 9991,
               "size" : 1118992,
               "avgObjSize" : 112,
               "numExtents" : 5,
               "storageSize" : 2793472,
               "lastExtentSize" : 2097152,
               "paddingFactor" : 1,
               "paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
               "userFlags" : 1,
               "capped" : false,
               "nindexes" : 2,
               "totalIndexSize" : 621376,
               "indexSizes" : {
                    "_id_" : 335216,
                    "Uid_1" : 286160
               },
               "ok" : 1,
               "$gleStats" : {
                    "lastOpTime" : Timestamp(0, 0),
                    "electionId" : ObjectId("55ae129f010c2d2fb034613e")
               }
          },
          "shard3" : {
               "ns" : "test_share.zly",
               "count" : 1,
               "size" : 112,
               "avgObjSize" : 112,
               "numExtents" : 1,
               "storageSize" : 8192,
               "lastExtentSize" : 8192,
               "paddingFactor" : 1,
               "paddingFactorNote" : "paddingFactor is unused and unmaintained in 3.0. It remains hard coded to 1.0 for compatibility only.",
               "userFlags" : 1,
               "capped" : false,
               "nindexes" : 2,
               "totalIndexSize" : 16352,
               "indexSizes" : {
                    "_id_" : 8176,
                    "Uid_1" : 8176
               },
               "ok" : 1,
               "$gleStats" : {
                    "lastOpTime" : Timestamp(0, 0),
                    "electionId" : ObjectId("55ae12dae0da8d2ad0616ebd")
               }
          }
     },
     "ok" : 1
}
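A more compact view of the same per-shard breakdown is the getShardDistribution() collection helper:
mongos> use zhangluya
mongos> db.test1.getShardDistribution()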


 
1 Remove a shard:
db.runCommand( { removeshard: "shard2" } )
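removeshard only starts draining the shard's chunks to the remaining shards. Rerun the command and watch its state field until it reports completed; if the drained shard is the primary shard for any database, move that database first with movePrimary.
mongos> use admin
mongos> db.runCommand( { removeshard: "shard2" } )   // repeat until "state" : "completed"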
 
2 View Balancer lock details (the locks collection lives in the config database):
use config
db.locks.find( { _id : "balancer" } ).pretty();
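The shell helper sh.isBalancerRunning() answers the same question without querying config.locks directly:
mongos> sh.isBalancerRunning()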
 
3 Check whether the Balancer is enabled:
use config
sh.getBalancerState();
 
4 Stop the Balancer:
use config
sh.stopBalancer();

5 Start the Balancer:
mongos> use config
mongos> sh.startBalancer();
mongos> sh.getBalancerState();
true
  
6 Set a Balancer activity window
By default the Balancer may run at any time. To reduce its impact on the system, you can restrict it to an activity window so that balancing happens only during the specified hours.
mongos> db.settings.update({_id:"balancer"},{$set:{activeWindow:{start:"01:00",stop:"06:00"}}},true);
mongos> db.settings.find();
{ "_id" : "chunksize", "value" : NumberLong(64) }
{ "_id" : "balancer", "stopped" : false, "activeWindow" : { "start" : "01:00", "stop" : "6:00" } }
 
Note: the update above lets the balancer run only between 01:00 and 06:00. If you set a window, make sure data balancing can actually finish within it.
  
7 Remove the Balancer activity window:
mongos> db.settings.update({ "_id" : "balancer" }, { $unset : { activeWindow : 1 }});

mongos> db.settings.find();
{ "_id" : "chunksize", "value" : 10 }
{ "_id" : "balancer", "stopped" : false }

   
 
Backup:
/usr/local/mongodb/bin/mongodump --host 192.168.1.31 --port 20000 -d zhangluya -o /tmp/
 
Restore:
/usr/local/mongodb/bin/mongorestore --host 192.168.1.31 --port 20000 -d zhangluya /tmp/zhangluya
 
#Check MongoDB replication status
db.printSlaveReplicationInfo()
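db.printSlaveReplicationInfo() reads the replica set's oplog, so run it against a shard member rather than mongos, e.g.:
/usr/local/mongodb/bin/mongo 192.168.1.31:22001 --eval 'db.printSlaveReplicationInfo()'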
 
 
