Overall architecture:
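The topology used in this article, summarized in text form (each of the three physical servers hosts one mongos, one config server member, one shard1 member, and one shard2 member; ports as used in the steps below):
192.168.88.20 (mongo01.com / mongo02.com): mongos :27017, config server :27019, shard1 :27010, shard2 :27011
192.168.88.24 (mongo03.com / mongo04.com): mongos :27017, config server :27019, shard1 :27010, shard2 :27011
192.168.88.26 (mongo05.com / mongo06.com): mongos :27017, config server :27019, shard1 :27010, shard2 :27011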
1. Configure host and domain names
Server1:
OS version: CentOS Linux release 8.5.2111
hostnamectl --static set-hostname mongo01
vi /etc/sysconfig/network
# Created by anaconda
hostname=mongo01
echo "192.168.88.20 mong1 mongo01.com mongo02.com" >> /etc/hosts
echo "192.168.88.24 mong2 mongo03.com mongo04.com" >> /etc/hosts
echo "192.168.88.26 mong3 mongo05.com mongo06.com" >> /etc/hosts
Disable the firewall (this should be done on all three servers):
systemctl stop firewalld
systemctl disable firewalld
Server2:
OS version: CentOS Linux release 8.5.2111
hostnamectl --static set-hostname mongo2
vi /etc/sysconfig/network
# Created by anaconda
hostname=mongo2
echo "192.168.88.20 mong1 mongo01.com mongo02.com" >> /etc/hosts
echo "192.168.88.24 mong2 mongo03.com mongo04.com" >> /etc/hosts
echo "192.168.88.26 mong3 mongo05.com mongo06.com" >> /etc/hosts
Server3:
OS version: CentOS Linux release 8.5.2111
hostnamectl --static set-hostname mongo3
vi /etc/sysconfig/network
# Created by anaconda
hostname=mongo3
echo "192.168.88.20 mong1 mongo01.com mongo02.com" >> /etc/hosts
echo "192.168.88.24 mong2 mongo03.com mongo04.com" >> /etc/hosts
echo "192.168.88.26 mong3 mongo05.com mongo06.com" >> /etc/hosts
2. Prepare the shard directories
Create the data directories on each server. /data is used here; a different location can be used if your requirements call for it.
Run on mongo01.com, mongo03.com, and mongo05.com:
mkdir -p /data/shard1/db /data/shard1/log /data/config/db /data/config/log
Run on mongo02.com, mongo04.com, and mongo06.com:
mkdir -p /data/shard2/db /data/shard2/log /data/mongos
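After these commands the data layout looks roughly as follows (shard1 and config directories live under the mongo01/03/05.com names, shard2 and mongos directories under mongo02/04/06.com, i.e. the same three physical servers):
/data/config/db, /data/config/log      # config server member
/data/shard1/db, /data/shard1/log      # shard1 member
/data/shard2/db, /data/shard2/log      # shard2 member
/data/mongos                           # mongos log directory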
3. Download the MongoDB installation package
Download MongoDB Community Server | MongoDB
Upload the installation package to the servers. Because of compatibility issues between the CentOS release on my test servers and newer MongoDB builds, I settled on MongoDB 4.4.13. Perform the same installation on every server.
Upload the installation file to:
/root/install/mongodb-linux-x86_64-rhel80-4.4.13.tgz
cd /root/install
mkdir -p /opt/db
tar -xvf mongodb-linux-x86_64-rhel80-4.4.13.tgz -C /opt/db
cd /opt/db
mv mongodb-linux-x86_64-rhel80-4.4.13 mongodb-4.4.13
rm -rf /usr/bin/mongod
ln -s /opt/db/mongodb-4.4.13/bin/mongod /usr/bin/mongod
vi /etc/profile
...
# add the following line at the end of the file
export PATH=/opt/db/mongodb-4.4.13/bin:$PATH
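Reload the profile and confirm that the new binaries are picked up; the expected output assumes the 4.4.13 tarball installed above:
source /etc/profile        # make the new PATH take effect in the current shell
which mongod               # should resolve to /opt/db/mongodb-4.4.13/bin/mongod
mongod --version           # should report db version v4.4.13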
4. Set up the shards
4.1 Create the first shard's replica set. Run the following on each of the three servers (mongo01.com / mongo03.com / mongo05.com):
[root@mongo1 db]# mongod --bind_ip 0.0.0.0 --replSet shard1 --dbpath /data/shard1/db --logpath /data/shard1/log/mongod.log --port 27010 --fork --shardsvr --wiredTigerCacheSizeGB 1
about to fork child process, waiting until server is ready for connections.
forked process: 5473
child process started successfully, parent exiting
Note: --shardsvr marks this mongod as a shard replica set member (and, if --port is not given, changes the default port to 27018). In recent MongoDB versions it is still expected for shard members rather than deprecated, so consult the documentation for your specific version before omitting it.
ps -ef |grep mongod
root 5473 1 3 01:02 ? 00:00:26 mongod --bind_ip 0.0.0.0 --replSet shard1 --dbpath /data/shard1/db --logpath /data/shard1/log/mongod.log --port 27010 --fork --shardsvr --wiredTigerCacheSizeGB 1
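The same shard node can also be described in a config file instead of a long command line. The sketch below mirrors the flags used above; the file path /data/shard1/mongod.conf is my own choice, not part of the original setup:
# write a config file equivalent to the command-line flags above (hypothetical path)
cat > /data/shard1/mongod.conf <<'EOF'
systemLog:
  destination: file
  path: /data/shard1/log/mongod.log
  logAppend: true
net:
  bindIp: 0.0.0.0
  port: 27010
processManagement:
  fork: true
storage:
  dbPath: /data/shard1/db
  wiredTiger:
    engineConfig:
      cacheSizeGB: 1
replication:
  replSetName: shard1
sharding:
  clusterRole: shardsvr
EOF
# then the node could be started with:
mongod -f /data/shard1/mongod.conf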
4.2 Initialize the shard replica set:
# open a mongo shell
mongo mongo01.com:27010
# initialize the shard1 replica set
rs.initiate({
  _id: "shard1",
  "members" : [
    { "_id": 0, "host" : "mongo01.com:27010" },
    { "_id": 1, "host" : "mongo03.com:27010" },
    { "_id": 2, "host" : "mongo05.com:27010" }
  ]
})
Check the replication status:
shard1:PRIMARY> rs.status()
{
"set" : "shard1",
"date" : ISODate("2024-06-16T07:47:41.418Z"),
"myState" : 1,
"term" : NumberLong(1),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"lastCommittedWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"readConcernMajorityWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"appliedOpTime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"lastAppliedWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastDurableWallTime" : ISODate("2024-06-16T07:47:33.075Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1718524013, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2024-06-16T07:37:52.589Z"),
"electionTerm" : NumberLong(1),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1718523462, 1),
"t" : NumberLong(-1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2024-06-16T07:37:52.780Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2024-06-16T07:37:53.499Z")
},
"members" : [
{
"_id" : 0,
"name" : "mongo01.com:27010",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 629,
"optime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-06-16T07:47:33Z"),
"lastAppliedWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastDurableWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1718523472, 1),
"electionDate" : ISODate("2024-06-16T07:37:52Z"),
"configVersion" : 1,
"configTerm" : -1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongo03.com:27010",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 599,
"optime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-06-16T07:47:33Z"),
"optimeDurableDate" : ISODate("2024-06-16T07:47:33Z"),
"lastAppliedWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastDurableWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastHeartbeat" : ISODate("2024-06-16T07:47:40.602Z"),
"lastHeartbeatRecv" : ISODate("2024-06-16T07:47:39.815Z"),
"pingMs" : NumberLong(1),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongo01.com:27010",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : -1
},
{
"_id" : 2,
"name" : "mongo05.com:27010",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 599,
"optime" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1718524053, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2024-06-16T07:47:33Z"),
"optimeDurableDate" : ISODate("2024-06-16T07:47:33Z"),
"lastAppliedWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastDurableWallTime" : ISODate("2024-06-16T07:47:33.075Z"),
"lastHeartbeat" : ISODate("2024-06-16T07:47:40.902Z"),
"lastHeartbeatRecv" : ISODate("2024-06-16T07:47:39.927Z"),
"pingMs" : NumberLong(1),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "mongo01.com:27010",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : -1
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1718524053, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1718524053, 1)
}
shard1:PRIMARY>
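The full rs.status() output above is verbose; when only the member states matter, a one-line check such as the following (run from any of the servers) is enough:
# print just "host state" for each shard1 member; expect one PRIMARY and two SECONDARYs
mongo --quiet mongo01.com:27010 --eval 'rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr); })'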
4.3 Create the config server replica set
Run the following command on the three servers (mongo01.com / mongo03.com / mongo05.com):
mongod --bind_ip 0.0.0.0 --replSet config --dbpath /data/config/db \
--logpath /data/config/log/mongod.log --port 27019 --fork \
--configsvr --wiredTigerCacheSizeGB 1
[root@mongo1 db]# mongod --bind_ip 0.0.0.0 --replSet config --dbpath /data/config/db \
> --logpath /data/config/log/mongod.log --port 27019 --fork \
> --configsvr --wiredTigerCacheSizeGB 1
about to fork child process, waiting until server is ready for connections.
forked process: 36934
child process started successfully, parent exiting
4.4 Initialize the config server replica set
# open a mongo shell
mongo mongo01.com:27019
# initialize the config replica set
rs.initiate({
  _id: "config",
  configsvr: true,  // required when initiating a config server replica set
  "members" : [
    { "_id": 0, "host" : "mongo01.com:27019" },
    { "_id": 1, "host" : "mongo03.com:27019" },
    { "_id": 2, "host" : "mongo05.com:27019" }
  ]
})
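mongos in the next step needs to reach the config replica set, so it is worth confirming that the config replica set has elected a primary before moving on; the same style of check works here:
# confirm the config replica set has a PRIMARY before starting mongos
mongo --quiet mongo01.com:27019 --eval 'rs.status().members.forEach(function(m){ print(m.name + " " + m.stateStr); })'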
5. Set up mongos
5.1 Start mongos, specifying the config replica set
Run the following command on mongo01.com / mongo03.com / mongo05.com:
# start mongos, pointing it at the config replica set
mongos --bind_ip 0.0.0.0 --logpath /data/mongos/mongos.log --port 27017 --fork \
--configdb config/mongo01.com:27019,mongo03.com:27019,mongo05.com:27019
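A quick way to confirm that each mongos started and is listening on its port:
ps -ef | grep mongos        # the mongos process should be running
ss -lntp | grep 27017       # and listening on port 27017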
5.2 Add the first shard through mongos
# connect to mongos
mongo mongo01.com:27017
# add the shard
mongos>sh.addShard("shard1/mongo01.com:27010,mongo03.com:27010,mongo05.com:27010")
# check the sharding status
mongos>sh.status()
6. Create a sharded collection
Connect to mongos and create a sharded collection:
mongo mongo01.com:27017
mongos>sh.status()
# a collection can only be sharded after sharding has been enabled on its database
mongos>sh.enableSharding("company")
# run shardCollection to shard the collection
mongos>sh.shardCollection("company.emp", {_id: 'hashed'})
mongos>sh.status()
# insert test data
use company
for (var i = 0; i < 10000; i++) {
db.emp.insert({i: i});
}
# check how the data is distributed
db.emp.getShardDistribution()
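With a hashed shard key on an empty collection, the number of initial chunks can also be set explicitly through the options argument of sh.shardCollection. A minimal sketch, using a hypothetical collection company.emp2 and a chunk count chosen only for illustration:
mongos>sh.shardCollection("company.emp2", {_id: "hashed"}, false, {numInitialChunks: 4})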
7. Create the second shard's replica set
Run the following commands on mongo02.com / mongo04.com / mongo06.com:
mkdir -p /data/shard2/db /data/shard2/log/
mongod --bind_ip 0.0.0.0 --replSet shard2 --dbpath /data/shard2/db \
--logpath /data/shard2/log/mongod.log --port 27011 --fork \
--shardsvr --wiredTigerCacheSizeGB 1
Initialize the second shard's replica set:
# open a mongo shell
mongo mongo06.com:27011
# initialize the shard2 replica set
rs.initiate({
  _id: "shard2",
  "members" : [
    { "_id": 0, "host" : "mongo06.com:27011" },
    { "_id": 1, "host" : "mongo02.com:27011" },
    { "_id": 2, "host" : "mongo04.com:27011" }
  ]
})
# check the replica set status
rs.status()
Add the second shard through mongos:
# connect to mongos
mongo mongo01.com:27017
# add the shard
mongos>sh.addShard("shard2/mongo02.com:27011,mongo04.com:27011,mongo06.com:27011")
# check the sharding status
mongos>sh.status()
8. Verify the cluster
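A minimal set of checks for the finished cluster, using the host names and ports from the steps above (the exact per-shard document counts depend on how far the balancer has redistributed the chunks):
# cluster overview from mongos: both shards should be listed
mongo mongo01.com:27017 --eval 'sh.status()'
# distribution of the test collection between shard1 and shard2
mongo mongo01.com:27017 --eval 'db.getSiblingDB("company").emp.getShardDistribution()'
# document counts seen directly on one member of each shard (works as-is if that member is the primary)
mongo mongo01.com:27010 --eval 'db.getSiblingDB("company").emp.count()'
mongo mongo06.com:27011 --eval 'db.getSiblingDB("company").emp.count()'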
9. Notes
The main purpose of this article is to record the installation process and to serve as a reference for anyone with similar needs.
The following documents by other authors were consulted: 猿创征文|MongoDB数据库 分片集群搭建部署实战_搭建mongodb分片集群-CSDN博客
MongoDB分片集群搭建_哔哩哔哩_bilibili