1. Software Deployment
Install MongoDB on pc1, pc2, and pc3. The steps on each node are as follows:
[root@pc1 ~]# tail /etc/security/limits.conf
mongod soft nproc 40000
* hard nofile 1000000
* soft nofile 1000000
* soft core unlimited
* soft stack 10240
* - nofile 65535
push - nproc 65535
push - nofile 320000
work - nproc 10000
[root@pc1 ~]# tail /etc/security/limits.d/90-nproc.conf
* soft nproc 1024
root soft nproc unlimited
[root@pc1 ~]# cat /etc/yum.repos.d/mongodb.repo
[mongodb-org-3.2]
name=MongoDB Repository
baseurl=https://repo.mongodb.org/yum/amazon/2013.03/mongodb-org/3.2/x86_64/
gpgcheck=1
enabled=1
gpgkey=https://www.mongodb.org/static/pgp/server-3.2.asc
[root@pc1 ~]# yum -y install mongodb-org
[root@pc1 ~]# chkconfig mongod off
[root@pc1 ~]# mkdir -p /data/mongodb/{config,data/{config,logs,shard{1,2,3}_1}}
[root@pc1 ~]# chown -R mongod.mongod /data/mongodb
[root@pc1 ~]# cp /etc/mongod.conf /data/mongodb/config/shard1.conf
[root@pc1 ~]# tree /data/
/data/
└── mongodb
├── config
│ └── shard1.conf
└── data
├── config
├── logs
├── shard1_1
├── shard2_1
└── shard3_1
2. Service Configuration
The configuration file and init script for replica set 1 (shard1) are as follows:
[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard1.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard1.log
storage:
  dbPath: /data/mongodb/data/shard1_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard1.pid  # location of pidfile
net:
  port: 27027
  #bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard1
sharding:
  clusterRole: shardsvr
[root@pc1 ~]# cp /etc/init.d/mongod /etc/init.d/shard1
[root@pc1 ~]# vim /etc/init.d/shard1
[root@pc1 ~]# egrep "^[^#$]" /etc/init.d/shard1
. /etc/rc.d/init.d/functions
CONFIGFILE="/data/mongodb/config/shard1.conf"
OPTIONS=" -f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}
MONGO_USER=mongod
MONGO_GROUP=mongod
SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi
NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi
PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`
start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi
  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi
  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  #
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000
  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/shard1
}
stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/shard1
}
restart () {
    stop
    start
}
mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`
  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration))
  done
  kill -KILL $pid >/dev/null 2>&1
  usleep 100000
  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}
RETVAL=0
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/shard1 ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac
exit $RETVAL
The configuration file and init script for replica set 2 (shard2) are as follows:
[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard2.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard2.log
storage:
  dbPath: /data/mongodb/data/shard2_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard2.pid  # location of pidfile
net:
  port: 27028
  #bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard2
sharding:
  clusterRole: shardsvr
[root@pc1 ~]# cp /etc/init.d/mongod /etc/init.d/shard2
[root@pc1 ~]# vim /etc/init.d/shard2
[root@pc1 ~]# egrep "^[^#$]" /etc/init.d/shard2
. /etc/rc.d/init.d/functions
CONFIGFILE="/data/mongodb/config/shard2.conf"
OPTIONS=" -f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}
MONGO_USER=mongod
MONGO_GROUP=mongod
SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi
NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi
PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`
start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi
  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi
  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  #
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000
  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/shard2
}
stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/shard2
}
restart () {
    stop
    start
}
mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`
  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration))
  done
  kill -KILL $pid >/dev/null 2>&1
  usleep 100000
  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}
RETVAL=0
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/shard2 ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac
exit $RETVAL
The configuration file and init script for replica set 3 (shard3) are as follows:
[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/shard3.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/shard3.log
storage:
  dbPath: /data/mongodb/data/shard3_1
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/shard3.pid  # location of pidfile
net:
  port: 27026
  #bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: shard3
sharding:
  clusterRole: shardsvr
[root@pc1 ~]# cp /etc/init.d/mongod /etc/init.d/shard3
[root@pc1 ~]# vim /etc/init.d/shard3
[root@pc1 ~]# egrep "^[^#$]" /etc/init.d/shard3    (output omitted; essentially the same as the shard1 and shard2 scripts)
Copy the configuration files to pc2 and pc3, then start the services:
[root@pc1 ~]# scp /data/mongodb/config/*.conf pc2:/data/mongodb/config/
[root@pc1 ~]# scp /etc/init.d/shard* pc2:/etc/init.d/
[root@pc1 ~]# scp /data/mongodb/config/*.conf pc3:/data/mongodb/config/
[root@pc1 ~]# scp /etc/init.d/shard* pc3:/etc/init.d/
[root@pc1 ~]#
[root@pc1 ~]# /etc/init.d/shard1 start
[root@pc1 ~]# ssh pc2 '/etc/init.d/shard1 start'
[root@pc1 ~]# ssh pc3 '/etc/init.d/shard1 start'
[root@pc1 ~]# /etc/init.d/shard2 start
[root@pc1 ~]# ssh pc2 '/etc/init.d/shard2 start'
[root@pc1 ~]# ssh pc3 '/etc/init.d/shard2 start'
[root@pc1 ~]# /etc/init.d/shard3 start
[root@pc1 ~]# ssh pc2 '/etc/init.d/shard3 start'
[root@pc1 ~]# ssh pc3 '/etc/init.d/shard3 start'
Configure replica set 1 (shard1) as follows:
[root@pc1 ~]# mongo localhost:27027
MongoDB shell version: 3.2.7
connecting to: localhost:27027/test
> use admin
switched to db admin
> rs.initiate()
{
    "info2" : "no configuration specified. Using a default configuration for the set",
    "me" : "pc1:27027",
    "ok" : 1
}
shard1:OTHER> rs.add("pc2:27027")
{ "ok" : 1 }
shard1:PRIMARY> rs.addArb("pc3:27027")
{ "ok" : 1 }
shard1:PRIMARY> rs.status()
{
    "set" : "shard1",
    "date" : ISODate("2016-06-28T03:59:41.403Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "pc1:27027",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1634,
            "optime" : {
                "ts" : Timestamp(1467086346, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2016-06-28T03:59:06Z"),
            "infoMessage" : "could not find member to sync from",
            "electionTime" : Timestamp(1467086301, 2),
            "electionDate" : ISODate("2016-06-28T03:58:21Z"),
            "configVersion" : 3,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "pc2:27027",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 54,
            "optime" : {
                "ts" : Timestamp(1467086346, 1),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2016-06-28T03:59:06Z"),
            "lastHeartbeat" : ISODate("2016-06-28T03:59:40.050Z"),
            "lastHeartbeatRecv" : ISODate("2016-06-28T03:59:41.041Z"),
            "pingMs" : NumberLong(1),
            "syncingTo" : "pc1:27027",
            "configVersion" : 3
        },
        {
            "_id" : 2,
            "name" : "pc3:27027",
            "health" : 1,
            "state" : 7,
            "stateStr" : "ARBITER",
            "uptime" : 35,
            "lastHeartbeat" : ISODate("2016-06-28T03:59:40.053Z"),
            "lastHeartbeatRecv" : ISODate("2016-06-28T03:59:41.044Z"),
            "pingMs" : NumberLong(2),
            "configVersion" : 3
        }
    ],
    "ok" : 1
}
Configuring replica sets 2 and 3 follows the same steps as replica set 1, so the transcripts are not repeated here. Note the role layout: for replica set 1, pc1 is the primary, pc2 the secondary, and pc3 the arbiter; for replica set 2, pc2 is the primary, pc3 the secondary, and pc1 the arbiter; for replica set 3, pc3 is the primary, pc1 the secondary, and pc2 the arbiter.
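For reference, here is a minimal sketch of what the shard2 initiation could look like. It is not part of the original session; it assumes the commands are run on pc2 against the shard2 port (27028), following the role layout just described, with output abbreviated:
[root@pc2 ~]# mongo localhost:27028
> rs.initiate()
shard2:OTHER> rs.add("pc3:27028")
shard2:PRIMARY> rs.addArb("pc1:27028")
shard2:PRIMARY> rs.status()
Replica set 3 (shard3) would be initiated the same way from pc3 on port 27026, adding pc1 as the secondary and pc2 as the arbiter.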
The config server's configuration file and init script are as follows:
[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/config.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/config.log
storage:
  dbPath: /data/mongodb/data/config
  directoryPerDB: true
  journal:
    enabled: true
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/config.pid  # location of pidfile
net:
  port: 27029
  #bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
replication:
  oplogSizeMB: 500
  replSetName: configrs
sharding:
  clusterRole: configsvr
[root@pc1 ~]# cp /etc/init.d/mongod /etc/init.d/mconfig
[root@pc1 ~]# vim /etc/init.d/mconfig
[root@pc1 ~]# egrep "^[^#$]" /etc/init.d/mconfig
. /etc/rc.d/init.d/functions
CONFIGFILE="/data/mongodb/config/config.conf"
OPTIONS=" -f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongod}
MONGO_USER=mongod
MONGO_GROUP=mongod
SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi
NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi
PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`
start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi
  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi
  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000
  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/mconfig
}
stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mconfig
}
restart () {
    stop
    start
}
mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`
  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration))
  done
  kill -KILL $pid >/dev/null 2>&1
  usleep 100000
  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}
RETVAL=0
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/mconfig ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac
exit $RETVAL
[root@pc1 ~]# scp /data/mongodb/config/config.conf pc2:/data/mongodb/config
[root@pc1 ~]# scp /etc/init.d/mconfig pc2:/etc/init.d/
[root@pc1 ~]#
[root@pc1 ~]# scp /data/mongodb/config/config.conf pc3:/data/mongodb/config
[root@pc1 ~]# scp /etc/init.d/mconfig pc3:/etc/init.d/
[root@pc1 ~]#
[root@pc1 ~]# /etc/init.d/mconfig start
Starting mongod: [ OK ]
[root@pc1 ~]# lsof -i:27029
COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
mongod 2765 mongod 6u IPv4 11520 0t0 TCP *:27029 (LISTEN)
[root@pc1 ~]# ssh pc2 '/etc/init.d/mconfig start'
[root@pc1 ~]# ssh pc3 '/etc/init.d/mconfig start'
To keep the config servers consistent with one another, the three config server instances are themselves run as a replica set; the finished state is shown below (note that a config server replica set does not support arbiters):
[root@pc1 ~]# mongo localhost:27029
MongoDB shell version: 3.2.7
connecting to: localhost:27029/test
configrs:SECONDARY> use admin
switched to db admin
configrs:SECONDARY> rs.slaveOk()
configrs:SECONDARY> rs.status()
{
    "set" : "configrs",
    "date" : ISODate("2016-07-01T03:25:49.471Z"),
    "myState" : 2,
    "term" : NumberLong(2),
    "syncingTo" : "pc3:27029",
    "configsvr" : true,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "pc1:27029",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 3937,
            "optime" : {
                "ts" : Timestamp(1467339616, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2016-07-01T02:20:16Z"),
            "syncingTo" : "pc3:27029",
            "configVersion" : 3,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "pc2:27029",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 3936,
            "optime" : {
                "ts" : Timestamp(1467339616, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2016-07-01T02:20:16Z"),
            "lastHeartbeat" : ISODate("2016-07-01T03:25:48.578Z"),
            "lastHeartbeatRecv" : ISODate("2016-07-01T03:25:47.481Z"),
            "pingMs" : NumberLong(1),
            "syncingTo" : "pc3:27029",
            "configVersion" : 3
        },
        {
            "_id" : 2,
            "name" : "pc3:27029",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 3936,
            "optime" : {
                "ts" : Timestamp(1467339616, 1),
                "t" : NumberLong(2)
            },
            "optimeDate" : ISODate("2016-07-01T02:20:16Z"),
            "lastHeartbeat" : ISODate("2016-07-01T03:25:49.146Z"),
            "lastHeartbeatRecv" : ISODate("2016-07-01T03:25:47.585Z"),
            "pingMs" : NumberLong(1),
            "electionTime" : Timestamp(1467339615, 1),
            "electionDate" : ISODate("2016-07-01T02:20:15Z"),
            "configVersion" : 3
        }
    ],
    "ok" : 1
}
configrs:SECONDARY> exit
bye
[root@pc1 ~]#
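The transcript above only captures the finished state; the initiation step itself is not shown. A minimal sketch of how the configrs replica set could be initiated, run once from any of the three nodes (hostnames and port as used throughout this article; no arbiter, since config servers do not support one):
[root@pc1 ~]# mongo localhost:27029
> rs.initiate({
    _id: "configrs",
    configsvr: true,               // mark this replica set as a config server set
    members: [
      { _id: 0, host: "pc1:27029" },
      { _id: 1, host: "pc2:27029" },
      { _id: 2, host: "pc3:27029" }
    ]
  })
On success rs.initiate() returns { "ok" : 1 }, and rs.status() should then report one PRIMARY and two SECONDARY members.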
The mongos router's configuration file and init script are as follows:
[root@pc1 ~]# egrep "^[^#$]" /data/mongodb/config/mongos.conf
systemLog:
  destination: file
  logAppend: true
  path: /data/mongodb/data/logs/mongos.log
processManagement:
  fork: true  # fork and run in background
  pidFilePath: /var/run/mongodb/mongos.pid  # location of pidfile
net:
  port: 27017
  #bindIp: 127.0.0.1  # Listen to local interface only, comment to listen on all interfaces.
sharding:
  configDB: pc1:27029,pc2:27029,pc3:27029  # Note: with MongoDB 3.4 the format must be configDB: configrs_name/configServer_list
[root@pc1 ~]# cp /etc/init.d/mongod /etc/init.d/mongos
[root@pc1 ~]# vim /etc/init.d/mongos
[root@pc1 ~]# egrep "^[^#$]" /etc/init.d/mongos
. /etc/rc.d/init.d/functions
CONFIGFILE="/data/mongodb/config/mongos.conf"
OPTIONS=" -f $CONFIGFILE"
mongod=${MONGOD-/usr/bin/mongos}
MONGO_USER=mongod
MONGO_GROUP=mongod
SYSCONFIG="/etc/sysconfig/mongod"
if [ -f "$SYSCONFIG" ]; then
    . "$SYSCONFIG"
fi
NUMACTL_ARGS="--interleave=all"
if which numactl >/dev/null 2>/dev/null && numactl $NUMACTL_ARGS ls / >/dev/null 2>/dev/null
then
    NUMACTL="numactl $NUMACTL_ARGS"
else
    NUMACTL=""
fi
PIDFILEPATH=`awk -F'[:=]' -v IGNORECASE=1 '/^[[:blank:]]*(processManagement\.)?pidfilepath[[:blank:]]*[:=][[:blank:]]*/{print $2}' "$CONFIGFILE" | tr -d "[:blank:]\"'" | awk -F'#' '{print $1}'`
PIDDIR=`dirname $PIDFILEPATH`
start()
{
  # Make sure the default pidfile directory exists
  if [ ! -d $PIDDIR ]; then
    install -d -m 0755 -o $MONGO_USER -g $MONGO_GROUP $PIDDIR
  fi
  if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
    echo never > /sys/kernel/mm/transparent_hugepage/defrag
  fi
  # Recommended ulimit values for mongod or mongos
  # See http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
  #
  ulimit -f unlimited
  ulimit -t unlimited
  ulimit -v unlimited
  ulimit -n 64000
  ulimit -m unlimited
  ulimit -u 64000
  echo -n $"Starting mongod: "
  daemon --user "$MONGO_USER" --check $mongod "$NUMACTL $mongod $OPTIONS >/dev/null 2>&1"
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && touch /var/lock/subsys/mongos
}
stop()
{
  echo -n $"Stopping mongod: "
  mongo_killproc "$PIDFILEPATH" $mongod
  RETVAL=$?
  echo
  [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/mongos
}
restart () {
    stop
    start
}
mongo_killproc()
{
  local pid_file=$1
  local procname=$2
  local -i delay=300
  local -i duration=10
  local pid=`pidofproc -p "${pid_file}" ${procname}`
  kill -TERM $pid >/dev/null 2>&1
  usleep 100000
  local -i x=0
  while [ $x -le $delay ] && checkpid $pid; do
    sleep $duration
    x=$(( $x + $duration))
  done
  kill -KILL $pid >/dev/null 2>&1
  usleep 100000
  checkpid $pid # returns 0 only if the process exists
  local RC=$?
  [ "$RC" -eq 0 ] && failure "${procname} shutdown" || rm -f "${pid_file}"; success "${procname} shutdown"
  RC=$((! $RC)) # invert return code so we return 0 when process is dead.
  return $RC
}
RETVAL=0
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload|force-reload)
    restart
    ;;
  condrestart)
    [ -f /var/lock/subsys/mongos ] && restart || :
    ;;
  status)
    status $mongod
    RETVAL=$?
    ;;
  *)
    echo "Usage: $0 {start|stop|status|restart|reload|force-reload|condrestart}"
    RETVAL=1
esac
exit $RETVAL
[root@pc1 ~]# scp /data/mongodb/config/mongos.conf pc2:/data/mongodb/config/
[root@pc1 ~]# scp /etc/init.d/mongos pc2:/etc/init.d/
[root@pc1 ~]# scp /data/mongodb/config/mongos.conf pc3:/data/mongodb/config/
[root@pc1 ~]# scp /etc/init.d/mongos pc3:/etc/init.d/
[root@pc1 ~]# /etc/init.d/mongos start
Starting mongod: [ OK ]
[root@pc1 ~]# ssh pc2 '/etc/init.d/mongos start'
Starting mongod: [ OK ]
[root@pc1 ~]# ssh pc3 '/etc/init.d/mongos start'
Starting mongod: [ OK ]
[root@pc1 ~]#
Some readers have suggested building a replica set out of the mongos routers themselves (i.e., replication between multiple mongos instances). From an architectural point of view I do not see what this would accomplish, since mongos stores no data of its own. I did try it anyway, and found that mongos would not start after being configured as a replica set member (I have not investigated further; comments and suggestions are welcome).
3. Setting Up Sharding
Connect to any one of the mongos routers and run the following:
[root@pc1 ~]# mongo
MongoDB shell version: 3.2.7
connecting to: test
mongos> use admin
switched to db admin
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("577226b7511b1f96da0ddba2")
}
  shards:
  active mongoses:
    "3.2.7" : 3
  balancer:
    Currently enabled:  yes
    Currently running:  no
    Failed balancer rounds in last 5 attempts:  0
    Migration Results for the last 24 hours:
        No recent migrations
  databases:
mongos> sh.addShard("shard1/pc1:27027")
{ "shardAdded" : "shard1", "ok" : 1 }
mongos> sh.addShard("shard3/pc2:27026")
{ "shardAdded" : "shard3", "ok" : 1 }
mongos> sh.addShard("shard2/pc3:27028")
{ "shardAdded" : "shard2", "ok" : 1 }
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("577226b7511b1f96da0ddba2")
}
  shards:
    {  "_id" : "shard1",  "host" : "shard1/pc1:27027,pc2:27027" }
    {  "_id" : "shard2",  "host" : "shard2/pc1:27028,pc3:27028" }
    {  "_id" : "shard3",  "host" : "shard3/pc2:27026,pc3:27026" }
  active mongoses:
    "3.2.7" : 3
  balancer:
    Currently enabled:  yes
    Currently running:  no
    Failed balancer rounds in last 5 attempts:  5
    Last reported error:  mongos specified a different config database string : stored : pc1:27029 vs given : pc1:27029,pc2:27029,pc3:27029
    Time of Reported error:  Tue Jun 28 2016 07:34:07 GMT+0000 (UTC)
    Migration Results for the last 24 hours:
        No recent migrations
  databases:
mongos> use config
switched to db config
mongos> db.chunks.find()
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(64) }
mongos> db.settings.save({"_id":"chunksize","value":NumberLong(5)})   # Changing the chunk size is not recommended; it is set to 5 MB here only for demonstration purposes. The minimum chunksize is 1 MB and the default is 64 MB.
WriteResult({ "nMatched" : 1, "nUpserted" : 0, "nModified" : 1 })
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(5) }
mongos> use admin
switched to db admin
mongos> sh.enableSharding("test")
{ "ok" : 1 }
mongos> sh.shardCollection("test.user",{uid:1})
{ "collectionsharded" : "test.user", "ok" : 1 }
mongos> use test
switched to db test
mongos> db.use.ensureIndex({uid:1})
{
    "raw" : {
        "shard1/pc1:27027,pc2:27027" : {
            "createdCollectionAutomatically" : true,
            "numIndexesBefore" : 1,
            "numIndexesAfter" : 2,
            "ok" : 1,
            "$gleStats" : {
                "lastOpTime" : Timestamp(1467102931, 2),
                "electionId" : ObjectId("7fffffff0000000000000001")
            }
        }
    },
    "ok" : 1
}
mongos> for (i=1;i<=100000;i++) db.user.insert({uid:'user'+i,age:(i%15),address:'#'+i+',shangdi south road,beijing',preferbooks:['book'+i,'hello world']})
WriteResult({ "nInserted" : 1 })
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("577234b7c92b86a15a2901d4")
}
  shards:
    {  "_id" : "shard1",  "host" : "shard1/pc1:27027,pc2:27027" }
    {  "_id" : "shard2",  "host" : "shard2/pc2:27028,pc3:27028" }
    {  "_id" : "shard3",  "host" : "shard3/pc1:27026,pc3:27026" }
  active mongoses:
    "3.2.7" : 3
  balancer:
    Currently enabled:  yes
    Currently running:  no
    Failed balancer rounds in last 5 attempts:  0
    Migration Results for the last 24 hours:
        2 : Success
        1 : Failed with error 'aborted', from shard1 to shard2
  databases:
    {  "_id" : "test",  "primary" : "shard1",  "partitioned" : true }
        test.user
            shard key: { "uid" : 1 }
            unique: false
            balancing: true
            chunks:
                shard1  1
                shard2  1
                shard3  1
            { "uid" : { "$minKey" : 1 } } -->> { "uid" : "user2" } on : shard2 Timestamp(2, 0)
            { "uid" : "user2" } -->> { "uid" : "user8" } on : shard3 Timestamp(3, 0)
            { "uid" : "user8" } -->> { "uid" : { "$maxKey" : 1 } } on : shard1 Timestamp(3, 1)
mongos> use config
switched to db config
mongos> show collections
actionlog
changelog
chunks
collections
databases
lockpings
locks
mongos
settings
shards
tags
version
mongos> db.settings.find()
{ "_id" : "chunksize", "value" : NumberLong(64) }
mongos> db.shards.find()
{ "_id" : "shard1", "host" : "shard1/pc1:27027,pc2:27027" }
{ "_id" : "shard3", "host" : "shard3/pc1:27026,pc3:27026" }
{ "_id" : "shard2", "host" : "shard2/pc2:27028,pc3:27028" }
mongos> db.databases.find()
{ "_id" : "test", "primary" : "shard1", "partitioned" : true }
mongos> db.chunks.find()
{ "_id" : "test.user-uid_MinKey", "lastmod" : Timestamp(2, 0), "lastmodEpoch" : ObjectId("57723675c92b86a15a290209"), "ns" : "test.user", "min" : { "uid" : { "$minKey" : 1 } }, "max" : { "uid" : "user2" }, "shard" : "shard2" }
{ "_id" : "test.user-uid_\"user2\"", "lastmod" : Timestamp(3, 0), "lastmodEpoch" : ObjectId("57723675c92b86a15a290209"), "ns" : "test.user", "min" : { "uid" : "user2" }, "max" : { "uid" : "user8" }, "shard" : "shard3" }
{ "_id" : "test.user-uid_\"user8\"", "lastmod" : Timestamp(3, 1), "lastmodEpoch" : ObjectId("57723675c92b86a15a290209"), "ns" : "test.user", "min" : { "uid" : "user8" }, "max" : { "uid" : { "$maxKey" : 1 } }, "shard" : "shard1" }
mongos> exit
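Besides reading the config database directly, the distribution of a sharded collection can also be checked from any mongos with getShardDistribution(). A quick sketch, not part of the session above (output omitted; it lists per-shard data size, document count, and chunk count):
mongos> use test
switched to db test
mongos> db.user.getShardDistribution()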
4. Supplementary Commands
db.runCommand({addshard:"shard1/pc1:27027,pc2:27027,pc3:27027",name:"shard1"});
db.runCommand({addshard:"shard2/pc1:27028,pc2:27028,pc3:27028",name:"shard2"});
db.runCommand({addshard:"shard3/pc1:27026,pc2:27026,pc3:27026",name:"shard3"});
use admin
db.runCommand({enablesharding:"db_name"})                                               // enable sharding for a database
db.runCommand({shardcollection:"db_name.collection_name",key:{fields_name:"hashed"}})   // shard a collection with a hashed key
db.runCommand({shardcollection:"db_name.collection_name",key:{fields_name:1}})          // shard a collection with a range (ascending) key
sh.shardCollection("db_name.collection_name",{fields_name1:1,fields_name2:1})           // shard a collection with a compound key
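As a concrete illustration of the hashed-key variant above, with made-up database and collection names:
mongos> use admin
switched to db admin
mongos> sh.enableSharding("logdb")
mongos> sh.shardCollection("logdb.events", {uid: "hashed"})
A hashed shard key spreads inserts evenly across the shards, at the cost of turning range queries on that key into scatter-gather operations.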