设为首页 收藏本站
查看: 949|回复: 0

[经验分享] MongoDB 3.2.4 配置参考

[复制链接]

尚未签到

发表于 2018-10-25 12:45:43 | 显示全部楼层 |阅读模式
DSC0000.png

  [mongo@vq12stmsg01 /app/mongo]
  $top
  top - 13:00:41 up 2 days, 21:35,  3 users,  load average: 0.31, 0.23, 0.23
  Tasks: 250 total,   1 running, 249 sleeping,   0 stopped,   0 zombie

  %Cpu0  :  0.7 us,  1.7 sy,  0.0 ni, 97.7 id
  %Cpu1  :  5.0 us,  7.3 sy,  0.0 ni, 87.7 id
  %Cpu2  :  1.0 us,  1.7 sy,  0.0 ni, 97.3 id
  %Cpu3  :  0.7 us,  1.0 sy,  0.0 ni, 98.3 id
  %Cpu4  :  5.6 us,  7.3 sy,  0.0 ni, 87.0 id
  %Cpu5  :  0.0 us,  0.7 sy,  0.0 ni, 99.3 id
  %Cpu6  :  0.3 us,  1.0 sy,  0.0 ni, 98.7 id
  %Cpu7  :  5.3 us,  7.9 sy,  0.0 ni, 86.8 id
  %Cpu8  :  0.3 us,  1.0 sy,  0.0 ni, 98.7 id
  %Cpu9  :  1.0 us,  2.0 sy,  0.0 ni, 97.0 id
  %Cpu10 :  5.3 us,  7.0 sy,  0.0 ni, 87.7 id
  %Cpu11 :  1.3 us,  2.0 sy,  0.0 ni, 96.7 id
  KiB Mem:  41038580 total,  1805648 used, 39232932 free,   319880 buffers
  KiB Swap:        0 total,        0 used,        0 free.   469688 cached Mem
  [mongo@vq12stmsg01 /app/mongo]
  $free -m
  total       used       free     shared    buffers     cached
  Mem:         40076       1762      38314          8        312        458
  -/+ buffers/cache:        991      39085
  Swap:            0          0          0
  [mongo@vq12stmsg01 /app/mongo]
  $df -h
  Filesystem                Size  Used Avail Use% Mounted on
  /dev/mapper/vg00-lv_root   19G  3.5G   14G  21% /
  devtmpfs                   20G     0   20G   0% /dev
  tmpfs                      20G   80K   20G   1% /dev/shm
  tmpfs                      20G  8.9M   20G   1% /run
  tmpfs                      20G     0   20G   0% /sys/fs/cgroup
  /dev/sda1                 969M   95M  809M  11% /boot
  /dev/mapper/vg00-lv_data   40G  1.3G   37G   4% /data
  /dev/mapper/vg00-lv_app    21G  328M   19G   2% /app
  echo -ne "
  10.78.200.105    vq12stmsg01
  10.78.200.106    vq12stmsg02
  10.78.200.107    vq12stmsg03
  " >>/etc/hosts
  mkdir -p /app/logs/
  105:
  mkdir -p /data/mdb/{mdb1_1,mdb1_1/repair,mdb2_2,mdb2_2/repair,mdb3_3,mdb3_3/repair}
  mkdir -p /data/configdb/configdb1_1/repair
  106:
  mkdir -p /data/mdb/{mdb1_3,mdb1_3/repair,mdb2_1,mdb2_1/repair,mdb3_2,mdb3_2/repair}
  mkdir -p /data/configdb/configdb2_1/repair
  107:
  mkdir -p /data/mdb/{mdb1_2,mdb1_2/repair,mdb2_3,mdb2_3/repair,mdb3_1,mdb3_1/repair}
  mkdir -p /data/configdb/configdb3_1/repair
  systemctl stop firewalld.service
  systemctl disable firewalld.service
  openssl rand -base64 741 > /app/conf/keyfile
  chmod 600 /app/conf/keyfile
  copy keyfile文件到各个节点
  主机环境变量及参数,参考
  $cat /etc/rc.local
  .....
  #####MongoDB#####
  sleep 2
  blockdev --setra 32 /dev/mapper/vg00-lv_data
  sleep 2
  echo '512' > /sys/block/sdb/queue/nr_requests
  sleep 2
  echo 0 > /proc/sys/vm/zone_reclaim_mode
  #####blockdev --report /dev/mapper/vg00-lv_data
  #chown mongo:dba -R /app/
  #chown mongo:dba -R /data/
  #####MongoDB#####
  $cat /etc/fstab
  #
  # /etc/fstab
  # Created by anaconda on Wed Mar 16 06:50:02 2016
  #
  # Accessible filesystems, by reference, are maintained under '/dev/disk'
  # See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
  #
  /dev/mapper/vg00-lv_root /                       ext3    defaults        1 1
  /dev/mapper/vg00-lv_app  /app                    ext3    defaults        1 2
  UUID=4f86d0d2-a559-4f07-8a91-a605e0005f5c /boot                   ext3    defaults        1 2
  /dev/mapper/vg00-lv_data /data                   ext4    noatime        0 0
  $cat /etc/security/limits.conf
  mongo      soft    nofile  65535
  mongo      hard    nofile  65535
  mongo      soft    nproc   65535
  mongo      hard    nproc   65535
  关闭透明页
  https://docs.mongodb.org/manual/tutorial/transparent-huge-pages/
  https://docs.mongodb.org/manual/administration/production-checklist/
  [mongo@vq12stmsg01 /app/mongo]
  $cat .bash_profile
  ...
  #####add by mongoDB#####
  export LANG=en_US
  export PATH=$PATH:/app/mongodb_3_2_4/bin
  set -o vi
  stty erase ^H
  umask 022
  export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
  export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
  alias 'l=ls -altr'
  alias 'cdm=cd /app/mongodb_3_2_4'
  alias 'cdl=cd /app/logs'
  alias 'cdc=cd /app/conf'
  alias 'cddb=cd /data/'
  alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos1.conf'
  alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_1.conf'
  alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_1.conf'
  alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_2.conf'
  alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_3.conf'
  alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
  alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
  alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
  #####add by mongoDB#####
  [mongo@vq12stmsg01 /app/conf]
  $cat configsvr1_1.conf
  systemLog:
  destination: file
  path: "/app/logs/configsvr1_1.log"
  logAppend: true
  storage:
  dbPath: "/data/configdb/configdb1_1"
  repairPath: "/data/configdb/configdb1_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 1
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 20000
  sharding:
  clusterRole: configsvr
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  $cat mongos1.conf
  systemLog:
  destination: file
  path: /app/logs/mongos1.log
  logAppend: true
  net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 10000
  processManagement:
  fork: true
  replication:
  localPingThresholdMs: 15
  sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
  security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  $cat shardsvr1_1.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_1.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb1_1/"
  repairPath: "/data/mdb/mdb1_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30001
  replication:
  oplogSizeMB: 10240
  replSetName: pns1
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  $cat shardsvr2_2.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_2.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb2_2/"
  repairPath: "/data/mdb/mdb2_2/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30002
  replication:
  oplogSizeMB: 10240
  replSetName: pns2
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  $cat shardsvr3_3.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_3.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb3_3/"
  repairPath: "/data/mdb/mdb3_3/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.105
  port: 30003
  replication:
  oplogSizeMB: 10240
  replSetName: pns3
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  106主机环境变量及参数:
  #####add by mongoDB#####
  export LANG=en_US
  export PATH=$PATH:/app/mongodb_3_2_4/bin
  set -o vi
  stty erase ^H
  umask 022
  export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
  export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
  alias 'l=ls -altr'
  alias 'cdm=cd /app/mongodb_3_2_4'
  alias 'cdl=cd /app/logs'
  alias 'cdc=cd /app/conf'
  alias 'cddb=cd /data/'
  alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos2.conf'
  alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_2.conf'
  alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_3.conf'
  alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_1.conf'
  alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_2.conf'
  alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
  alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
  alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
  #####add by mongoDB#####
  [root@vq12stmsg02 conf]# ll
  total 24
  -rw-r--r-- 1 mongo dba  859 Mar 24 15:47 configsvr1_2.conf
  -rw------- 1 mongo dba 1004 Mar 24 11:02 keyfile
  -rw-r--r-- 1 mongo dba  527 Mar 24 15:40 mongos2.conf
  -rw-r--r-- 1 mongo dba  930 Mar 24 15:47 shardsvr1_3.conf
  -rw-r--r-- 1 mongo dba  928 Mar 24 15:48 shardsvr2_1.conf
  -rw-r--r-- 1 mongo dba  930 Mar 24 15:48 shardsvr3_2.conf
  [root@vq12stmsg02 conf]# cat configsvr1_2.conf
  systemLog:
  destination: file
  path: "/app/logs/configsvr2_1.log"
  logAppend: true
  storage:
  dbPath: "/data/configdb/configdb2_1"
  repairPath: "/data/configdb/configdb2_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 1
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 20000
  sharding:
  clusterRole: configsvr
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  [root@vq12stmsg02 conf]# cat mongos2.conf
  systemLog:
  destination: file
  path: /app/logs/mongos2.log
  logAppend: true
  net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 10000
  processManagement:
  fork: true
  replication:
  localPingThresholdMs: 15
  sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
  security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  [root@vq12stmsg02 conf]# cat shardsvr1_3.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_3.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb1_3/"
  repairPath: "/data/mdb/mdb1_3/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30003
  replication:
  oplogSizeMB: 10240
  replSetName: pns1
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  [root@vq12stmsg02 conf]# cat shardsvr2_1.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_1.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb2_1/"
  repairPath: "/data/mdb/mdb2_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30001
  replication:
  oplogSizeMB: 10240
  replSetName: pns2
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  [root@vq12stmsg02 conf]# cat shardsvr3_2.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_2.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb3_2/"
  repairPath: "/data/mdb/mdb3_2/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.106
  port: 30002
  replication:
  oplogSizeMB: 10240
  replSetName: pns3
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  [root@vq12stmsg02 conf]#
  107主机环境变量及参数:
  #####add by mongoDB#####
  export LANG=en_US
  export PATH=$PATH:/app/mongodb_3_2_4/bin
  set -o vi
  stty erase ^H
  umask 022
  export HISTTIMEFORMAT=`whoami`" : %h/%d - %H:%M:%S "
  export PS1='\n\e[1;37m[\e[m\e[1;32m\u\e[m\e[1;33m@\e[m\e[1;36m\h\e[m \e[4m`pwd`\e[m\e[1;37m]\e[m\e[1;36m\e[m\n\$'
  alias 'l=ls -altr'
  alias 'cdm=cd /app/mongodb_3_2_4'
  alias 'cdl=cd /app/logs'
  alias 'cdc=cd /app/conf'
  alias 'cddb=cd /data/'
  alias 'mongosstart=numactl --interleave=all mongos --config=/app/conf/mongos3.conf'
  alias 'configstart=numactl --interleave=all mongod --config=/app/conf/configsvr1_3.conf'
  alias 'mongodstart1=numactl --interleave=all mongod --config=/app/conf/shardsvr1_2.conf'
  alias 'mongodstart2=numactl --interleave=all mongod --config=/app/conf/shardsvr2_3.conf'
  alias 'mongodstart3=numactl --interleave=all mongod --config=/app/conf/shardsvr3_1.conf'
  alias "mongosstop=ps -ef | grep -v grep | grep mongos | cut -c 9-15 | xargs kill -2"
  alias "mongodstop=ps -ef | grep -v grep | grep shardsvr | grep mongod | cut -c 9-15 | xargs kill -2"
  alias "mongocfgstop=ps -ef | grep -v grep | grep configsvr | cut -c 9-15 | xargs kill -2"
  #####add by mongoDB#####
  [root@vq12stmsg03 conf]# ll
  total 24
  -rw-r--r-- 1 mongo dba  861 Mar 24 15:45 configsvr1_3.conf
  -rw------- 1 mongo dba 1004 Mar 24 11:02 keyfile
  -rw-r--r-- 1 mongo dba  526 Mar 24 15:46 mongos3.conf
  -rw-r--r-- 1 mongo dba  930 Mar 24 15:46 shardsvr1_2.conf
  -rw-r--r-- 1 mongo dba  930 Mar 24 15:47 shardsvr2_3.conf
  -rw-r--r-- 1 mongo dba  930 Mar 24 15:47 shardsvr3_1.conf
  [root@vq12stmsg03 conf]# cat configsvr1_3.conf
  systemLog:
  destination: file
  path: "/app/logs/configsvr3_1.log"
  logAppend: true
  storage:
  dbPath: "/data/configdb/configdb3_1"
  repairPath: "/data/configdb/configdb3_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 1
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 20000
  sharding:
  clusterRole: configsvr
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  [root@vq12stmsg03 conf]# cat mongos3.conf
  systemLog:
  destination: file
  path: /app/logs/mongos3.log
  logAppend: true
  net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 10000
  processManagement:
  fork: true
  replication:
  localPingThresholdMs: 15
  sharding:
  configDB: 10.78.200.105:20000,10.78.200.106:20000,10.78.200.107:20000
  chunkSize: 64
  security:
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 5000
  [root@vq12stmsg03 conf]# cat shardsvr1_2.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr1_2.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb1_2/"
  repairPath: "/data/mdb/mdb1_2/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30002
  replication:
  oplogSizeMB: 10240
  replSetName: pns1
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  [root@vq12stmsg03 conf]# cat shardsvr2_3.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr2_3.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb2_3/"
  repairPath: "/data/mdb/mdb2_3/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30003
  replication:
  oplogSizeMB: 10240
  replSetName: pns2
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  [root@vq12stmsg03 conf]# cat shardsvr3_1.conf
  systemLog:
  destination: file
  path: "/app/logs/sharedsvr3_1.log"
  logAppend: true
  storage:
  dbPath: "/data/mdb/mdb3_1/"
  repairPath: "/data/mdb/mdb3_1/repair"
  journal:
  enabled: true
  commitIntervalMs: 100
  directoryPerDB: true
  syncPeriodSecs: 60
  engine: wiredTiger
  wiredTiger:
  engineConfig:
  cacheSizeGB: 10
  journalCompressor: snappy
  directoryForIndexes: true
  collectionConfig:
  blockCompressor: snappy
  indexConfig:
  prefixCompression: true
  processManagement:
  fork: true
  net:
  bindIp: 127.0.0.1,10.78.200.107
  port: 30001
  replication:
  oplogSizeMB: 10240
  replSetName: pns3
  sharding:
  clusterRole: shardsvr
  archiveMovedChunks: false
  security:
  authorization: disabled
  clusterAuthMode: keyFile
  keyFile: /app/conf/keyfile
  setParameter:
  enableLocalhostAuthBypass: true
  authenticationMechanisms: SCRAM-SHA-1
  connPoolMaxShardedConnsPerHost: 200
  connPoolMaxConnsPerHost: 15000
  分片/副本集配置:
  105:
  mongo 127.0.0.1:30001
  rs.initiate()
  rs.add("10.78.200.107:30002")
  rs.add("10.78.200.106:30003")
  rs.conf()
  106:
  mongo 127.0.0.1:30001
  rs.initiate()
  rs.add("10.78.200.105:30002")
  rs.add("10.78.200.107:30003")
  rs.conf()
  107:
  mongo 127.0.0.1:30001
  rs.initiate()
  rs.add("10.78.200.106:30002")
  rs.add("10.78.200.105:30003")
  rs.conf()
  mongo 127.0.0.1:10000
  use admin
  sh.addShard( "pns1/10.78.200.105:30001,10.78.200.107:30002,10.78.200.106:30003" )
  sh.addShard( "pns2/10.78.200.106:30001,10.78.200.105:30002,10.78.200.107:30003" )
  sh.addShard( "pns3/10.78.200.107:30001,10.78.200.106:30002,10.78.200.105:30003" )
  db.runCommand({listshards:1})
  用户及权限:
  use admin
  db.createUser(
  {
  user: "admin",
  pwd: "xxxxxx",
  roles: [ { role: "root", db: "admin" },{ role: "clusterManager", db: "admin" } ,{ role: "clusterMonitor", db: "admin" },{ role: "hostManager", db: "admin" } ]
  }
  )
  db.updateUser( "admin",
  {
  roles: [ { role: "root", db: "admin" },{ role: "dbAdminAnyDatabase", db: "admin" },{"role" : "readWriteAnyDatabase",db: "admin" }, { role: "userAdminAnyDatabase", db: "admin" },{ role: "clusterManager", db: "admin" } ,{ role: "clusterMonitor", db: "admin" },{ role: "hostManager", db: "admin" } ]
  }
  );
  mongo 10.78.200.107:10000/admin -u admin -p xxxxxx
  --db.auth("admin","xxxxxx")
  mongo 10.78.200.107:10000/mdb -u pns -p xxxxxx
  mongostat -h 10.78.200.107:30001 -u admin -p xxxxxx --discover --authenticationDatabase admin 1
  mongotop -h 10.78.200.107:30001 -u admin -p xxxxxx --authenticationDatabase admin
  mongo 10.78.200.107:30001  -u pns -p xxxxxx --eval "printjson(db.printSlaveReplicationInfo())"
  mongo 127.0.0.1:10000
  use admin
  db.runCommand({"enablesharding":"mdb"})
  db.runCommand({"shardcollection":"mdb.mcUser293","key":{"_id":"hashed"}})
  db.runCommand({shardcollection:"mdb.mcUser293", key:{_id:1}})
  use mdb
  for(var i=1;i<=100000;i++){ db.mcUser293.insert({uid:i, name:"user"+i}) }
  (注:原文此处被截断,以上循环体为常见造数写法的补全示例,执行前请按实际需求调整条数与字段)

运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-626370-1-1.html 上篇帖子: MongoDB 2.2.4 配置文件参考 下篇帖子: Java操纵MongoDB_1(环境设置)
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表