设为首页 收藏本站
查看: 565|回复: 0

[经验分享] mongodb3.4的falcon监控搭建

[复制链接]
发表于 2018-10-24 12:50:04 | 显示全部楼层 |阅读模式
def mongodb_connect(self, host=None, port=None, user=None, password=None):
    """Open a connection to a MongoDB server and optionally authenticate.

    Args:
        host: hostname or IP of the mongod/mongos.
        port: TCP port of the server.
        user: optional username; authentication is attempted only when
            both user and password are given.
        password: optional password for *user*.

    Returns:
        (0, conn) on success, or (exception_class, None) on failure —
        callers test the first element for truthiness.
    """
    try:
        # 1 second server-selection timeout so a dead server fails fast
        conn = MongoClient(host, port, serverSelectionTimeoutMS=1000)
        if user and password:
            db_admin = conn["admin"]
            if not db_admin.authenticate(user, password):
                # authenticate() raises on failure; a False return is
                # not expected, so deliberately ignore it (original behavior)
                pass
        # force a round trip so an unreachable server is detected here
        conn.server_info()
    except Exception:
        # was a bare except: — narrowed so Ctrl-C is not swallowed;
        # keep the original contract of returning the exception *class*
        e = sys.exc_info()[0]
        return e, None
    return 0, conn
  # data node (1): standalone, replset primary, replset secondary.
  # mongos (2), mongo config server (3)
  def get_mongo_role(self, conn):
      """Classify the connected MongoDB process.

      Args:
          conn: an open MongoClient-like connection.

      Returns:
          1 for a data node, 2 for a mongos router, 3 for a config server.
      """
      mongo_role = 1
      conn.server_info()  # verify the connection is usable before probing
      if conn.is_mongos:
          mongo_role = 2
      elif "chunks" in conn.get_database("config").collection_names():
          # not a mongos but it holds config.chunks -> it is a config server
          mongo_role = 3
      return mongo_role
  def get_mongo_monitor_data(self, conn):
      """Collect the falcon metric dict appropriate for this node's role.

      Data nodes get serverStatus metrics (plus replica-set metrics when
      the node is a primary or secondary); mongos routers get shard
      metrics; config servers only get the liveness metric.

      Args:
          conn: an open MongoClient-like connection.

      Returns:
          dict mapping falcon metric names to values.
      """
      mongo_monitor_dict = {}
      # liveness metric reported for every node type
      mongo_monitor_dict["mongo_local_alive"] = 1
      mongo_role = self.get_mongo_role(conn)
      if mongo_role == 1:  # data node
          mongodb_role, serverStatus_dict = self.serverStatus(conn)
          mongo_monitor_dict.update(serverStatus_dict)
          if mongodb_role == "master" or mongodb_role == "secondary":
              mongo_monitor_dict.update(self.repl_status(conn))
          else:
              print("this is standalone node")
      elif mongo_role == 2:  # mongos
          mongo_monitor_dict.update(self.shard_status(conn))
      return mongo_monitor_dict
  def serverStatus(self, connection):
      """Collect "serverStatus" metrics from a data node.

      Args:
          connection: an open MongoClient-like connection.

      Returns:
          (mongodb_role, metric_dict): mongodb_role is "master",
          "secondary" or "standalone"; metric_dict maps falcon metric
          names to values.
      """
      # the plain command name is equivalent to the old
      # pymongo.son_manipulator.SON([('serverStatus', 1)]) form and does
      # not depend on son_manipulator, which was removed in pymongo 4
      serverStatus = connection.admin.command("serverStatus")
      mongodb_server_dict = {}  # serverStatus metrics to upload to falcon
      mongo_version = serverStatus["version"]

      # uptime metric
      mongodb_server_dict["uptime"] = int(serverStatus["uptime"])

      # "asserts" section metrics
      for asserts_key, asserts_value in serverStatus["asserts"].items():
          mongodb_server_dict["asserts_" + asserts_key] = asserts_value

      # "extra_info" section: page_faults (falcon counter type)
      if "extra_info" in serverStatus:
          mongodb_server_dict["page_faults"] = serverStatus["extra_info"]["page_faults"]

      # "connections" section metrics
      current_conn = serverStatus["connections"]["current"]
      available_conn = serverStatus["connections"]["available"]
      mongodb_server_dict["connections_current"] = current_conn
      mongodb_server_dict["connections_available"] = available_conn
      # percent of the configured connection limit currently in use
      mongodb_server_dict["connections_used_percent"] = int(
          current_conn / (current_conn + available_conn) * 100)
      # total created since mongod started (COUNTER metric)
      mongodb_server_dict["connections_totalCreated"] = serverStatus["connections"]["totalCreated"]

      # "globalLock" currentQueue
      current_queue = serverStatus["globalLock"]["currentQueue"]
      mongodb_server_dict["globalLock_currentQueue_total"] = current_queue["total"]
      mongodb_server_dict["globalLock_currentQueue_readers"] = current_queue["readers"]
      mongodb_server_dict["globalLock_currentQueue_writers"] = current_queue["writers"]

      # "locks" section (document layout changed in MongoDB 3.0)
      # NOTE(review): lexicographic version compare works for 3.x vs "3.0"
      # but would misorder a hypothetical "10.0" -- confirm acceptable
      if "locks" in serverStatus and mongo_version > "3.0":
          lock_type_names = {"R": "Slock", "W": "Xlock", "r": "ISlock", "w": "IXlock"}
          for lock_scope in serverStatus["locks"]:  # Global, Database, Collection, oplog
              for lock_metric in serverStatus["locks"][lock_scope]:
                  lock_values = serverStatus["locks"][lock_scope][lock_metric]
                  for lock_type, lock_value in lock_values.items():
                      if lock_type not in lock_type_names:
                          # BUG FIX: the original reused a stale lock_name
                          # (or raised NameError) for unknown lock types
                          continue
                      lock_metric_key = ("locks_" + lock_scope + "_" + lock_metric
                                         + "_" + lock_type_names[lock_type])
                      mongodb_server_dict[lock_metric_key] = lock_value

      # "network" section metrics: bytesIn, bytesOut, numRequests (counter type)
      if "network" in serverStatus:
          for network_metric, network_value in serverStatus["network"].items():
              mongodb_server_dict["network_" + network_metric] = network_value

      # "opcounters" section: insert, query, update, delete, getmore, command
      if "opcounters" in serverStatus:
          for opcounters_metric, opcounters_value in serverStatus["opcounters"].items():
              mongodb_server_dict["opcounters_" + opcounters_metric] = opcounters_value

      # "opcountersRepl" section: same keys as "opcounters" (counter type)
      if "opcountersRepl" in serverStatus:
          for opcountersRepl_metric, opcountersRepl_value in serverStatus["opcountersRepl"].items():
              # BUG FIX: the original read these values from "opcounters"
              mongodb_server_dict["opcountersRepl_" + opcountersRepl_metric] = opcountersRepl_value

      # "mem" section metrics (MB values are converted to bytes)
      if "mem" in serverStatus:
          for mem_metric, mem_value in serverStatus["mem"].items():
              mem_metric_key = "mem_" + mem_metric
              if mem_metric in ["bits", "supported"]:
                  mongodb_server_dict[mem_metric_key] = mem_value
              else:
                  mongodb_server_dict[mem_metric_key] = mem_value * 1024 * 1024

      # "dur" (journaling) section metrics, MB converted to bytes
      if "dur" in serverStatus:
          mongodb_server_dict["dur_journaledBytes"] = serverStatus["dur"]["journaledMB"] * 1024 * 1024
          mongodb_server_dict["dur_writeToDataFilesBytes"] = serverStatus["dur"]["writeToDataFilesMB"] * 1024 * 1024
          mongodb_server_dict["dur_commitsInWriteLock"] = serverStatus["dur"]["commitsInWriteLock"]

      # "repl" section: derive this node's replica-set role
      mongodb_role = ""
      if "repl" in serverStatus and "secondary" in serverStatus["repl"]:
          if serverStatus["repl"]["ismaster"]:
              mongodb_role = "master"
          if serverStatus["repl"]["secondary"]:
              mongodb_role = "secondary"
      else:  # not running in replica-set mode
          mongodb_role = "standalone"

      # "backgroundFlushing" section metrics (MMAPv1 only)
      if "backgroundFlushing" in serverStatus:
          for bgFlush_metric, bgFlush_value in serverStatus["backgroundFlushing"].items():
              if bgFlush_metric != "last_finished":  # a timestamp, not a metric
                  mongodb_server_dict["backgroundFlushing_" + bgFlush_metric] = bgFlush_value

      # cursor metrics from the "metrics" section
      if "metrics" in serverStatus and "cursor" in serverStatus["metrics"]:
          cursor_status = serverStatus["metrics"]["cursor"]
          mongodb_server_dict["cursor_timedOut"] = cursor_status["timedOut"]
          mongodb_server_dict["cursor_open_noTimeout"] = cursor_status["open"]["noTimeout"]
          mongodb_server_dict["cursor_open_pinned"] = cursor_status["open"]["pinned"]
          mongodb_server_dict["cursor_open_total"] = cursor_status["open"]["total"]

      # "wiredTiger" engine section
      if "wiredTiger" in serverStatus:
          serverStatus_wt = serverStatus["wiredTiger"]
          # cache
          wt_cache = serverStatus_wt["cache"]
          mongodb_server_dict["wt_cache_used_total_bytes"] = wt_cache["bytes currently in the cache"]
          mongodb_server_dict["wt_cache_dirty_bytes"] = wt_cache["tracked dirty bytes in the cache"]
          mongodb_server_dict["wt_cache_readinto_bytes"] = wt_cache["bytes read into cache"]
          mongodb_server_dict["wt_cache_writtenfrom_bytes"] = wt_cache["bytes written from cache"]
          # concurrentTransactions: remaining read/write tickets
          wt_concurrentTransactions = serverStatus_wt["concurrentTransactions"]
          mongodb_server_dict["wt_concurrentTransactions_write"] = wt_concurrentTransactions["write"]["available"]
          mongodb_server_dict["wt_concurrentTransactions_read"] = wt_concurrentTransactions["read"]["available"]
          # "block-manager" section
          wt_block_manager = serverStatus_wt["block-manager"]
          mongodb_server_dict["wt_bm_bytes_read"] = wt_block_manager["bytes read"]
          mongodb_server_dict["wt_bm_bytes_written"] = wt_block_manager["bytes written"]
          mongodb_server_dict["wt_bm_blocks_read"] = wt_block_manager["blocks read"]
          mongodb_server_dict["wt_bm_blocks_written"] = wt_block_manager["blocks written"]

      # "rocksdb" engine section
      if "rocksdb" in serverStatus:
          serverStatus_rocksdb = serverStatus["rocksdb"]
          mongodb_server_dict["rocksdb_num_immutable_mem_table"] = serverStatus_rocksdb["num-immutable-mem-table"]
          mongodb_server_dict["rocksdb_mem_table_flush_pending"] = serverStatus_rocksdb["mem-table-flush-pending"]
          mongodb_server_dict["rocksdb_compaction_pending"] = serverStatus_rocksdb["compaction-pending"]
          mongodb_server_dict["rocksdb_background_errors"] = serverStatus_rocksdb["background-errors"]
          mongodb_server_dict["rocksdb_num_entries_active_mem_table"] = serverStatus_rocksdb["num-entries-active-mem-table"]
          mongodb_server_dict["rocksdb_num_entries_imm_mem_tables"] = serverStatus_rocksdb["num-entries-imm-mem-tables"]
          mongodb_server_dict["rocksdb_num_snapshots"] = serverStatus_rocksdb["num-snapshots"]
          mongodb_server_dict["rocksdb_oldest_snapshot_time"] = serverStatus_rocksdb["oldest-snapshot-time"]
          mongodb_server_dict["rocksdb_num_live_versions"] = serverStatus_rocksdb["num-live-versions"]
          mongodb_server_dict["rocksdb_total_live_recovery_units"] = serverStatus_rocksdb["total-live-recovery-units"]

      # "PerconaFT" engine section
      if "PerconaFT" in serverStatus:
          serverStatus_PerconaFT = serverStatus["PerconaFT"]
          mongodb_server_dict["PerconaFT_log_count"] = serverStatus_PerconaFT["log"]["count"]
          mongodb_server_dict["PerconaFT_log_time"] = serverStatus_PerconaFT["log"]["time"]
          mongodb_server_dict["PerconaFT_log_bytes"] = serverStatus_PerconaFT["log"]["bytes"]
          mongodb_server_dict["PerconaFT_fsync_count"] = serverStatus_PerconaFT["fsync"]["count"]
          mongodb_server_dict["PerconaFT_fsync_time"] = serverStatus_PerconaFT["fsync"]["time"]
          # cachetable
          PerconaFT_cachetable = serverStatus_PerconaFT["cachetable"]
          mongodb_server_dict["PerconaFT_cachetable_size_current"] = PerconaFT_cachetable["size"]["current"]
          mongodb_server_dict["PerconaFT_cachetable_size_writing"] = PerconaFT_cachetable["size"]["writing"]
          mongodb_server_dict["PerconaFT_cachetable_size_limit"] = PerconaFT_cachetable["size"]["limit"]
          # checkpoint
          PerconaFT_checkpoint = serverStatus_PerconaFT["checkpoint"]
          mongodb_server_dict["PerconaFT_checkpoint_count"] = PerconaFT_checkpoint["count"]
          mongodb_server_dict["PerconaFT_checkpoint_time"] = PerconaFT_checkpoint["time"]
          # nonleaf/leaf checkpoint write stats share the same shape
          for node_kind in ("nonleaf", "leaf"):
              write_stats = PerconaFT_checkpoint["write"][node_kind]
              prefix = "PerconaFT_checkpoint_write_" + node_kind
              mongodb_server_dict[prefix + "_count"] = write_stats["count"]
              mongodb_server_dict[prefix + "_time"] = write_stats["time"]
              mongodb_server_dict[prefix + "_bytes_compressed"] = write_stats["bytes"]["compressed"]
              mongodb_server_dict[prefix + "_bytes_uncompressed"] = write_stats["bytes"]["uncompressed"]
          # serializeTime
          for serializeTime_item in serverStatus_PerconaFT["serializeTime"]:
              prefix = "PerconaFT_serializeTime_" + serializeTime_item
              for serializeTime_key, serializeTime_value in serverStatus_PerconaFT["serializeTime"][serializeTime_item].items():
                  mongodb_server_dict[prefix + "_" + serializeTime_key] = serializeTime_value
          # compressionRatio
          for compressionRatio_item, ratio_value in serverStatus_PerconaFT["compressionRatio"].items():
              mongodb_server_dict["PerconaFT_compressionRatio_" + compressionRatio_item] = ratio_value

      return (mongodb_role, mongodb_server_dict)
  def repl_status(self, connection):
      """Collect replica-set metrics: state, health, lag and oplog window.

      Args:
          connection: an open MongoClient-like connection to a replica-set
              member.

      Returns:
          dict of repl_* falcon metrics.
      """
      replStatus = connection.admin.command("replSetGetStatus")
      repl_status_dict = {}
      # myState: 1 PRIMARY, 2 SECONDARY, other values are transitional
      repl_status_dict["repl_myState"] = replStatus["myState"]
      master_optime = 0  # primary's last oplog op time
      myself_optime = 0  # this member's last oplog op time
      for repl_member in replStatus["members"]:
          if "self" in repl_member and repl_member["self"]:
              repl_status_dict["repl_health"] = repl_member["health"]
              repl_status_dict["repl_optime"] = repl_member["optime"].time
              # BUG FIX: the original tested for "repl_electionTime" /
              # "repl_configVersion", keys that never occur in member
              # documents, so these metrics were never reported
              if "electionTime" in repl_member:
                  repl_status_dict["repl_electionTime"] = repl_member["electionTime"].time
              if "configVersion" in repl_member:
                  repl_status_dict["repl_configVersion"] = repl_member["configVersion"]
              myself_optime = repl_member["optime"].time
          if replStatus["myState"] == 2 and repl_member["state"] == 1:
              # we are a SECONDARY: remember the primary's optime for lag
              master_optime = repl_member["optime"].time
      if replStatus["myState"] == 2:
          repl_status_dict["repl_lag"] = master_optime - myself_optime

      # oplog window in hours
      oplog_collection = connection["local"]["oplog.rs"]
      # 1 / -1 are the documented values of pymongo.ASCENDING / DESCENDING
      oplog_tFirst = oplog_collection.find({}, {"ts": 1}).sort('$natural', 1).limit(1).next()
      oplog_tLast = oplog_collection.find({}, {"ts": 1}).sort('$natural', -1).limit(1).next()
      oplogrs_collstats = connection["local"].command("collstats", "oplog.rs")
      window_multiple = 1  # scale factor for an oplog.rs that is not yet full
      if "maxSize" in oplogrs_collstats:
          window_multiple = oplogrs_collstats["maxSize"] / (
              oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
      else:
          window_multiple = oplogrs_collstats["storageSize"] / (
              oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
      # observed span in hours (.xx), scaled up when the oplog is not full
      oplog_window = round((oplog_tLast["ts"].time - oplog_tFirst["ts"].time) / 3600.0, 2) * window_multiple
      repl_status_dict["repl_oplog_window"] = oplog_window
      return repl_status_dict
  # only for mongos nodes
  def shard_status(self, conn):
      """Collect sharded-cluster metrics from the config database.

      Args:
          conn: an open connection to a mongos.

      Returns:
          dict of shards_* falcon metrics.
      """
      config_db = conn["config"]
      settings_col = config_db["settings"]
      balancer_doc = settings_col.find_one({'_id': 'balancer'})
      shards_dict = {}
      # shards_BalancerState: 1 enabled (the default when no doc exists),
      # 0 explicitly stopped
      if balancer_doc is None:
          shards_dict["shards_BalancerState"] = 1
      elif balancer_doc.get("stopped"):  # .get(): doc may omit "stopped"
          shards_dict["shards_BalancerState"] = 0
      else:
          shards_dict["shards_BalancerState"] = 1
      # shards_activeWindow: 0 no window configured, 1 configured
      # shards_activeWindow_start/stop: {"start": "23:30", "stop": "6:00"}
      # reported as 23.30 / 6.00 (falcon metric values cannot contain ":")
      if balancer_doc is None:
          shards_dict["shards_activeWindow"] = 0
      elif "activeWindow" in balancer_doc:
          shards_dict["shards_activeWindow"] = 1
          if "start" in balancer_doc["activeWindow"]:
              window_start = balancer_doc["activeWindow"]["start"]
              shards_dict["shards_activeWindow_start"] = window_start.replace(":", ".")
          if "stop" in balancer_doc["activeWindow"]:
              window_stop = balancer_doc["activeWindow"]["stop"]
              shards_dict["shards_activeWindow_stop"] = window_stop.replace(":", ".")
      # shards_chunkSize metric
      chunksize_doc = settings_col.find_one({"_id": "chunksize"})
      if chunksize_doc is not None:
          shards_dict["shards_chunkSize"] = chunksize_doc["value"]
      # shards_isBalancerRunning metric
      locks_col = config_db["locks"]
      balancer_lock_doc = locks_col.find_one({'_id': 'balancer'})
      if balancer_lock_doc is None:
          print("config.locks collection empty or missing. be sure you are connected to a mongos")
          shards_dict["shards_isBalancerRunning"] = 0
      elif balancer_lock_doc["state"] > 0:
          shards_dict["shards_isBalancerRunning"] = 1
      else:
          shards_dict["shards_isBalancerRunning"] = 0
      # shards_size metric: number of shards
      shards_dict["shards_size"] = config_db["shards"].count()
      # shards_mongosSize metric: number of mongos routers
      shards_dict["shards_mongosSize"] = config_db["mongos"].count()
      return shards_dict


运维网声明 1、欢迎大家加入本站运维交流群:群②:261659950 群⑤:202807635 群⑦870801961 群⑧679858003
2、本站所有主题由该帖子作者发表,该帖子作者与运维网享有帖子相关版权
3、所有作品的著作权均归原作者享有,请您和我们一样尊重他人的著作权等合法权益。如果您对作品感到满意,请购买正版
4、禁止制作、复制、发布和传播具有反动、淫秽、色情、暴力、凶杀等内容的信息,一经发现立即删除。若您因此触犯法律,一切后果自负,我们对此不承担任何责任
5、所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其内容的准确性、可靠性、正当性、安全性、合法性等负责,亦不承担任何法律责任
6、所有作品仅供您个人学习、研究或欣赏,不得用于商业或者其他用途,否则,一切后果均由您自己承担,我们对此不承担任何法律责任
7、如涉及侵犯版权等问题,请您及时通知我们,我们将立即采取措施予以解决
8、联系人Email:admin@iyunv.com 网址:www.yunweiku.com

所有资源均系网友上传或者通过网络收集,我们仅提供一个展示、介绍、观摩学习的平台,我们不对其承担任何法律责任,如涉及侵犯版权等问题,请您及时通知我们,我们将立即处理,联系人Email:kefu@iyunv.com,QQ:1061981298 本贴地址:https://www.yunweiku.com/thread-625946-1-1.html 上篇帖子: Open-Falcon 监控系统监控 MySQL/Redis/MongoDB 状态监控 下篇帖子: 为什么MongoDB采用B树索引,而Mysql用B+树做索引
您需要登录后才可以回帖 登录 | 立即注册

本版积分规则

扫码加入运维网微信交流群X

扫码加入运维网微信交流群

扫描二维码加入运维网微信交流群,最新一手资源尽在官方微信交流群!快快加入我们吧...

扫描微信二维码查看详情

客服E-mail:kefu@iyunv.com 客服QQ:1061981298


QQ群⑦:运维网交流群⑦ QQ群⑧:运维网交流群⑧ k8s群:运维网kubernetes交流群


提醒:禁止发布任何违反国家法律、法规的言论与图片等内容;本站内容均来自个人观点与网络等信息,非本站认同之观点.


本站大部分资源是网友从网上搜集分享而来,其版权均归原作者及其网站所有,我们尊重他人的合法权益,如有内容侵犯您的合法权益,请及时与我们联系进行核实删除!



合作伙伴: 青云cloud

快速回复 返回顶部 返回列表