沈阳格力专卖店 posted on 2018-10-24 12:50:04

Setting up Falcon monitoring for MongoDB 3.4

import sys

import pymongo
from bson.son import SON
from pymongo import MongoClient


class MongodbMonitor(object):  # class name is assumed here; the original post only shows the methods

    def mongodb_connect(self, host=None, port=None, user=None, password=None):
        # host/port/user/password are read from the conf file
        try:
            conn = MongoClient(host, port, serverSelectionTimeoutMS=1000)  # connection timeout 1 sec
            if user and password:
                db_admin = conn["admin"]
                if not db_admin.authenticate(user, password):
                    pass
            conn.server_info()
        except Exception:
            e = sys.exc_info()
            return e, None
        return 0, conn
    # data node (1): standalone, replset primary, replset secondary; mongos (2); config server (3)
    def get_mongo_role(self, conn):
        mongo_role = 1
        conn.server_info()
        if conn.is_mongos:
            mongo_role = 2
        elif "chunks" in conn.get_database("config").collection_names():
            # not a mongos but has a config.chunks collection: it's a config server
            mongo_role = 3
        return mongo_role
    def get_mongo_monitor_data(self, conn):
        mongo_monitor_dict = {}
        mongo_monitor_dict["mongo_local_alive"] = 1  # mongo local alive metric for all nodes
        mongo_role = self.get_mongo_role(conn)
        if mongo_role == 1:
            mongodb_role, serverStatus_dict = self.serverStatus(conn)
            mongo_monitor_dict.update(serverStatus_dict)
            repl_status_dict = {}
            if mongodb_role == "master" or mongodb_role == "secondary":
                repl_status_dict = self.repl_status(conn)
                mongo_monitor_dict.update(repl_status_dict)
            else:
                print("this is a standalone node")
        elif mongo_role == 2:  # mongos
            shards_dict = self.shard_status(conn)
            mongo_monitor_dict.update(shards_dict)
        return mongo_monitor_dict
    def serverStatus(self, connection):
        serverStatus = connection.admin.command(SON([('serverStatus', 1)]))
        mongodb_server_dict = {}  # mongodb server status metrics to upload to falcon
        mongo_version = serverStatus["version"]
        # uptime metric
        mongodb_server_dict["uptime"] = int(serverStatus["uptime"])
        # "asserts" section metrics
        mongo_asserts = serverStatus["asserts"]
        for asserts_key in mongo_asserts.keys():
            asserts_key_name = "asserts_" + asserts_key
            mongodb_server_dict[asserts_key_name] = mongo_asserts[asserts_key]
  ###"extra_info" section metrics: page_faults.falcon counter type.
  #ifserverStatus.has_key("extra_info"):
  if"extra_info" in serverStatus :
  mongodb_server_dict["page_faults"]= serverStatus["extra_info"]["page_faults"]
  ###"connections" section metrics
  current_conn= serverStatus["connections"]["current"]
  available_conn =serverStatus["connections"]["available"]
  mongodb_server_dict["connections_current"] = current_conn
  mongodb_server_dict["connections_available"] = available_conn
  # mongodbconnection used percent
  mongodb_server_dict["connections_used_percent"] =int((current_conn/(current_conn + available_conn)*100))
  # totalcreated from mongodb started.COUNTERmetric
  mongodb_server_dict["connections_totalCreated"] = serverStatus["connections"]["totalCreated"]
  #"globalLock" currentQueue
  mongodb_server_dict["globalLock_currentQueue_total"]=serverStatus["globalLock"]["currentQueue"]["total"]
  mongodb_server_dict["globalLock_currentQueue_readers"] =serverStatus["globalLock"]["currentQueue"]["readers"]
  mongodb_server_dict["globalLock_currentQueue_writers"] =serverStatus["globalLock"]["currentQueue"]["writers"]
  #"locks" section, Changed in version 3.0
  if"locks" in serverStatus and mongo_version >"3.0":
  #ifserverStatus.has_key("locks") and mongo_version >"3.0":
  locks_dict_keys = serverStatus["locks"].keys()
  forlock_scope in locks_dict_keys:# Global,Database,Collection,Oplog
  forlock_metricin serverStatus["locks"]:
  for lock_type inserverStatus["locks"]:
  if lock_type == "R":
  lock_name = "Slock"
  elif lock_type == "W":
  lock_name ="Xlock"
  elif lock_type == "r":
  lock_name = "ISlock"
  elif lock_type == "w":
  lock_name = "IXlock"
  lock_metric_key = "locks_" +lock_scope + "_" + lock_metric + "_" + lock_name
  mongodb_server_dict = serverStatus["locks"]
  #"network" section metrics: bytesIn, bytesOut, numRequests;counter type
  #ifserverStatus.has_key("network"):
  if"network" in serverStatus :
  fornetwork_metric in serverStatus["network"].keys():
  network_metric_key = "network_"+ network_metric   # network metric key for upload
  mongodb_server_dict =serverStatus["network"]
  ###"opcounters" section metrics:insert, query, update, delete, getmore, command.couter type
  #ifserverStatus.has_key("opcounters"):
  if"opcounters" in serverStatus :
  foropcounters_metric in serverStatus["opcounters"].keys():
  opcounters_metric_key = "opcounters_" + opcounters_metric
  mongodb_server_dict =serverStatus["opcounters"]
  ###"opcountersRepl" section metrics: insert, query, update, delete,getmore, command. couter type
  #ifserverStatus.has_key("opcountersRepl"):
  if"opcountersRepl" in serverStatus :
  foropcountersRepl_metric in serverStatus["opcountersRepl"].keys():
  opcountersRepl_metric_key = "opcountersRepl_" +opcountersRepl_metric
  mongodb_server_dict =serverStatus["opcounters"]
  ###"mem" section metrics:
  #ifserverStatus.has_key("mem"):
  if"mem" in serverStatus :
  formem_metric in serverStatus["mem"].keys():
  mem_metric_key ="mem_"+ mem_metric
  if(mem_metric in ["bits","supported"] ):
  mongodb_server_dict =serverStatus["mem"]
  else:
  mongodb_server_dict =serverStatus["mem"]*1024*1024
  ###"dur" section metrics:
  #ifserverStatus.has_key("dur"):
  if"dur" in serverStatus :
  mongodb_server_dict["dur_journaledBytes"] =serverStatus["dur"]["journaledMB"]*1024*1024
  mongodb_server_dict["dur_writeToDataFilesBytes"] =serverStatus["dur"]["writeToDataFilesMB"]*1024*1024
  mongodb_server_dict["dur_commitsInWriteLock"] =serverStatus["dur"]["commitsInWriteLock"]
  ###"repl" section
  mongodb_role= ""
  #if(serverStatus.has_key("repl") and serverStatus["repl"].has_key("secondary")):
  if("repl" in serverStatus and "secondary" inserverStatus["repl"]):
  ifserverStatus["repl"]["ismaster"]:
  mongodb_role = "master"
  if serverStatus["repl"]["secondary"]:
  mongodb_role = "secondary"
  else: # notReplica sets mode
  mongodb_role = "standalone"
  ###"backgroundFlushing" section metrics, only for MMAPv1
  #ifserverStatus.has_key("backgroundFlushing"):
  if"backgroundFlushing" in serverStatus :
  forbgFlush_metric in serverStatus["backgroundFlushing"].keys():
  ifbgFlush_metric != "last_finished": # discard last_finished metric
  bgFlush_metric_key = "backgroundFlushing_" + bgFlush_metric
  mongodb_server_dict =serverStatus["backgroundFlushing"]
  ### cursorfrom "metrics" section
  #ifserverStatus.has_key("metrics") and serverStatus["metrics"].has_key("cursor"):
  if"metrics" in serverStatus and "cursor" inserverStatus["metrics"]:
  cursor_status = serverStatus["metrics"]["cursor"]
  mongodb_server_dict["cursor_timedOut"] =cursor_status["timedOut"]
  mongodb_server_dict["cursor_open_noTimeout"] = cursor_status["open"]["noTimeout"]
  mongodb_server_dict["cursor_open_pinned"] =cursor_status["open"]["pinned"]
  mongodb_server_dict["cursor_open_total"] = cursor_status["open"]["total"]
  ###"wiredTiger" section
  #ifserverStatus.has_key("wiredTiger"):
  if"wiredTiger" in serverStatus :
  serverStatus_wt = serverStatus["wiredTiger"]
  #cache
  wt_cache= serverStatus_wt["cache"]
  mongodb_server_dict["wt_cache_used_total_bytes"] =wt_cache["bytes currently in the cache"]
  mongodb_server_dict["wt_cache_dirty_bytes"] =wt_cache["tracked dirty bytes in the cache"]
  mongodb_server_dict["wt_cache_readinto_bytes"] =wt_cache["bytes read into cache"]
  mongodb_server_dict["wt_cache_writtenfrom_bytes"] =wt_cache["bytes written from cache"]
  #concurrentTransactions
  wt_concurrentTransactions =serverStatus_wt["concurrentTransactions"]
  mongodb_server_dict["wt_concurrentTransactions_write"] =wt_concurrentTransactions["write"]["available"]
  mongodb_server_dict["wt_concurrentTransactions_read"]= wt_concurrentTransactions["read"]["available"]
  #"block-manager" section
  wt_block_manager = serverStatus_wt["block-manager"]
  mongodb_server_dict["wt_bm_bytes_read"] =wt_block_manager["bytes read"]
  mongodb_server_dict["wt_bm_bytes_written"] =wt_block_manager["bytes written"]
  mongodb_server_dict["wt_bm_blocks_read"] =wt_block_manager["blocks read" ]
  mongodb_server_dict["wt_bm_blocks_written"] =wt_block_manager["blocks written"]
  ###"rocksdb" engine
  #ifserverStatus.has_key("rocksdb"):
  if"rocksdb" in serverStatus :
  serverStatus_rocksdb = serverStatus["rocksdb"]
  mongodb_server_dict["rocksdb_num_immutable_mem_table"]=serverStatus_rocksdb["num-immutable-mem-table"]
  mongodb_server_dict["rocksdb_mem_table_flush_pending"] =serverStatus_rocksdb["mem-table-flush-pending"]
  mongodb_server_dict["rocksdb_compaction_pending"]= serverStatus_rocksdb["compaction-pending"]
  mongodb_server_dict["rocksdb_background_errors"] =serverStatus_rocksdb["background-errors"]
  mongodb_server_dict["rocksdb_num_entries_active_mem_table"] =serverStatus_rocksdb["num-entries-active-mem-table"]
  mongodb_server_dict["rocksdb_num_entries_imm_mem_tables"] =serverStatus_rocksdb["num-entries-imm-mem-tables"]
  mongodb_server_dict["rocksdb_num_snapshots"] = serverStatus_rocksdb["num-snapshots"]
  mongodb_server_dict["rocksdb_oldest_snapshot_time"] =serverStatus_rocksdb["oldest-snapshot-time"]
  mongodb_server_dict["rocksdb_num_live_versions"] =serverStatus_rocksdb["num-live-versions"]
  mongodb_server_dict["rocksdb_total_live_recovery_units"]= serverStatus_rocksdb["total-live-recovery-units"]
  ###"PerconaFT" engine
  #ifserverStatus.has_key("PerconaFT"):
  if"PerconaFT" in serverStatus :
  serverStatus_PerconaFT = serverStatus["PerconaFT"]
  mongodb_server_dict["PerconaFT_log_count"] =serverStatus_PerconaFT["log"]["count"]
  mongodb_server_dict["PerconaFT_log_time"] =serverStatus_PerconaFT["log"]["time"]
  mongodb_server_dict["PerconaFT_log_bytes"] =serverStatus_PerconaFT["log"]["bytes"]
  mongodb_server_dict["PerconaFT_fsync_count"] =serverStatus_PerconaFT["fsync"]["count"]
  mongodb_server_dict["PerconaFT_fsync_time"] =serverStatus_PerconaFT["fsync"]["time"]
  ###cachetable
  PerconaFT_cachetable = serverStatus_PerconaFT["cachetable"]
  mongodb_server_dict["PerconaFT_cachetable_size_current"] =PerconaFT_cachetable["size"]["current"]
  mongodb_server_dict["PerconaFT_cachetable_size_writing"]=PerconaFT_cachetable["size"]["writing"]
  mongodb_server_dict["PerconaFT_cachetable_size_limit"]=PerconaFT_cachetable["size"]["limit"]
  ###PerconaFT checkpoint
  PerconaFT_checkpoint = serverStatus_PerconaFT["checkpoint"]
  mongodb_server_dict["PerconaFT_checkpoint_count"] =PerconaFT_checkpoint["count"]
  mongodb_server_dict["PerconaFT_checkpoint_time"] =PerconaFT_checkpoint["time"]
  mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_count"]=PerconaFT_checkpoint["write"]["nonleaf"]["count"]
  mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_time"]=PerconaFT_checkpoint["write"]["nonleaf"]["time"]
  mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_compressed"]=PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["compressed"]
  mongodb_server_dict["PerconaFT_checkpoint_write_nonleaf_bytes_uncompressed"]= PerconaFT_checkpoint["write"]["nonleaf"]["bytes"]["uncompressed"]
  mongodb_server_dict["PerconaFT_checkpoint_write_leaf_count"] =PerconaFT_checkpoint["write"]["leaf"]["count"]
  mongodb_server_dict["PerconaFT_checkpoint_write_leaf_time"] =PerconaFT_checkpoint["write"]["leaf"]["time"]
  mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_compressed"]=PerconaFT_checkpoint["write"]["leaf"]["bytes"]["compressed"]
  mongodb_server_dict["PerconaFT_checkpoint_write_leaf_bytes_uncompressed"]=PerconaFT_checkpoint["write"]["leaf"]["bytes"]["uncompressed"]
  ###serializeTime
  forserializeTime_iteminserverStatus_PerconaFT["serializeTime"]:
  prefix = "PerconaFT_serializeTime_" + serializeTime_item
  forserializeTime_key inserverStatus_PerconaFT["serializeTime"]:
  key_name = prefix + "_" + serializeTime_key
  mongodb_server_dict = serverStatus_PerconaFT["serializeTime"]
  ###PerconaFTcompressionRatio
  forcompressionRatio_item in serverStatus_PerconaFT["compressionRatio"]:
  key_name = "PerconaFT_compressionRatio_" + compressionRatio_item
  mongodb_server_dict =serverStatus_PerconaFT["compressionRatio"]
  return(mongodb_role, mongodb_server_dict)
    def repl_status(self, connection):
        replStatus = connection.admin.command("replSetGetStatus")
        repl_status_dict = {}  # replica set metric dict
        # myState: 1 PRIMARY, 2 SECONDARY, other states as reported
        repl_status_dict["repl_myState"] = replStatus["myState"]
        repl_status_members = replStatus["members"]
        master_optime = 0  # primary oplog time
        myself_optime = 0  # this node's oplog time
        for repl_member in repl_status_members:
            # "optime" is a Timestamp under pv0 and a {"ts": Timestamp, "t": term} document under pv1 (MongoDB >= 3.2)
            member_optime = repl_member["optime"]["ts"] if isinstance(repl_member["optime"], dict) else repl_member["optime"]
            if "self" in repl_member and repl_member["self"]:
                repl_status_dict["repl_health"] = repl_member["health"]
                repl_status_dict["repl_optime"] = member_optime.time
                if "electionTime" in repl_member:
                    repl_status_dict["repl_electionTime"] = repl_member["electionTime"].time
                if "configVersion" in repl_member:
                    repl_status_dict["repl_configVersion"] = repl_member["configVersion"]
                myself_optime = member_optime.time
            if replStatus["myState"] == 2 and repl_member["state"] == 1:  # on a SECONDARY, remember the PRIMARY optime for the repl lag
                master_optime = member_optime.time
        if replStatus["myState"] == 2:
            repl_status_dict["repl_lag"] = master_optime - myself_optime
        # oplog window in hours
        oplog_collection = connection["local"]["oplog.rs"]
        oplog_tFirst = oplog_collection.find({}, {"ts": 1}).sort('$natural', pymongo.ASCENDING).limit(1).next()
        oplog_tLast = oplog_collection.find({}, {"ts": 1}).sort('$natural', pymongo.DESCENDING).limit(1).next()
        oplogrs_collstats = connection["local"].command("collstats", "oplog.rs")
        window_multiple = 1  # > 1 while the oplog.rs collection is not yet full
        if "maxSize" in oplogrs_collstats:
            window_multiple = float(oplogrs_collstats["maxSize"]) / (oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        else:
            window_multiple = float(oplogrs_collstats["storageSize"]) / (oplogrs_collstats["count"] * oplogrs_collstats["avgObjSize"])
        # hours currently covered by the oplog, scaled up if the oplog is not full
        oplog_window = round((oplog_tLast["ts"].time - oplog_tFirst["ts"].time) / 3600.0, 2) * window_multiple
        repl_status_dict["repl_oplog_window"] = oplog_window
        return repl_status_dict
    # only for mongos nodes
    def shard_status(self, conn):
        config_db = conn["config"]
        settings_col = config_db["settings"]
        balancer_doc = settings_col.find_one({'_id': 'balancer'})
        shards_dict = {}
        # shards_BalancerState metric: 1 enabled, 0 stopped
        if balancer_doc is None:
            shards_dict["shards_BalancerState"] = 1
        elif balancer_doc.get("stopped"):  # .get() in case the "stopped" field is absent
            shards_dict["shards_BalancerState"] = 0
        else:
            shards_dict["shards_BalancerState"] = 1
        # shards_activeWindow metric: 0 no balancing window configured, 1 configured
        # shards_activeWindow_start / shards_activeWindow_stop metrics:
        #   {"start": "23:30", "stop": "6:00"} is reported as 23.30 / 6.00
        if balancer_doc is None:
            shards_dict["shards_activeWindow"] = 0
        elif "activeWindow" in balancer_doc:
            shards_dict["shards_activeWindow"] = 1
            if "start" in balancer_doc["activeWindow"]:
                window_start = balancer_doc["activeWindow"]["start"]
                shards_dict["shards_activeWindow_start"] = window_start.replace(":", ".")
            if "stop" in balancer_doc["activeWindow"]:
                window_stop = balancer_doc["activeWindow"]["stop"]
                shards_dict["shards_activeWindow_stop"] = window_stop.replace(":", ".")
        # shards_chunkSize metric
        chunksize_doc = settings_col.find_one({"_id": "chunksize"})
        if chunksize_doc is not None:
            shards_dict["shards_chunkSize"] = chunksize_doc["value"]
        # shards_isBalancerRunning metric
        locks_col = config_db["locks"]
        balancer_lock_doc = locks_col.find_one({'_id': 'balancer'})
        if balancer_lock_doc is None:
            print("config.locks collection empty or missing. be sure you are connected to a mongos")
            shards_dict["shards_isBalancerRunning"] = 0
        elif balancer_lock_doc["state"] > 0:
            shards_dict["shards_isBalancerRunning"] = 1
        else:
            shards_dict["shards_isBalancerRunning"] = 0
        # shards_size metric
        shards_col = config_db["shards"]
        shards_dict["shards_size"] = shards_col.count()
        # shards_mongosSize metric
        mongos_col = config_db["mongos"]
        shards_dict["shards_mongosSize"] = mongos_col.count()
        return shards_dict
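
To get these values into Open-Falcon, the dict returned by get_mongo_monitor_data() still has to be converted into the agent's push format and posted to the agent. Below is a minimal usage sketch that is not part of the original post: it assumes the class above is named MongodbMonitor (a name chosen during cleanup), that a local Open-Falcon agent exposes its push API at http://127.0.0.1:1988/v1/push, that MongoDB listens on 127.0.0.1:27017, that the collection step is 60 seconds, and that the requests library is installed. Counter-style metrics (opcounters_*, network_*, page_faults, connections_totalCreated) would normally be reported with counterType COUNTER instead of GAUGE.

# usage sketch: collect the metrics and push them to a local Open-Falcon agent
# (host, port, step and tags below are placeholders, adjust to your deployment)
import json
import socket
import time

import requests

if __name__ == "__main__":
    monitor = MongodbMonitor()
    err, conn = monitor.mongodb_connect(host="127.0.0.1", port=27017)
    if conn is None:
        # connection failed: only report the alive metric as 0
        metrics = {"mongo_local_alive": 0}
    else:
        metrics = monitor.get_mongo_monitor_data(conn)
    endpoint = socket.gethostname()
    timestamp = int(time.time())
    payload = []
    for metric, value in metrics.items():
        payload.append({
            "endpoint": endpoint,
            "metric": metric,
            "timestamp": timestamp,
            "step": 60,               # collection interval in seconds
            "value": value,
            "counterType": "GAUGE",   # switch to "COUNTER" for the counter-style metrics
            "tags": "port=27017",
        })
    # push to the Open-Falcon agent's HTTP API
    r = requests.post("http://127.0.0.1:1988/v1/push", data=json.dumps(payload))
    print(r.text)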
