import datetime
import logging
import re

import agent_util
from agent_util import float
def custom_json_converter(d):
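    """json_loads object_hook: rebuild datetime objects from the
    {"__datetime__": ...} markers produced by the ISODate substitution in
    execute_query(); any other dict is returned unchanged."""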
if "__datetime__" in d:
dt = d["__datetime__"]
        # Handle the two datetime formats Mongo emits (with and without fractional seconds).
        try:
            return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%SZ")
        except ValueError:
            return datetime.datetime.strptime(dt, "%Y-%m-%dT%H:%M:%S.%fZ")
return d
def execute_query(query, config):
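    """Build a mongo shell command line from the agent config, run `query`
    through it, and return a (status, parsed_result, raw_output) tuple. The
    shell output is normalised with regexes so it parses as plain JSON."""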
mongo_bin = agent_util.which("mongo", exc=True)
hostname = config.get("hostname", "localhost").strip()
port = int(config.get("port", "27017"))
user = config.get("username", "").strip()
password = config.get("password", "").strip()
database = config.get("database", "").strip()
client_cert = config.get("client_cert", "").strip()
ca_cert = config.get("ca_cert", "").strip()
authenticationdb = config.get("authenticationdb", "").strip()
mongo_connection = "%s " % mongo_bin
if user:
mongo_connection += "-u %s " % user
    if password:
        # Trailing space keeps the password from running into the next flag.
        mongo_connection += "-p %s " % password
if client_cert and ca_cert:
mongo_connection += "--ssl --sslPEMKeyFile %s --sslCAFile %s --authenticationDatabase '$external' --authenticationMechanism MONGODB-X509"\
% (client_cert, ca_cert)
if authenticationdb:
mongo_connection += " --authenticationDatabase %s" % authenticationdb.strip()
mongo_connection += " %s:%s/%s" % (hostname, port, database)
query = "printjson(%s)" % query
cmd = "%s --quiet --eval %r" % (mongo_connection, query)
status, raw_output = agent_util.execute_command(cmd)
# strip out some of the special mongo objects from the JSON so we can parse it
result = raw_output.strip()
result = re.sub(r"Timestamp\((.*?), (.*?)\)", r'{"__timestamp__": {"t": \1, "i": \2}}', result)
result = re.sub(r"ISODate\((.*?)\)", r'{"__datetime__": \1}', result)
    result = re.sub(r'NumberLong\("?(-?[0-9]+)"?\)', r'\1', result)
result = re.sub(r'ObjectId\((.*?)\)', r'\1', result)
    result = re.sub(r'BinData\((.*?),(.*?)\)', r'{"__bindata__": {"a": \1, "b": \2}}', result)
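    # e.g. ISODate("2015-01-01T00:00:00Z") becomes {"__datetime__": "2015-01-01T00:00:00Z"}
    # and NumberLong("42") becomes 42, leaving plain JSON for json_loads below.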
    # convert the cleaned JSON to a dict
    try:
        result = agent_util.json_loads(result, object_hook=custom_json_converter)
    except Exception:
        result = {}
return (status, result, raw_output)
class MongoPlugin(agent_util.Plugin):
textkey = "mongo"
label = "Mongo"
@classmethod
def get_metadata(self, config):
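        """Return the metric catalogue for this plugin. The general metrics are
        flagged MISCONFIGURED when the connection test fails; the replica_set.*
        metrics are flagged UNSUPPORTED when rs.status() reports the server is
        not a replica set member."""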
# used for general, single node instances
status = agent_util.SUPPORTED
msg = None
# if mongo server is detected in replica set, extended options will become available
replica_set_status = agent_util.SUPPORTED
replica_set_msg = None
        mongo = agent_util.which("mongo")
        if not mongo:
            # Without the mongo shell there is nothing to monitor.
            self.log.info("mongo binary not found")
            return {}
if status is agent_util.SUPPORTED:
# we're testing the general connection with this and if it's in a replica set
cmd_status, result, raw_output = execute_query("rs.status()", config)
self.log.debug("Mongo result: %s" % result)
# connection failed
if cmd_status != 0:
                self.log.error("error connecting to mongo server. raw output: %s" % raw_output)
status = agent_util.MISCONFIGURED
msg = "Check your Mongo connection settings in the agent config file."
replica_set_status = agent_util.UNSUPPORTED
replica_set_msg = "Check your Mongo connection settings in the agent config file."
self.log.warning(replica_set_msg)
            # connection succeeded, now check for replica set presence
else:
if not int(result.get("ok", 0)):
replica_set_status = agent_util.UNSUPPORTED
replica_set_msg = "Mongo server is not part of a replica set"
self.log.warning(replica_set_msg)
                    self.log.debug(raw_output)
                    self.log.debug("Mongo keys: %s" % result.keys())
data = {
"reads": {
"label": "Reads",
"options": None,
"status": status,
"error_message": msg,
"unit": "reads/sec"
},
"writes": {
"label": "Writes",
"options": None,
"status": status,
"error_message": msg,
"unit": "writes/sec"
},
"objects": {
"label": "Objects",
"options": None,
"status": status,
"error_message": msg,
"unit": "objects"
},
"dataSize": {
"label": "Size of documents",
"options": None,
"status": status,
"error_message": msg,
"unit": "bytes"
},
"indexSize": {
"label": "Size of indexes",
"options": None,
"status": status,
"error_message": msg,
"unit": "bytes"
},
"extra_info.page_faults": {
"label": "Page Faults",
"options": None,
"status": status,
"error_message": msg,
"unit": "faults/sec"
},
"storageSize": {
"label": "Size of extents",
"options": None,
"status": status,
"error_message": msg,
"unit": "bytes"
},
"connections.current": {
"label": "Current connections",
"options": None,
"status": status,
"error_message": msg,
"unit": "connections"
},
"connections.available": {
"label": "Available connections",
"options": None,
"status": status,
"error_message": msg,
"unit": "connections"
},
"cursors.total_open": {
"label": "Open cursors",
"options": None,
"status": status,
"error_message": msg,
"unit": "cursors"
},
"cursors.timed_out": {
"label": "Timed out cursors",
"options": None,
"status": status,
"error_message": msg,
"unit": "cursors"
},
"globalLock.currentQueue.total": {
"label": "Total number of operations queued waiting for lock",
"options": None,
"status": status,
"error_message": msg,
"unit": "operations"
},
"globalLock.currentQueue.readers": {
"label": "Number of operations queued waiting for read lock",
"options": None,
"status": status,
"error_message": msg,
"unit": "operations"
},
"globalLock.currentQueue.writers": {
"label": "Number of operations queued waiting for write lock",
"options": None,
"status": status,
"error_message": msg,
"unit": "operations"
},
"replica_set.is_primary": {
"label": "Is replica set PRIMARY member",
"options": None,
"status": replica_set_status,
"error_message": replica_set_msg,
"unit": "boolean"
},
"replica_set.state": {
"label": "MongoDB replica set member state",
"options": None,
"status": replica_set_status,
"error_message": replica_set_msg,
"unit": "state"
},
"replica_set.health": {
"label": "MongoDB replica set member health",
"options": None,
"status": replica_set_status,
"error_message": replica_set_msg,
"unit": "health"
},
"replica_set.primary_optime_date_difference": {
"label": "Difference of primary node's optime date and realtime",
"options": None,
"status": replica_set_status,
"error_message": replica_set_msg,
"unit": "seconds"
},
"replica_set.max_members_ping_ms_difference": {
"label": "Maximum difference of members' ping ms from primary node's ping ms",
"options": None,
"status": replica_set_status,
"error_message": replica_set_msg,
"unit": "ms"
},
}
return data
@classmethod
def get_metadata_docker(self, container, config):
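        """Docker variant of get_metadata(): use the container's IP address as
        the hostname, then defer to get_metadata()."""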
if 'hostname' not in config:
try:
ip = agent_util.get_container_ip(container)
config['hostname'] = ip
except Exception:
self.log.exception('Docker metadata error')
config['from_docker'] = True
return self.get_metadata(config)
def get_mongo_cache(self, textkey, curr_reqs):
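        """Return (delta, previous_value) for a cached counter and store
        curr_reqs as the new cached value. Returns (None, None) on the first
        sample, when there is nothing to diff against."""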
cache = self.get_cache_results("mongo:%s" % textkey, None)
if not cache:
self.log.info("Empty mongo cache! Building for the first time.")
self.cache_result("mongo:%s" % textkey, None, curr_reqs, replace=True)
return None, None
delta, previous_reqs = cache[0]
self.cache_result("mongo:%s" % textkey, None, curr_reqs, replace=True)
return delta, float(previous_reqs)
def check(self, textkey, data, config):
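        """Resolve a single metric. Counter textkeys (reads, writes, page faults)
        are returned as per-second rates via get_mongo_cache(); the rest are read
        directly from db.serverStatus(), db.stats() or rs.status()."""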
rs_status, rs_result, rs_raw_output = execute_query("rs.status()", config)
ss_status, ss_result, ss_raw_output = execute_query("db.serverStatus()", config)
        db_status, db_result, db_raw_output = execute_query("db.stats()", config)
self.log.debug("Debug Output from Mongo Plugin:")
self.log.debug(rs_result)
self.log.debug(ss_raw_output)
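        # reads, writes and page faults are monotonically increasing counters, so
        # they are reported as (current - previous) / elapsed via the agent cache.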
if textkey == "reads":
queries = ss_result.get("opcounters", {}).get("query", None)
get_mores = ss_result.get("opcounters", {}).get("getmore", None)
if queries is None or get_mores is None:
return None
queries = float(queries)
get_mores = float(get_mores)
curr_data = queries + get_mores
delta, previous_data = self.get_mongo_cache(textkey, curr_data)
if previous_data is None or curr_data < previous_data:
return None
return (curr_data - previous_data) / float(delta)
if textkey == "writes":
inserts = ss_result.get("opcounters", {}).get("insert", None)
updates = ss_result.get("opcounters", {}).get("update", None)
deletes = ss_result.get("opcounters", {}).get("delete", None)
if inserts is None or updates is None or deletes is None:
return None
inserts = float(inserts)
updates = float(updates)
deletes = float(deletes)
curr_data = inserts + updates + deletes
delta, previous_data = self.get_mongo_cache(textkey, curr_data)
if previous_data is None or curr_data < previous_data:
return None
return (curr_data - previous_data) / float(delta)
if textkey == "extra_info.page_faults":
curr_data = ss_result.get("extra_info", {}).get("page_faults", None)
if curr_data is None:
return None
curr_data = float(curr_data)
delta, previous_data = self.get_mongo_cache(textkey, curr_data)
if previous_data is None or curr_data < previous_data:
return None
return (curr_data - previous_data) / float(delta)
if textkey == "connections.current":
return ss_result.get("connections", {}).get("current", None)
if textkey == "dataSize":
return db_result.get("dataSize", None)
if textkey == "storageSize":
return db_result.get("storageSize", None)
if textkey == "indexSize":
return db_result.get("indexSize", None)
if textkey == "objects":
return db_result.get("objects", None)
elif textkey == "connections.available":
return ss_result.get("connections", {}).get("available", None)
elif textkey == "cursors.total_open":
return ss_result.get("cursors", {}).get("totalOpen", None)
elif textkey == "cursors.timed_out":
return ss_result.get("cursors", {}).get("timedOut", None)
elif textkey == "globalLock.currentQueue.total":
return ss_result.get("globalLock", {}).get("currentQueue", {}).get("total", None)
elif textkey == "globalLock.currentQueue.readers":
return ss_result.get("globalLock", {}).get("currentQueue", {}).get("readers", None)
elif textkey == "globalLock.currentQueue.writers":
return ss_result.get("globalLock", {}).get("currentQueue", {}).get("writers", None)
elif textkey == "replica_set.is_primary":
is_primary = 0
for member in rs_result.get("members", []):
if member.get("self") in (True, "true") and member.get("stateStr") == "PRIMARY":
is_primary = 1
return is_primary
elif textkey == "replica_set.state":
            return rs_result.get("myState", None)
elif textkey == "replica_set.health":
health = None
for member in rs_result.get("members", []):
if member.get("self") in (True, "true"):
health = member.get("health", None)
return health
elif "all_members_healthy" in textkey:
for member in rs_result["members"]:
try:
if member["health"] != 1.0: return False
except:
continue
return True
        else:
            # The remaining textkeys need the PRIMARY member's entry from rs.status().
            primaries = [n for n in rs_result.get("members", []) if n.get("stateStr") == "PRIMARY"]
            if not primaries:
                return None
            primaryNodeStatus = primaries[0]
if "primary_optime_date_difference" in textkey:
try:
td = datetime.datetime.now() - primaryNodeStatus["optimeDate"]
return_val = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
self.log.debug("primary_optime_date_difference: %s" % str(return_val))
return return_val
except: return 999999
elif "max_members_ping_ms_difference" in textkey:
if "pingMs" not in primaryNodeStatus:
return 0
max_ping_ms_difference = -1
for member in status["members"]:
if member["stateStr"] != "PRIMARY":
try: max_ping_ms_difference = max(max_ping_ms_difference, abs(primaryNodeStatus["pingMs"] - member["pingMs"]))
except: continue
if max_ping_ms_difference == -1:
return 999999
self.log.debug("max_ping_ms_difference: %s" % max_ping_ms_difference)
return max_ping_ms_difference
def check_docker(self, container, textkey, data, config):
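        """Docker variant of check(): use the container's IP address as the
        hostname, then defer to check()."""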
if 'hostname' not in config:
try:
ip = agent_util.get_container_ip(container)
config['hostname'] = ip
except Exception as e:
self.log.exception(e)
config['from_docker'] = True
return self.check(textkey, data, config)