import agent_util
import csv
import sys
if sys.version_info[0] == 3:
from io import StringIO
else:
from StringIO import StringIO
from agent_util import float
def execute_query(config, query):
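    """
    Run 'query' through the mysql command-line client using the connection
    settings in 'config'. Returns the tab-separated batch output parsed into
    a list of rows (the first row is the column header); raises if the client
    exits with a non-zero status.
    """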
cmd = agent_util.which("mysql", exc=True)
if "host" in config: cmd += " -h %s" % config["host"]
if "port" in config: cmd += " -P %s" % config["port"]
if "username" in config: cmd += " -u %s" % config["username"]
if "password" in config and config["password"].strip():
cmd += " --password='%s'" % config["password"].strip()
cmd += (" -Be %r" % str(query))
if "database" in config:
cmd += " %s" % config["database"].strip()
status, output = agent_util.execute_command(cmd)
if status != 0: raise Exception(output)
output = StringIO(output)
parsed_output = list(csv.reader(output, delimiter="\t"))
matcher = 'mysql: [Warning] Using a password on the command line interface can be insecure.'
    # Strip any leading "password on the command line" warning rows from the output
    while parsed_output and matcher in parsed_output[0]:
        del parsed_output[0]
return parsed_output
def parse_instance_configs(config):
"""
Parse the configuration info for multiple instances
from the MySQL config block that gets passed in
from the agent.
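
    For example (hypothetical values), a block such as
        host: 127.0.0.1,127.0.0.1
        port: 3306,3307
    is split on commas by position, producing one config per instance:
        [{'host': '127.0.0.1', 'port': '3306'},
         {'host': '127.0.0.1', 'port': '3307'}]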
"""
instance_configs = {}
extended_metrics = None
for key, values in config.items():
if key == 'extended_metrics':
extended_metrics = values
else:
for i, value in enumerate(str(values).split(',')):
if i not in instance_configs:
instance_configs[i] = {}
if len(value) > 0:
instance_configs[i][key] = value
# We assume that the extended metrics option
# applies to all instances, so if it was found in the
# config, apply it to all instances
if extended_metrics is not None:
        for instance_config in instance_configs.values():
            instance_config['extended_metrics'] = extended_metrics
    # Return a list so callers can index into it and iterate it more than once
    return list(instance_configs.values())
def resource_name(instance_config):
"""
Returns the resource name for a given instance_config
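    For example, {'host': '10.0.0.5', 'port': '3306'} (hypothetical values)
    yields '10.0.0.5:3306', or '10.0.0.5:3306-mydb' when a database is set.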
"""
host = instance_config.get('host', '127.0.0.1')
port = instance_config.get('port', '3306')
db = instance_config.get('database', None)
resource = '%s:%s' % (host, port)
if db:
resource += '-%s' % db
return resource
def get_instance_config(resource, all_configs):
"""
Search for an instance config in 'all_configs'
that has a resource name equal to 'resource'
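    Falls back to the first config if no resource matches.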
"""
for config in all_configs:
if resource_name(config) == resource:
return config
return all_configs[0]
def metadata_options(instance_configs):
"""
Given a set of instance configs, generate the
'option' field for the agent metadata payload.
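    Each option is a dict of the form (hypothetical values):
        {'host': '127.0.0.1', 'port': '3306', 'resource': '127.0.0.1:3306'}
    with a 'database' key added when one is configured.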
"""
options = []
for config in instance_configs:
d = {
'host': config.get('host', '127.0.0.1'),
'port': config.get('port', '3306'),
'resource': resource_name(config),
}
if 'database' in config and config['database']:
d['database'] = config['database']
options.append(d)
return options
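# Map the per-second rate textkeys to the MySQL Com_* status counters they are derived from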
READ_COMMANDS = {
'com_selects_per_second': 'Com_select',
'com_writes_per_second': 'Com_insert',
'com_updates_per_second': 'Com_update',
'com_deletes_per_second': 'Com_delete',
}
class MySQLPlugin(agent_util.Plugin):
textkey = "mysql"
label = "MySQL"
@classmethod
def get_metadata(self, config):
status = agent_util.SUPPORTED
msg = None
instance_configs = parse_instance_configs(config)
options = metadata_options(instance_configs)
        # Check that the mysql command-line client is installed
client_installed = agent_util.which("mysql")
if not client_installed:
msg = "MySQL client was not found. Please install the client or add it to the default path."
self.log.info(msg)
status = agent_util.MISCONFIGURED
if status == agent_util.SUPPORTED and not config:
msg = "The [mysql] config block was not found in the agent config file."
self.log.info(msg)
status = agent_util.MISCONFIGURED
if status == agent_util.SUPPORTED:
for cfg in instance_configs:
if not "username" in cfg or not "password" in cfg:
msg = "The username and password entries were not found in the [mysql] block of the agent config file."
self.log.info(msg)
status = agent_util.MISCONFIGURED
if status == agent_util.SUPPORTED:
for cfg in instance_configs:
try:
output = execute_query(cfg, "SHOW DATABASES")
if cfg.get("debug", False):
self.log.debug('#####################################################')
self.log.debug("Mysql command 'SHOW DATABASES' output:")
self.log.debug(str(output))
self.log.debug('#####################################################')
                except Exception:
self.log.exception("error running mysql query")
status = agent_util.MISCONFIGURED
msg = "Unable to authenticate with MySQL, please double-check the credentials in the agent config file."
data = {
# basic
"queries_per_second": {
"label": "Average queries per second",
"options": options,
"status": status,
"error_message": msg,
"unit": "queries/s"
},
"slow_queries_per_minute": {
"label": "Average number of slow queries per minute",
"options": options,
"status": status,
"error_message": msg,
"unit": "queries/minute"
},
"query_cache.percent_free": {
"label": "MySQL query cache percent free",
"options": options,
"status": status,
"error_message": msg,
"unit": "percent"
},
"query_cache.kb_free": {
"label": "MySQL query cache amount free (kB)",
"options": options,
"status": status,
"error_message": msg,
"unit": "kB"
},
"connections": {
"label": "MySQL connections",
"options": options,
"status": status,
"error_message": msg,
"unit": "connections"
},
"com_selects_per_second": {
"label": "SELECT rate",
"options": options,
"status": status,
"error_message": msg,
"unit": "transactions/s"
},
"com_writes_per_second": {
"label": "INSERT rate",
"options": options,
"status": status,
"error_message": msg,
"unit": "transactions/s"
},
"com_updates_per_second": {
"label": "UPDATE rate",
"options": options,
"status": status,
"error_message": msg,
"unit": "transactions/s"
},
"com_deletes_per_second": {
"label": "DELETE rate",
"options": options,
"status": status,
"error_message": msg,
"unit": "transactions/s"
},
# replication
"slave.running": {
"label": "MySQL Slave server is replicating",
"options": options,
"status": status,
"error_message": msg
},
"slave.io_running": {
"label": "MySQL Slave server is connected to the Master",
"options": options,
"status": status,
"error_message": msg
},
"slave.latency": {
"label": "MySQL Slave server latency (seconds)",
"options": options,
"status": status,
"error_message": msg,
"unit": "seconds"
},
"extended_metric.innodb_row_lock_current_waits": {
"label": "Innodb current row lock waits",
"options": options,
"status": status,
"error_message": msg,
"unit": "waits"
},
"extended_metric.innodb_row_lock_time_avg": {
"label": "Innodb row lock time avg",
"options": options,
"status": status,
"error_message": msg,
"unit": "ms"
},
"extended_metric.threads_connected": {
"label": "Threads connected",
"options": options,
"status": status,
"error_message": msg,
"unit": "threads"
},
"extended_metric.threads_created": {
"label": "Threads Created",
"options": options,
"status": status,
"error_message": msg,
"unit": "threads"
},
"extended_metric.threads_running": {
"label": "Threads Running",
"options": options,
"status": status,
"error_message": msg,
"unit": "threads"
},
"extended_metric.questions": {
"label": "MySQL Questions - count of statements executed from the client",
"options": options,
"status": status,
"error_message": msg,
"unit": "questions"
},
"extended_metric.innodb_buffer_pool_pages_total": {
"label": "Total pages in the buffer pool",
"options": options,
"status": status,
"error_message": msg,
"unit": "pages"
},
"extended_metric.innodb_buffer_pool_read_requests": {
"label": "Requests made to the buffer pool",
"options": options,
"status": status,
"error_message": msg,
"unit": "requests/s"
},
"extended_metric.innodb_buffer_pool_reads": {
"label": "Requests unfulfilled by the buffer pool",
"options": options,
"status": status,
"error_message": msg,
"unit": "reads/s"
},
}
extended_metrics = []
for cfg in instance_configs:
if 'extended_metrics' in cfg:
extended_metrics = [m.strip().lower() for m in cfg["extended_metrics"].split(',')]
for m in extended_metrics:
data["extended_metric.%s" % m] = {
"label": "MySQL %s" % m.replace('_', " "),
"options": options,
"status": status,
"error_message": msg
}
# galera cluster metrics
if status == agent_util.SUPPORTED:
galera_instance_configs = []
for cfg in instance_configs:
if "wsrep" in str(execute_query(cfg, "SHOW GLOBAL STATUS LIKE 'wsrep%'")):
galera_instance_configs.append(cfg)
if len(galera_instance_configs) > 0:
galera_options = metadata_options(galera_instance_configs)
data["galera_cluster_size"] = {
"label": "Galera Cluster Size",
"options": galera_options,
"status": status,
"error_message": msg,
}
data["galera_local_send_queue_avg"] = {
"label": "Galera Send Queue Length",
"options": galera_options,
"status": status,
"error_message": msg,
}
data["galera_local_rcv_queue_avg"] = {
"label": "Galera Average size of Local Received Queue",
"options": galera_options,
"status": status,
"error_message": msg,
}
return data
@classmethod
def get_metadata_docker(self, container, config):
if 'host' not in config:
try:
ip = agent_util.get_container_ip(container)
config['host'] = ip
except Exception:
self.log.exception('get_metadata_docker error')
return self.get_metadata(config)
def check(self, textkey, data, config):
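        """
        'data' is the resource name selected in the check's metadata options;
        it is used to look up the matching instance config.
        """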
instance_configs = parse_instance_configs(config)
instance_config = get_instance_config(data, instance_configs)
return self.check_instance(textkey, data, instance_config)
def check_instance(self, textkey, resource, config):
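        """
        Run the check identified by 'textkey' against the instance described
        by 'config'. 'resource' scopes the cached counter values used for the
        per-second and per-minute rate calculations.
        """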
if textkey == 'queries_per_second':
col, res = execute_query(config, "SHOW GLOBAL STATUS where Variable_name='Queries';")
if not res:
return None
curr_reqs = float(res[1])
delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs)
            if previous_reqs is None or curr_reqs < previous_reqs:
return None
return int((curr_reqs - previous_reqs) / float(delta))
if textkey == 'slow_queries_per_second':
col, res = execute_query(config, "SHOW GLOBAL STATUS where Variable_name='Slow_queries';")
if not res:
return None
curr_reqs = float(res[1])
delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs)
if previous_reqs is None or curr_reqs < previous_reqs:
return None
return (curr_reqs - previous_reqs) / float(delta)
if textkey == 'slow_queries_per_minute':
col, res = execute_query(config, "SHOW GLOBAL STATUS where Variable_name='Slow_queries';")
if not res:
return None
curr_reqs = float(res[1])
delta, previous_reqs = self.get_mysql_cache(resource, textkey, curr_reqs)
if previous_reqs is None or curr_reqs < previous_reqs:
return None
return (curr_reqs - previous_reqs) / float(delta/60.)
if textkey in ("com_selects_per_second",
"com_writes_per_second",
"com_updates_per_second",
"com_deletes_per_second"):
column_name = READ_COMMANDS.get(textkey)
_, data = execute_query(config, "SHOW GLOBAL STATUS WHERE Variable_name='%s';" % column_name)
if not data:
return None
curr_data = float(data[1])
delta, previous_data = self.get_mysql_cache(resource, textkey, curr_data)
if previous_data is None or curr_data < previous_data:
return None
return (curr_data - previous_data) / float(delta)
if "query_cache" in textkey:
res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'Qcache_free_memory';")
if not res:
return None
row = res[1]
free = int(row[1])
self.log.debug("cache_free_memory: %d" % free)
res = execute_query(config, "SHOW VARIABLES LIKE 'query_cache_size';")
row = res[1]
total = int(row[1])
self.log.debug("query_cache_size: %d" % total)
if "percent_free" in textkey:
if not total: return 0
return_val = int(float(free) / total * 100)
self.log.debug("Percent free: %d" % return_val)
return return_val
else: return free
elif textkey == "connections":
res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'Threads_connected';")
if not res:
return None
row = res[1]
self.log.debug("Threads connected: %s" % str(row[1]))
return int(row[1])
        # Galera cluster metrics
if textkey == "galera_cluster_size":
res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size';")
if not res:
return None
row = res[1]
free = int(row[1])
self.log.debug("galera_cluster_size: %d" % free)
return int(row[1])
if textkey == "galera_local_send_queue_avg":
res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'wsrep_local_send_queue';")
if not res:
return None
row = res[1]
free = int(row[1])
self.log.debug("galera_local_send_queue_avg: %d" % free)
return int(row[1])
if textkey == "galera_local_rcv_queue_avg":
res = execute_query(config, "SHOW GLOBAL STATUS LIKE 'wsrep_local_recv_queue';")
if not res:
return None
row = res[1]
free = int(row[1])
self.log.debug("galera_local_rcv_queue_avg: %d" % free)
return int(row[1])
# extended_metrics
elif "extended_metric" in textkey:
metric_name = textkey.replace("extended_metric.", "")
res = execute_query(config, "SHOW GLOBAL STATUS LIKE '" + metric_name + "';")
try:
row = res[1]
result = int(row[1])
            except Exception:
result = 0
if metric_name in ("innodb_buffer_pool_read_requests", "innodb_buffer_pool_reads"):
curr_data = result
delta, previous_data = self.get_mysql_cache(resource, textkey, curr_data)
if previous_data is None or curr_data < previous_data:
result = None
else:
result = (curr_data - previous_data) / float(delta)
self.log.debug("%s: %s" % (textkey.replace("extended_metric.", "").title(), str(result)))
return result
# replication
action = textkey.split(".")[-1]
query_map = {
"running": ("Slave_SQL_Running", lambda c: int(c == "Yes")),
"io_running": ("Slave_IO_Running", lambda c: int(c == "Yes")),
"latency": ("Seconds_Behind_Master", lambda c: int(c)),
}
column, fn = query_map[action]
try:
keys, values = execute_query(config, "SHOW SLAVE STATUS")
        except Exception:
self.log.info("Replication metrics not available")
return None
row = values[keys.index(column)]
        # Some MySQL client libraries return an integer and others a string,
        # so normalize to a string before inspecting it
row = str(row)
if action == "latency" and not row: return 999999
elif not row: return False
return fn(row)
def check_docker(self, container, textkey, data, config):
if 'host' not in config:
try:
ip = agent_util.get_container_ip(container)
config['host'] = ip
except Exception as e:
self.log.exception(e)
return self.check(textkey, data, config)
    # Shared caching helper to keep the rate calculations DRY; other cached
    # result types may be added in the future
def get_mysql_cache(self, resource, textkey, curr_reqs):
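        """
        Fetch (delta, previous value) for this resource/textkey pair from the
        agent cache and store 'curr_reqs' as the new cached value; callers
        divide the counter difference by delta to compute a rate. Returns
        (None, None) on the first run, when there is nothing to diff against.
        """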
cache = self.get_cache_results("mysql:%s:%s" % (resource, textkey), None)
if not cache:
self.log.info("Empty mySQL cache! Building for the first time.")
self.cache_result("mysql:%s:%s" % (resource, textkey), None, curr_reqs, replace=True)
return None, None
delta, previous_reqs = cache[0]
self.cache_result("mysql:%s:%s" % (resource, textkey), None, curr_reqs, replace=True)
return delta, float(previous_reqs)