import agent_util
import sys
import os
import platform
from agent_util import float
import json
# Filesystem types reported with is_network=True in device metadata.
# NOTE(review): 'ntfs' and 'vfat' are local filesystem types — confirm they
# are intentionally treated as network filesystems here.
NETWORK_FS = ['ncpfs', 'nfs', 'ntfs', 'smb', 'vfat', 'smb2', 'cifs', 'nfs4']
# Timeout after 10 seconds, so we don't get hung on remote filesystems
TIMEOUT_LIMIT = 10
def get_findmnt_cmd(extra_args=""):
    """Build the findmnt command line, prefixed with 'timeout' when available.

    The timeout wrapper keeps the agent from hanging on unresponsive
    remote filesystems.
    """
    prefix = "timeout %s " % TIMEOUT_LIMIT if agent_util.which("timeout") else ""
    return "%sfindmnt --fstab --df --bytes --raw --evaluate --all %s" % (prefix, extra_args)
def get_df_cmd(extra_arg=""):
    """Return the platform-appropriate 'df' command for block usage.

    Prefixed with 'timeout' when that binary exists, so a hung network
    mount cannot stall the agent.
    """
    prefix = ""
    if agent_util.which("timeout"):
        prefix = "timeout %s " % TIMEOUT_LIMIT
    platform_name = sys.platform
    if "vmware" in platform_name:
        base_cmd = 'df -kT'
    elif 'sunos' in platform_name:
        base_cmd = 'df -kt'
    elif "darwin" in platform_name or "aix" in platform_name or 'freebsd' in platform_name:
        base_cmd = "df -Pk"
    else:
        # Default: POSIX output format, 1K blocks, with filesystem type.
        base_cmd = 'df -PkT'
    return "%s%s %s" % (prefix, base_cmd, extra_arg)
def get_idf_cmd(extra_arg=""):
    """Return the platform-appropriate 'df' command for inode usage.

    Prefixed with 'timeout' when that binary exists, mirroring
    get_df_cmd().
    """
    prefix = ""
    if agent_util.which("timeout"):
        prefix = "timeout %s " % TIMEOUT_LIMIT
    platform_name = sys.platform
    if 'sunos' in platform_name or "vmware" in platform_name:
        base_cmd = 'df -iT'
    elif 'aix' == platform_name:
        base_cmd = 'df -ik'
    elif 'darwin' == platform_name or 'freebsd' in platform_name:
        base_cmd = 'df -Pik'
    else:
        # Default: POSIX output, inode counts, with filesystem type.
        base_cmd = 'df -iPT'
    return "%s%s %s" % (prefix, base_cmd, extra_arg)
class DiskDFParser:
    """Parse 'df'/'findmnt' tabular output into per-mountpoint data.

    Builds device/mountpoint records while skipping pseudo filesystems
    and any devices or mountpoints the agent config asks to ignore.
    """

    def __init__(self, log, config):
        """Capture the logger and build ignore/exclude lists from config.

        log    -- agent logger (needs .debug/.warning/.error).
        config -- dict-like; honors 'device_ignore_list' and
                  'mountpoint_excludes' keys.
        """
        self.log = log
        # Pseudo devices / filesystem types that are never worth reporting.
        self.device_ignore_list = ("tmpfs", "devtmpfs", "none", "proc", "swap", "devices", "cgroup", "/dev/loop")
        self.mountpoint_excludes = ()
        cfg_device_list = config.get("device_ignore_list", None)
        if cfg_device_list is not None:
            self.device_ignore_list = self.parse_ignore_list(cfg_device_list)
        if "aix" in sys.platform or "sunos" in sys.platform:
            self.device_ignore_list = self.device_ignore_list + ("/proc", "/swap", "/ahafs")
        if 'darwin' == sys.platform:
            # Xcode simulator volumes are transient; never report them.
            self.mountpoint_excludes = ('/Library/Developer/CoreSimulator/Volumes', )
        mpe = config.get('mountpoint_excludes', None)
        if mpe is not None:
            self.mountpoint_excludes = self.parse_ignore_list(mpe)

    def __str__(self):
        return "Disk df parser"

    def parse_ignore_list(self, device_list):
        """Normalize a configured ignore list to a tuple of strings.

        Accepts a tuple/list as-is or a string shaped like
        '("tmpfs", "proc")'.  Returns () for anything unparseable.
        """
        try:
            # BUG FIX: the original compared type(device_list) against
            # type(tuple) -- i.e. the metaclass 'type' -- so a tuple (or
            # list) value was never recognized and the configured list
            # was silently dropped.
            if isinstance(device_list, (tuple, list)):
                return tuple(device_list)
            if isinstance(device_list, str):
                if '(' in device_list and ')' in device_list:
                    device_list_items = device_list.replace('(', '').replace(')', '').split(',')
                    items = [d.strip().strip('"') for d in device_list_items]
                    return tuple(items)
        except Exception:
            # Narrowed from a bare 'except' so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            self.log.error('Error parsing device list {}'.format(device_list))
        return ()

    def parse_df_output(self, output):
        """Parse df-style output into {mountpoint: {header: value}}."""
        outlines = output.splitlines()
        if not outlines:
            # Robustness fix: empty command output used to raise IndexError
            # when indexing the header line.
            return {}
        headers = self.build_header_data(outlines[0])
        df_table = {}
        for df_line in outlines[1:]:
            df_line = df_line.strip().split()
            mount_point = None
            mount_point_idx = headers.get('mounted on', None)
            # Mountpoints may contain spaces, so join every token from the
            # column index onward.  Compare against None explicitly so a
            # column index of 0 would not be mistaken for "missing".
            if mount_point_idx is not None:
                mount_point = ' '.join(df_line[mount_point_idx:])
            if not mount_point:
                self.log.warning('No mount point in {}'.format(df_line))
                continue
            df_table[mount_point] = {}
            for entry in headers.keys():
                val = df_line[headers[entry]]
                if 'mounted on' == entry:
                    val = mount_point
                df_table[mount_point][entry] = val
        return df_table

    def build_header_data(self, header_line):
        """Map normalized (lower-cased) df header names to column indexes."""
        hdr_idx = 0
        headers = {}
        for hdr in header_line.split():
            #
            # For lines that end with 'Mounted on' - skip the last split
            #
            hdr = hdr.lower()
            if 'on' == hdr:
                continue
            if 'mounted' == hdr:
                hdr = 'mounted on'
            # Normalize column names whose spellings differ across platforms.
            if hdr in ['iuse%', '%iused']:
                hdr = 'iuse_pct'
            elif hdr in ['available', 'avail']:
                hdr = 'available'
            headers[hdr] = hdr_idx
            hdr_idx += 1
        return headers

    def get_device_data(self, output, key_map, custom_network_fs):
        """Return (devices, max_disk) for reportable mountpoints.

        devices  -- list of option dicts (device/mountpoint/filesystem/
                    resource/is_network).
        max_disk -- maps the resource description to the 'available'
                    column value when present.
        key_map  -- maps logical names ('device', 'fs_type', 'mountpoint',
                    'available') to the column names of the tool used.
        """
        df_table = self.parse_df_output(output)
        devices = []
        max_disk = {}
        for mountpoint_table in df_table.values():
            try:
                device_key = key_map.get('device')
                device = mountpoint_table[device_key]
                fs_type_key = key_map.get('fs_type')
                filesystem = mountpoint_table.get(fs_type_key, '')
                skip_device = False
                for test_device in self.device_ignore_list:
                    if device.startswith(test_device) or filesystem.startswith(test_device):
                        self.log.debug("Skipping metadata for device %s" % device)
                        skip_device = True
                        break
                if skip_device:
                    continue
                mounted_key = key_map.get('mountpoint')
                mounted = mountpoint_table.get(mounted_key, None)
                if not mounted:
                    continue
                skip_mp = False
                for mp in self.mountpoint_excludes:
                    if mounted.startswith(mp):
                        self.log.debug('Skipping mountpoint {}'.format(mounted))
                        skip_mp = True
                        break
                if skip_mp:
                    continue
                desc = "%s mounted at %s" % (device, mounted)
                devices.append(
                    {
                        "device": device,
                        "mountpoint": mounted,
                        "filesystem": filesystem,
                        "resource": desc,
                        "is_network": filesystem in NETWORK_FS or filesystem in custom_network_fs
                    }
                )
                available_key = key_map.get('available')
                available = mountpoint_table.get(available_key, None)
                if available is not None:
                    max_disk[desc] = available
            except Exception:
                # Narrowed from a bare 'except'; a malformed row is logged
                # and skipped rather than aborting the whole scan.
                self.log.error("Unable to parse df output")
                continue
        return devices, max_disk
class DiskUsagePlugin(agent_util.Plugin):
    """Agent plugin reporting disk and inode usage metrics via df/findmnt."""
    # Metric namespace and display label for this plugin.
    textkey = "disk"
    label = "Disk"
    # Filesystem types excluded from df on macOS (passed as 'df -T no<list>'
    # by gather_extra_df_arg).
    darwin_fstype_excludes = 'nullfs,nodev,devfs,autofs'
    # adding min for disk usage
    min_capacity = 0
    # Normalize the platform string on AIX so the 'aix' checks elsewhere
    # match; os.uname() contains 'AIX' there.
    # NOTE(review): this mutates sys.platform for the whole process at class
    # definition time — confirm that side effect is intended.
    if "AIX" in os.uname():
        sys.platform = "aix"
@classmethod
def dump_disk_output(self, config, cmd, raw_output):
    """Log the raw output of a disk command when 'debug' is set in config."""
    if not config.get("debug", False):
        return
    banner = '#####################################################'
    self.log.debug(banner)
    self.log.debug("Disk command '%s' output :" % cmd)
    self.log.debug(raw_output)
    self.log.debug(banner)
@classmethod
def get_metadata(self, config):
    """Discover reportable disks and return the metric schema dict.

    Runs df (or findmnt, when 'use_findmnt' is configured and the binary
    exists) to enumerate mounted devices, runs the inode variant of df
    where the platform supports it, and returns one schema entry per
    metric textkey.  Returns {} when df is unavailable.
    """
    status = agent_util.SUPPORTED
    msg = None
    if not agent_util.which("df", exc=False):
        self.log.warning("df binary not found")
        status = agent_util.UNSUPPORTED
    if agent_util.SUPPORTED != status:
        return {}
    # See if there are custom DF flags specified in the config file
    extra_df_arg = self.gather_extra_df_arg(config=config)
    # See if the config file specifies to use findmnt to identify expected disks
    use_findmnt = config.get("use_findmnt") and agent_util.which("findmnt")
    extra_findmnt_arg = config.get("extra_findmnt_arg", "")
    # Comma-separated config value naming extra filesystem types to flag
    # as network filesystems.
    custom_network_fs = config.get('network_fs', [])
    if custom_network_fs:
        custom_network_fs = custom_network_fs.split(',')
    # Map logical field names onto the column headers each tool emits.
    table_keys = {
        'device': 'filesystem',
        'fs_type': 'type',
        'mountpoint': 'mounted on',
        'available': 'available',
    }
    if use_findmnt:
        table_keys = {
            'device': 'source',
            'fs_type': 'fstype',
            'mountpoint': 'target',
            'available': 'avail',
        }
        # The same findmnt output is reused for the inode query.
        block_query = get_findmnt_cmd(extra_findmnt_arg)
        inode_query = block_query
    else:
        block_query = get_df_cmd(extra_df_arg)
        inode_query = get_idf_cmd(extra_df_arg)
    parser = DiskDFParser(self.log, config)
    ret_code, block_result = agent_util.execute_command(block_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
    if 0 != ret_code:
        # Block-usage query failed: report all block metrics unsupported.
        devices = []
        max_disk = {}
        msg = 'Command exit status {}'.format(ret_code)
        self.log.error('{} exit status {}'.format(block_query, ret_code))
        status = agent_util.UNSUPPORTED
    else:
        self.dump_disk_output(config, block_query, block_result)
        devices, max_disk = parser.get_device_data(
            block_result, table_keys, custom_network_fs
        )
    # Inode metrics are tracked separately so a failure here does not
    # disable the block-usage metrics.
    inode_status = agent_util.SUPPORTED
    idevices = []
    imax_disk = {}
    inode_status_msg = None
    if 'sunos' in sys.platform or 'hp-ux' in sys.platform or 'aix' in sys.platform:
        inode_status = agent_util.UNSUPPORTED
        inode_status_msg = 'Unsupported on this platform'
    else:
        ret_code, inode_result = agent_util.execute_command(inode_query, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
        if 0 != ret_code:
            inode_status_msg = 'Command exit status {}'.format(ret_code)
            self.log.error('{} exit status {}'.format(inode_query, ret_code))
            inode_status = agent_util.UNSUPPORTED
        else:
            if not use_findmnt:
                # 'df -i' output names its free-count column 'ifree'.
                table_keys['available'] = 'ifree'
            self.dump_disk_output(config, inode_query, inode_result)
            idevices, imax_disk = parser.get_device_data(
                inode_result, table_keys, custom_network_fs
            )
    # Schema describing each per-device option dict built by the parser.
    options_schema = {
        'device': 'string',
        'mountpoint': 'string',
        'filesystem': 'string',
        'resource': 'string',
        'is_network': 'boolean'
    }
    data = {
        "usage.percent_used": {
            "label": "Percentage of disk used",
            "options": devices,
            "options_schema": options_schema,
            "status": status,
            "error_message": msg,
            "unit": "percent",
            "min_value": 0,
            "max_value": 100,
        },
        "usage.kb_available": {
            "label": "Disk space available",
            "options": devices,
            "options_schema": options_schema,
            "status": status,
            "error_message": msg,
            "unit": "kB",
            "min_value": 0,
            "max_value": max_disk,
        },
        "filesystem.mounted": {
            "label": "Filesystem mounted",
            "options": devices,
            "options_schema": options_schema,
            "status": status,
            "error_message": msg,
        },
        "inode.percent_used": {
            "label": "Inodes percent used",
            "options": idevices,
            "options_schema": options_schema,
            "status": inode_status,
            "error_message": inode_status_msg,
            "unit": "percent",
            "min_value": 0,
            "max_value": 100,
        },
        "inode.used": {
            "label": "Inode used",
            "options": idevices,
            "options_schema": options_schema,
            "status": inode_status,
            "error_message": inode_status_msg,
            "unit": "Inodes",
            "min_value": 0,
            "max_value": imax_disk,
        },
        "inode.available": {
            "label": "Inodes Available",
            "options": idevices,
            "options_schema": options_schema,
            "status": inode_status,
            "error_message": inode_status_msg,
            "unit": "Inodes",
            "min_value": 0,
            "max_value": imax_disk,
        },
    }
    # no inodes for vmware
    to_del = []
    if 'vmware' in sys.platform:
        for k in data.keys():
            if 'inode' in k:
                to_del.append(k)
    for d in to_del:
        del data[d]
    return data
def collect_vmware(self, textkey, mounted):
    """Collect one disk metric on VMware hosts via 'stat -f <mount>'.

    Returns the metric value for textkey, 0/1 for 'filesystem.mounted',
    or None when stat fails or its output cannot be parsed.
    """
    ret, output = agent_util.execute_command("stat -f %s" % mounted, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
    # make sure it's mounted first
    if ret != 0 and textkey != 'filesystem.mounted':
        self.log.error("Unable to find disk %s, is it mounted?!" % mounted)
        self.log.error(output)
        return None
    elif ret != 0 and textkey == 'filesystem.mounted':
        return 0
    # BUG FIX: stat succeeded, so the filesystem is mounted.  Previously
    # this textkey fell through to the metrics lookup below and raised
    # KeyError, because only the usage.* keys are ever populated.
    if textkey == 'filesystem.mounted':
        return 1
    block_size = 0
    metrics = {}
    for line in output.split('\n'):
        l = str(line).strip().lower()
        if l.startswith('file:') or l.startswith('id:'):
            continue
        elif l.startswith('block size:'):
            block_size = l.split()[-1]
        if l.startswith('blocks:'):
            try:
                # Expected form: "Blocks: Total: N Free: N Available: N"
                btext, ttext, total_size, ftext, free_size, atext, avail_size = l.split()
            except ValueError:
                # Narrowed from a bare 'except': unpacking a wrong-length
                # split raises ValueError.
                self.log.error("Unable to parse disk output!")
                self.log.error(output)
                return None
            metrics['usage.percent_used'] = 100. - ((float(free_size) / float(total_size)) * 100)
            metrics['usage.kb_available'] = float(free_size) * float(block_size)
    # BUG FIX: use .get() so an unexpected textkey or a missing 'Blocks:'
    # line yields None instead of an uncaught KeyError.
    return metrics.get(str(textkey))
@classmethod
def gather_extra_df_arg(self, config):
    """Return the configured extra df arguments.

    On macOS, also appends '-T no<fstypes>' so pseudo filesystem types
    (darwin_fstype_excludes plus any 'ignore_fstypes' from config) are
    excluded from df output.
    """
    extra = config.get("extra_df_arg", "")
    if 'darwin' not in sys.platform:
        return extra
    excludes = self.darwin_fstype_excludes
    configured = config.get('ignore_fstypes', None)
    if configured:
        excludes = '{},{}'.format(excludes, configured)
    return '{} -T no{}'.format(extra, excludes)
def check(self, textkey, dev_mount, config):
    """Return the current value of metric 'textkey' for one device option.

    dev_mount is the resource string "<device> mounted at <mountpoint>"
    built in DiskDFParser.get_device_data: tokens 0-2 are the device,
    'mounted', 'at', so the mountpoint is everything from token 3 onward
    (it may contain spaces).  Returns None when the value cannot be read.
    """
    dev_mount = dev_mount.split()
    mounted = ' '.join(dev_mount[3:])
    extra_df_arg = self.gather_extra_df_arg(config)
    if "vmware" in sys.platform:
        return self.collect_vmware(textkey, mounted)
    is_inode_query = False
    # All inode metric textkeys start with 'i' (inode.*); those need the
    # inode variant of df.
    if textkey.startswith("i"):
        df_cmd = get_idf_cmd(extra_df_arg)
        is_inode_query = True
    else:
        df_cmd = get_df_cmd(extra_df_arg)
    rc, output = agent_util.execute_command(df_cmd, cache_timeout=agent_util.DEFAULT_CACHE_TIMEOUT)
    if 0 != rc:
        return None
    self.log.debug(u"%s output: %s" % (df_cmd, output))
    parser = DiskDFParser(self.log, config)
    df_data = parser.parse_df_output(output)
    mountpoint_data = df_data.get(mounted, None)
    if not mountpoint_data:
        self.log.error("Mountpoint %r not found" % mounted)
        # A missing mountpoint is the False value for the mounted metric,
        # and "no data" for everything else.
        if textkey == "filesystem.mounted":
            return False
        return None

    def convert_capacity_field(capacity):
        # df prints '-' for filesystems without a capacity figure; report 0.
        if capacity is None:
            return None
        if capacity == '-':
            return 0
        else:
            return int(capacity.rstrip('%'))

    if 'filesystem.mounted' == textkey:
        return True
    if textkey in ['usage.percent_used', "inode.percent_used"]:
        # Column names as normalized by DiskDFParser.build_header_data.
        key = 'capacity'
        if is_inode_query:
            key = 'iuse_pct'
        return convert_capacity_field(mountpoint_data.get(key, None))
    key = None
    if "inode.used" == textkey:
        key = 'iused'
    elif "inode.available" == textkey:
        key = 'ifree'
    elif 'usage.kb_available' == textkey:
        key = 'available'
    if not key:
        return None
    mv = mountpoint_data.get(key, None)
    if mv is None:
        return None
    if '-' == mv:
        return 0
    return int(mv)