# /lib/fm-agent/library/agent_util.py

""" This file is a library of some essential classes for our agent's use."""
import time
import logging
import os
import itertools
import subprocess
import sys
import signal
import locale
import socket

try:
    import json
except ImportError:
    import simplejson as json

try:
    # Python 2.x
    import httplib
except ImportError:
    import http.client as httplib

if sys.version[0] == '3':
    from io import BytesIO as StringIO
else:
    from StringIO import StringIO

# Set the LC_NUMERIC locale to avoid parsing issues with locales such as de_DE,
# and wrap locale.atof in a custom float function that converts its input to a
# string so atof doesn't break. If there is no LC_ALL environment variable, set
# one to avoid an exception from locale.
if "LC_ALL" not in os.environ:
    os.environ["LC_ALL"] = "C"
locale.resetlocale(locale.LC_NUMERIC)


def custom_float(value):
    try:
        return locale.atof(str(value))
    except Exception:
        # __builtins__ is a module when this file runs as a script but a dict
        # when it is imported, so fall back to the real builtin float either way.
        if isinstance(__builtins__, dict):
            return __builtins__['float'](value)
        return __builtins__.float(value)


float = custom_float
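
# Usage sketch (illustrative values): with a C/POSIX numeric locale (the
# fallback set above when LC_ALL is unset), parsing behaves like the builtin:
#
#   custom_float("12.5")   # -> 12.5
#   custom_float(3)        # -> 3.0
#
# Under a locale such as de_DE, locale.atof would accept "12,5" instead.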
LOCAL_CACHE_RESULTS = {}
DEFAULT_CACHE_TIMEOUT = 60


# Needed to support older versions of Python that predate the any()/all() builtins
def any(iterable):
    for element in iterable:
        if element:
            return True
    return False


def all(iterable):
    for element in iterable:
        if not element:
            return False
    return True


def total_seconds(timedelta):
    return (timedelta.microseconds +
            (timedelta.seconds + timedelta.days * 24 * 3600)
            * 10**6) / 10**6
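
# Example: total_seconds(datetime.timedelta(minutes=2)) gives 120; this mirrors
# the timedelta.total_seconds() method, which only exists on Python 2.7+.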

# statuses for the individual resource textkeys
SUPPORTED = 0
UNSUPPORTED = 1
MISCONFIGURED = 2
# these are additional paths for which() to search that may not be on the
# user's PATH.  some of them contain binaries that the plugins need to run;
# for example, apachectl on CentOS lives in /usr/sbin
ADDITIONAL_SEARCH_PATHS = ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"]


def which(program, exc=False):
    def is_exe(fpath):
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        if is_exe(program): return program
    else:
        if "PATH" not in os.environ: return None
        for path in itertools.chain(os.environ["PATH"].split(os.pathsep), ADDITIONAL_SEARCH_PATHS):
            exe_file = os.path.join(path, program)
            if is_exe(exe_file):
                return exe_file

    if exc: raise Exception("%r not found" % program)
    else: return None
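
# Usage sketch (hypothetical binary names): which() searches $PATH plus the
# extra sbin/bin directories above, e.g.
#
#   which("apachectl")            # -> "/usr/sbin/apachectl" on CentOS, or None
#   which("apachectl", exc=True)  # raises instead of returning None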


def execute_command(cmd, cwd=None, shell=True, kill_tree=True, timeout=15, env=None, block=True, cache_timeout=None):
    "Run a command line call with a timeout after which it will be forcibly killed."

    if shell is False:
        cmd = cmd.split()

    log = logging.getLogger("execute_command")

    if cache_timeout:
        # We search first for the cached result for that specific command.
        cached_result = LOCAL_CACHE_RESULTS.get(cmd)
        if cached_result and time.time() - cached_result.get('timestamp') < cache_timeout:
            log.debug('Retrieved result from the local cache for %s' % cmd)
            return cached_result.get('retcode'), cached_result.get('output')

    p = None
    if 'darwin' == sys.platform.lower():
        p = subprocess.Popen(cmd, shell=shell, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        if not block:
            return

        try:
            stdout, stderr = p.communicate(timeout=timeout)
        except subprocess.TimeoutExpired as te:
            pids = [p.pid]
            if kill_tree:
                pids.extend(get_process_children(p.pid))
            for pid in pids:
                # process might have died before getting to this line
                # so wrap to avoid OSError: no such process
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError:
                    pass
            return -9, 'Timeout exceeded, process killed'

    else:
        class Alarm(Exception):
            pass

        def alarm_handler(signum, frame):
            raise Alarm

        # Kick off the command, and exit if we're not running in blocking mode waiting for a response
        p = subprocess.Popen(cmd, shell=shell, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
        if not block:
            return

        if timeout != -1 and timeout is not None:
            signal.signal(signal.SIGALRM, alarm_handler)
            signal.alarm(timeout)
        try:
            stdout, stderr = p.communicate()
            if timeout != -1 and timeout is not None:
                signal.alarm(0)
        except Alarm:
            pids = [p.pid]
            if kill_tree:
                pids.extend(get_process_children(p.pid))
            for pid in pids:
                # process might have died before getting to this line
                # so wrap to avoid OSError: no such process
                try:
                    os.kill(pid, signal.SIGKILL)
                except OSError:
                    pass
            return -9, 'Timeout exceeded, process killed'

    retcode = p.returncode
    if not stdout: stdout = stderr
    output = stdout.decode("utf8")

    if cache_timeout:
        # Create the cache for this result so subsequent calls use it
        # instead of making the same calls.
        log.debug('Created cache for cmd %s' % cmd)
        LOCAL_CACHE_RESULTS[cmd] = {'retcode': retcode, 'output': output, 'timestamp': time.time()}

    log.debug("%s: %s %s" % (cmd, retcode, output))
    return (retcode, output)
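
# Usage sketch (hypothetical command): plugins typically call this instead of
# using subprocess directly, e.g.
#
#   retcode, output = execute_command("uptime", timeout=10, cache_timeout=30)
#
# A second call with the same cmd string within 30 seconds returns the cached
# (retcode, output) pair; with block=False the call returns None right after
# spawning the process.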


def get_process_children(pid):
    "Helper method for killing off child processes when they timeout"
    p = subprocess.Popen('ps --no-headers -o pid --ppid %d' % pid, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    return [int(p) for p in stdout.split()]
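
# Note: --ppid is a GNU/procps option, so this lists direct children on Linux;
# where ps lacks the option, stdout is empty and an empty list is returned.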


def get_container_ip(container):
    try:
        networks = container["NetworkSettings"]["Networks"]
        network = list(networks.values())[0]
        return network["IPAddress"]
    except Exception:
        t, e = sys.exc_info()[:2]
        logging.exception(e)
        raise e
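
# Shape sketch (illustrative data, shaped like a Docker API container entry):
#
#   get_container_ip({"NetworkSettings": {"Networks":
#       {"bridge": {"IPAddress": "172.17.0.2"}}}})   # -> "172.17.0.2"
#
# Only the first attached network is inspected.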


def json_loads(val, **kwargs):
    try:
        return json.loads(val, **kwargs)
    except TypeError:
        # Python 3.5 json module does not support bytes
        return json.loads(val.decode(), **kwargs)
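
# Example: json_loads(b'{"ok": true}') returns {'ok': True} even on Python
# versions where json.loads() rejects bytes input.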


# Subclass of HTTPConnection that allows connecting to a UNIX socket
# Adapted from uhttplib
class UnixHTTPConnection(httplib.HTTPConnection):
    def __init__(self, path, host='localhost', **kwargs):
        # Can't use super() as httplib.HTTPConnection is not new-style
        httplib.HTTPConnection.__init__(self, host, **kwargs)
        self.path = path

    def connect(self):
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.connect(self.path)
        self.sock = sock
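
# Usage sketch (the socket path is an assumption, not part of this module):
#
#   conn = UnixHTTPConnection("/var/run/docker.sock")
#   conn.request("GET", "/containers/json")
#   data = json_loads(conn.getresponse().read())
#
# The host argument is only used for the Host: header; the actual connection
# goes over the UNIX socket.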


# XXX Move this into library!
# Base class for our plugins
class Plugin(object):
    # a unique textkey to identify this plugin
    textkey = "undefined"

    # One-line human-readable description of the plugin
    description = ""

    log = logging

    def __init__(self, schedule):
        self.schedule = schedule
        self.log = logging.getLogger("plugin %r" % self.textkey)


    # saves some check data in a temporary location in the db, for use by a
    # future call, for figuring things like N per second.
    def cache_result(self, textkey, option, value, replace=False):
        now = time.time()
        cache = self.schedule.cached_results
        tcache = cache.setdefault(textkey, {})

        if replace: tcache[option] = []
        results = tcache.setdefault(option, [])

        results.append((now, value))

        # don't let our data structure get too big.  this number is arbitrary.
        # we could use a collections.deque, but its maxlen property is not
        # supported before 2.5
        while len(results) > 1000: results.pop(0)


    # fetches some cached data from a previous check call.
    # arg is passed in because each call to check receives a different
    # arg (if that resource textkey has multiple args), so the cached results
    # are specific to that arg.
    def get_cache_results(self, textkey, option, num=1):
        now = time.time()
        cache = self.schedule.cached_results
        tcache = cache.setdefault(textkey, {})
        results = tcache.setdefault(option, [])

        ret = []
        for stored, result in results[-num:]:
            ret.append((now - stored, result))
        ret.reverse()

        return ret
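
    # Usage sketch inside a plugin's check() (names are illustrative): compute
    # a per-second rate from the previously cached counter, e.g.
    #
    #   previous = self.get_cache_results("net.bytes", option)
    #   self.cache_result("net.bytes", option, current_bytes)
    #   if previous:
    #       age, last_bytes = previous[0]
    #       rate = (current_bytes - last_bytes) / age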


    def get_metadata(self, config):
        return {}

    def check(self, textkey, data, config):
        return 0
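
# A minimal subclass sketch (hypothetical plugin, not shipped with the agent):
#
#   class LoadAveragePlugin(Plugin):
#       textkey = "loadavg"
#       description = "1-minute load average"
#
#       def check(self, textkey, data, config):
#           retcode, output = execute_command("cat /proc/loadavg", timeout=5)
#           return float(output.split()[0])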