Use nrpe functions from charmhelpers

parent 0845580263
commit 5ee9df1cd5
@@ -18,6 +18,7 @@ from charmhelpers.core.hookenv import (
     log,
     relation_ids,
     relation_set,
+    relations_of_type,
 )
 
 from charmhelpers.core.host import service
@@ -54,6 +55,12 @@ from charmhelpers.core.host import service
 #    juju-myservice-0
 # If you're running multiple environments with the same services in them
 # this allows you to differentiate between them.
+# nagios_servicegroups:
+#   default: ""
+#   type: string
+#   description: |
+#     A comma-separated list of nagios servicegroups.
+#     If left empty, the nagios_context will be used as the servicegroup
 #
 # 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
 #
@@ -125,9 +132,6 @@ define service {{
 
     def _locate_cmd(self, check_cmd):
         search_path = (
-            '/',
-            os.path.join(os.environ['CHARM_DIR'],
-                         'files/nrpe-external-master'),
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
         )
@@ -141,7 +145,7 @@ define service {{
         log('Check command not found: {}'.format(parts[0]))
         return ''
 
-    def write(self, nagios_context, hostname):
+    def write(self, nagios_context, hostname, nagios_servicegroups=None):
         nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
             self.command)
         with open(nrpe_check_file, 'w') as nrpe_check_config:
@@ -153,16 +157,21 @@ define service {{
             log('Not writing service config as {} is not accessible'.format(
                 NRPE.nagios_exportdir))
         else:
-            self.write_service_config(nagios_context, hostname)
+            self.write_service_config(nagios_context, hostname,
+                                      nagios_servicegroups)
 
-    def write_service_config(self, nagios_context, hostname):
+    def write_service_config(self, nagios_context, hostname,
+                             nagios_servicegroups=None):
         for f in os.listdir(NRPE.nagios_exportdir):
             if re.search('.*{}.cfg'.format(self.command), f):
                 os.remove(os.path.join(NRPE.nagios_exportdir, f))
 
+        if not nagios_servicegroups:
+            nagios_servicegroups = nagios_context
+
         templ_vars = {
             'nagios_hostname': hostname,
-            'nagios_servicegroup': nagios_context,
+            'nagios_servicegroup': nagios_servicegroups,
             'description': self.description,
             'shortname': self.shortname,
             'command': self.command,
@@ -186,6 +195,10 @@ class NRPE(object):
         super(NRPE, self).__init__()
         self.config = config()
         self.nagios_context = self.config['nagios_context']
+        if 'nagios_servicegroups' in self.config:
+            self.nagios_servicegroups = self.config['nagios_servicegroups']
+        else:
+            self.nagios_servicegroups = 'juju'
         self.unit_name = local_unit().replace('/', '-')
         if hostname:
             self.hostname = hostname
@@ -211,7 +224,8 @@ class NRPE(object):
         nrpe_monitors = {}
         monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
         for nrpecheck in self.checks:
-            nrpecheck.write(self.nagios_context, self.hostname)
+            nrpecheck.write(self.nagios_context, self.hostname,
+                            self.nagios_servicegroups)
             nrpe_monitors[nrpecheck.shortname] = {
                 "command": nrpecheck.command,
             }
@@ -220,3 +234,75 @@ class NRPE(object):
 
         for rid in relation_ids("local-monitors"):
             relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+
+def get_nagios_hostcontext(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_host_context
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_host_context']
+
+
+def get_nagios_hostname(relation_name='nrpe-external-master'):
+    """
+    Query relation with nrpe subordinate, return the nagios_hostname
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    for rel in relations_of_type(relation_name):
+        if 'nagios_hostname' in rel:
+            return rel['nagios_hostname']
+
+
+def get_nagios_unit_name(relation_name='nrpe-external-master'):
+    """
+    Return the nagios unit name prepended with host_context if needed
+
+    :param str relation_name: Name of relation nrpe sub joined to
+    """
+    host_context = get_nagios_hostcontext(relation_name)
+    if host_context:
+        unit = "%s:%s" % (host_context, local_unit())
+    else:
+        unit = local_unit()
+    return unit
+
+
+def add_init_service_checks(nrpe, services, unit_name):
+    """
+    Add checks for each service in list
+
+    :param NRPE nrpe: NRPE object to add check to
+    :param list services: List of services to check
+    :param str unit_name: Unit name to use in check description
+    """
+    for svc in services:
+        upstart_init = '/etc/init/%s.conf' % svc
+        sysv_init = '/etc/init.d/%s' % svc
+        if os.path.exists(upstart_init):
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_upstart_job %s' % svc
+            )
+        elif os.path.exists(sysv_init):
+            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
+            cron_file = ('*/5 * * * * root '
+                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
+                         '-s /etc/init.d/%s status > '
+                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
+                                                                     svc)
+                         )
+            f = open(cronpath, 'w')
+            f.write(cron_file)
+            f.close()
+            nrpe.add_check(
+                shortname=svc,
+                description='process check {%s}' % unit_name,
+                check_cmd='check_status_file.py -f '
+                          '/var/lib/nagios/service-check-%s.txt' % svc,
+            )
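
The helper functions added above replace per-charm boilerplate like the update_nrpe_config() rewrite at the end of this commit. A minimal hook sketch of the intended calling pattern ('my-service' is a placeholder service name, not something defined by this diff):

    from charmhelpers.contrib.charmsupport import nrpe
    from charmhelpers.core.hookenv import Hooks

    hooks = Hooks()


    @hooks.hook('nrpe-external-master-relation-joined',
                'nrpe-external-master-relation-changed')
    def update_nrpe_config():
        # Pick up the hostname and host context published by the nrpe subordinate.
        hostname = nrpe.get_nagios_hostname()
        current_unit = nrpe.get_nagios_unit_name()
        nrpe_setup = nrpe.NRPE(hostname=hostname)
        # Add an upstart/sysvinit process check for each service the charm manages.
        nrpe.add_init_service_checks(nrpe_setup, ['my-service'], current_unit)
        nrpe_setup.write()
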
@@ -2,7 +2,8 @@
 Functions for managing volumes in juju units. One volume is supported per unit.
 Subordinates may have their own storage, provided it is on its own partition.
 
-Configuration stanzas:
+Configuration stanzas::
+
   volume-ephemeral:
     type: boolean
     default: true
@@ -20,7 +21,8 @@ Configuration stanzas:
     is 'true' and no volume-map value is set. Use 'juju set' to set a
     value and 'juju resolved' to complete configuration.
 
-Usage:
+Usage::
+
     from charmsupport.volumes import configure_volume, VolumeConfigurationError
     from charmsupport.hookenv import log, ERROR
     def post_mount_hook():
@@ -34,6 +36,7 @@ Usage:
             after_change=post_mount_hook)
     except VolumeConfigurationError:
         log('Storage could not be configured', ERROR)
+
 '''
 
 # XXX: Known limitations
@@ -13,6 +13,7 @@ clustering-related helpers.
 
 import subprocess
+import os
 
 from socket import gethostname as get_unit_hostname
 
 import six
@@ -28,12 +29,19 @@ from charmhelpers.core.hookenv import (
     WARNING,
     unit_get,
 )
+from charmhelpers.core.decorators import (
+    retry_on_exception,
+)
 
 
 class HAIncompleteConfig(Exception):
     pass
 
 
+class CRMResourceNotFound(Exception):
+    pass
+
+
 def is_elected_leader(resource):
     """
     Returns True if the charm executing this is the elected cluster leader.
@@ -68,23 +76,29 @@ def is_clustered():
     return False
 
 
-def is_crm_leader(resource):
+@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
+def is_crm_leader(resource, retry=False):
     """
     Returns True if the charm calling this is the elected corosync leader,
     as returned by calling the external "crm" command.
+
+    We allow this operation to be retried to avoid the possibility of getting a
+    false negative. See LP #1396246 for more info.
     """
-    cmd = [
-        "crm", "resource",
-        "show", resource
-    ]
+    cmd = ['crm', 'resource', 'show', resource]
     try:
-        status = subprocess.check_output(cmd).decode('UTF-8')
+        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+        if not isinstance(status, six.text_type):
+            status = six.text_type(status, "utf-8")
     except subprocess.CalledProcessError:
-        return False
-    else:
-        if get_unit_hostname() in status:
-            return True
-        else:
-            return False
+        status = None
+
+    if status and get_unit_hostname() in status:
+        return True
+
+    if status and "resource %s is NOT running" % (resource) in status:
+        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
+
+    return False
 
 
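
The CRMResourceNotFound exception only matters because of the retry_on_exception decorator imported above from charmhelpers.core.decorators: raising it turns a possible false negative into a bounded retry of the crm query. The real implementation lives in charmhelpers; a rough, illustrative sketch of the pattern (the back-off details here are assumptions, not charmhelpers' exact behaviour):

    import time
    from functools import wraps


    def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
        """Retry the wrapped call while exc_type keeps being raised (sketch)."""
        def _decorator(f):
            @wraps(f)
            def _wrapped(*args, **kwargs):
                delay = base_delay
                for _ in range(num_retries):
                    try:
                        return f(*args, **kwargs)
                    except exc_type:
                        time.sleep(delay)
                        delay *= 2  # simple exponential back-off between attempts
                # final attempt: let the exception propagate to the caller
                return f(*args, **kwargs)
            return _wrapped
        return _decorator
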
@@ -54,6 +54,17 @@ def enable():
     if is_enabled():
         return True
 
+    if not os.path.isdir('/proc/sys/net/ipv6'):
+        # disable IPv6 support in ufw
+        hookenv.log("This machine doesn't have IPv6 enabled", level="INFO")
+        exit_code = subprocess.call(['sed', '-i', 's/IPV6=yes/IPV6=no/g',
+                                     '/etc/default/ufw'])
+        if exit_code == 0:
+            hookenv.log('IPv6 support in ufw disabled', level='INFO')
+        else:
+            hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
+            raise Exception("Couldn't disable IPv6 support in ufw")
+
     output = subprocess.check_output(['ufw', 'enable'],
                                      env={'LANG': 'en_US',
                                           'PATH': os.environ['PATH']})
@@ -491,6 +491,7 @@ class HAProxyContext(OSContextGenerator):
         ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')
 
         if config('prefer-ipv6'):
+            ctxt['ipv6'] = True
             ctxt['local_host'] = 'ip6-localhost'
             ctxt['haproxy_host'] = '::'
             ctxt['stat_port'] = ':::8888'
@@ -152,9 +152,15 @@ def neutron_plugins():
                                         database=config('neutron-database'),
                                         relation_prefix='neutron',
                                         ssl_dir=NEUTRON_CONF_DIR)],
-            'services': ['calico-compute', 'bird', 'neutron-dhcp-agent'],
+            'services': ['calico-felix',
+                         'bird',
+                         'neutron-dhcp-agent',
+                         'nova-api-metadata'],
             'packages': [[headers_package()] + determine_dkms_package(),
-                         ['calico-compute', 'bird', 'neutron-dhcp-agent']],
+                         ['calico-compute',
+                          'bird',
+                          'neutron-dhcp-agent',
+                          'nova-api-metadata']],
             'server_packages': ['neutron-server', 'calico-control'],
             'server_services': ['neutron-server']
         }
@@ -53,6 +53,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('saucy', 'havana'),
     ('trusty', 'icehouse'),
     ('utopic', 'juno'),
+    ('vivid', 'kilo'),
 ])
 
 
@@ -64,6 +65,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2013.2', 'havana'),
     ('2014.1', 'icehouse'),
     ('2014.2', 'juno'),
+    ('2015.1', 'kilo'),
 ])
 
 # The ugly duckling
@@ -84,6 +86,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('2.0.0', 'juno'),
     ('2.1.0', 'juno'),
     ('2.2.0', 'juno'),
+    ('2.2.1', 'kilo'),
 ])
 
 DEFAULT_LOOPBACK_SIZE = '5G'
@@ -289,6 +292,9 @@ def configure_installation_source(rel):
         'juno': 'trusty-updates/juno',
         'juno/updates': 'trusty-updates/juno',
         'juno/proposed': 'trusty-proposed/juno',
+        'kilo': 'trusty-updates/kilo',
+        'kilo/updates': 'trusty-updates/kilo',
+        'kilo/proposed': 'trusty-proposed/kilo',
     }
 
     try:
@@ -372,3 +372,46 @@ def ceph_version():
             return None
     else:
         return None
+
+
+class CephBrokerRq(object):
+    """Ceph broker request.
+
+    Multiple operations can be added to a request and sent to the Ceph broker
+    to be executed.
+
+    Request is json-encoded for sending over the wire.
+
+    The API is versioned and defaults to version 1.
+    """
+    def __init__(self, api_version=1):
+        self.api_version = api_version
+        self.ops = []
+
+    def add_op_create_pool(self, name, replica_count=3):
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'replicas': replica_count})
+
+    @property
+    def request(self):
+        return json.dumps({'api-version': self.api_version, 'ops': self.ops})
+
+
+class CephBrokerRsp(object):
+    """Ceph broker response.
+
+    Response is json-decoded and contents provided as methods/properties.
+
+    The API is versioned and defaults to version 1.
+    """
+    def __init__(self, encoded_rsp):
+        self.api_version = None
+        self.rsp = json.loads(encoded_rsp)
+
+    @property
+    def exit_code(self):
+        return self.rsp.get('exit-code')
+
+    @property
+    def exit_msg(self):
+        return self.rsp.get('stderr')
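
A consumer builds a CephBrokerRq, ships the JSON over its ceph relation and decodes the reply with CephBrokerRsp. A hedged usage sketch (the relation name, the pool name and the broker_req/broker_rsp keys are illustrative assumptions, not defined by this hunk):

    from charmhelpers.core.hookenv import relation_get, relation_ids, relation_set

    rq = CephBrokerRq()
    rq.add_op_create_pool(name='mypool', replica_count=3)

    # Publish the JSON-encoded request to the ceph-mon units over the relation.
    for rid in relation_ids('ceph'):
        relation_set(relation_id=rid, broker_req=rq.request)

    # Later, in the relation-changed hook, parse whatever the broker answered.
    encoded_rsp = relation_get('broker_rsp')
    if encoded_rsp:
        rsp = CephBrokerRsp(encoded_rsp)
        if rsp.exit_code != 0:
            raise RuntimeError('Ceph broker request failed: %s' % rsp.exit_msg)
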
@@ -162,11 +162,14 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
     realpath = os.path.abspath(path)
-    if os.path.exists(realpath):
-        if force and not os.path.isdir(realpath):
+    path_exists = os.path.exists(realpath)
+    if path_exists and force:
+        if not os.path.isdir(realpath):
             log("Removing non-directory file {} prior to mkdir()".format(path))
             os.unlink(realpath)
-    else:
-        os.makedirs(realpath, perms)
-    os.chown(realpath, uid, gid)
+            os.makedirs(realpath, perms)
+            os.chown(realpath, uid, gid)
+    elif not path_exists:
+        os.makedirs(realpath, perms)
+        os.chown(realpath, uid, gid)
 
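
The rewritten branch only touches the filesystem when the path is missing, or when force is set and the existing path is not a directory. A short illustration of the intended semantics (the path is a made-up example):

    from charmhelpers.core.host import mkdir

    # Missing path: created with the requested owner, group and permissions.
    mkdir('/var/lib/example-charm', owner='root', group='root', perms=0o755)

    # Existing regular file at the path: with force=True it is unlinked and
    # recreated as a directory; without force the call now leaves it untouched.
    mkdir('/var/lib/example-charm', perms=0o755, force=True)
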
@@ -64,9 +64,16 @@ CLOUD_ARCHIVE_POCKETS = {
     'trusty-juno/updates': 'trusty-updates/juno',
     'trusty-updates/juno': 'trusty-updates/juno',
     'juno/proposed': 'trusty-proposed/juno',
-    'juno/proposed': 'trusty-proposed/juno',
     'trusty-juno/proposed': 'trusty-proposed/juno',
     'trusty-proposed/juno': 'trusty-proposed/juno',
+    # Kilo
+    'kilo': 'trusty-updates/kilo',
+    'trusty-kilo': 'trusty-updates/kilo',
+    'trusty-kilo/updates': 'trusty-updates/kilo',
+    'trusty-updates/kilo': 'trusty-updates/kilo',
+    'kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-kilo/proposed': 'trusty-proposed/kilo',
+    'trusty-proposed/kilo': 'trusty-proposed/kilo',
 }
 
 # The order of this list is very important. Handlers should be listed in from
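
These aliases let a charm on trusty opt into the Kilo cloud archive through its installation source setting. A small example of how the pockets are typically consumed via charmhelpers.fetch (the exact config key that carries the string is up to the charm):

    from charmhelpers.fetch import add_source, apt_update

    # 'cloud:trusty-kilo' resolves through CLOUD_ARCHIVE_POCKETS above to the
    # 'trusty-updates/kilo' pocket of the Ubuntu Cloud Archive.
    add_source('cloud:trusty-kilo')
    apt_update(fatal=True)
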
@@ -1,7 +1,6 @@
 #!/usr/bin/python
 
 from base64 import b64decode
-import os
 
 from charmhelpers.core.hookenv import (
     log, ERROR, WARNING,
@@ -10,8 +9,6 @@ from charmhelpers.core.hookenv import (
     relation_get,
     relation_set,
     relation_ids,
-    relations_of_type,
-    local_unit,
     unit_get,
     Hooks, UnregisteredHookError
 )
@@ -38,7 +35,7 @@ from charmhelpers.contrib.openstack.utils import (
 from charmhelpers.payload.execd import execd_preinstall
 from charmhelpers.core.sysctl import create as create_sysctl
 
-from charmhelpers.contrib.charmsupport.nrpe import NRPE
+from charmhelpers.contrib.charmsupport import nrpe
 
 import sys
 from quantum_utils import (
@@ -224,61 +221,27 @@ def stop():
 @hooks.hook('nrpe-external-master-relation-joined',
             'nrpe-external-master-relation-changed')
 def update_nrpe_config():
-    # Find out if nrpe set nagios_hostname
-    hostname = None
-    host_context = None
-    for rel in relations_of_type('nrpe-external-master'):
-        if 'nagios_hostname' in rel:
-            hostname = rel['nagios_hostname']
-            host_context = rel['nagios_host_context']
-            break
-    nrpe = NRPE(hostname=hostname)
     # python-dbus is used by check_upstart_job
     apt_install('python-dbus')
-
-    if host_context:
-        current_unit = "%s:%s" % (host_context, local_unit())
-    else:
-        current_unit = local_unit()
-
-    services_to_monitor = services()
-    for service in services_to_monitor:
-        upstart_init = '/etc/init/%s.conf' % service
-        sysv_init = '/etc/init.d/%s' % service
-
-        if os.path.exists(upstart_init):
-            nrpe.add_check(
-                shortname=service,
-                description='process check {%s}' % current_unit,
-                check_cmd='check_upstart_job %s' % service,
-            )
-        elif os.path.exists(sysv_init):
-            cronpath = '/etc/cron.d/nagios-service-check-%s' % service
-            cron_template = '*/5 * * * * root \
-/usr/local/lib/nagios/plugins/check_exit_status.pl -s /etc/init.d/%s \
-status > /var/lib/nagios/service-check-%s.txt\n' % (service, service)
-            f = open(cronpath, 'w')
-            f.write(cron_template)
-            f.close()
-            nrpe.add_check(
-                shortname=service,
-                description='process check {%s}' % current_unit,
-                check_cmd='check_status_file.py -f \
-/var/lib/nagios/service-check-%s.txt' % service,
-            )
+    hostname = nrpe.get_nagios_hostname()
+    current_unit = nrpe.get_nagios_unit_name()
+    nrpe_setup = nrpe.NRPE(hostname=hostname)
+    nrpe.add_init_service_checks(nrpe_setup, services(), current_unit)
+
     cronpath = '/etc/cron.d/nagios-netns-check'
-    cron_template = '*/5 * * * * root \
-/usr/local/lib/nagios/plugins/check_netns.sh \
-> /var/lib/nagios/netns-check.txt\n'
+    cron_template = ('*/5 * * * * root '
+                     '/usr/local/lib/nagios/plugins/check_netns.sh '
+                     '> /var/lib/nagios/netns-check.txt\n'
+                     )
     f = open(cronpath, 'w')
     f.write(cron_template)
     f.close()
-    nrpe.add_check(
+    nrpe_setup.add_check(
         shortname="netns",
         description='Network Namespace check {%s}' % current_unit,
         check_cmd='check_status_file.py -f /var/lib/nagios/netns-check.txt'
     )
-    nrpe.write()
+    nrpe_setup.write()
 
 
 if __name__ == '__main__':