From 649195946f8d3c6376a6f09b2ebc9dcb7ae557d1 Mon Sep 17 00:00:00 2001 From: Bilal Baqar Date: Wed, 29 Jul 2015 11:07:31 -0700 Subject: [PATCH] Addressed reviews by Charmers --- README.ex | 26 +- bin/charm_helpers_sync.py | 253 ---- charm-helpers-sync.yaml | 9 +- config.yaml | 18 +- copyright | 2 +- hooks/charmhelpers-pl/__init__.py | 38 - .../contrib/hahelpers/apache.py | 82 - .../contrib/hahelpers/cluster.py | 272 ---- hooks/charmhelpers-pl/contrib/network/ip.py | 450 ------ .../contrib/network/ovs/__init__.py | 96 -- .../contrib/openstack/__init__.py | 15 - .../contrib/openstack/amulet/__init__.py | 15 - .../contrib/openstack/amulet/deployment.py | 146 -- .../contrib/openstack/amulet/utils.py | 294 ---- .../contrib/openstack/context.py | 1343 ----------------- .../contrib/openstack/files/check_haproxy.sh | 32 - .../files/check_haproxy_queue_depth.sh | 30 - hooks/charmhelpers-pl/contrib/openstack/ip.py | 146 -- .../contrib/openstack/neutron.py | 337 ----- .../contrib/openstack/templates/__init__.py | 18 - .../contrib/openstack/templates/ceph.conf | 15 - .../contrib/openstack/templates/git.upstart | 17 - .../contrib/openstack/templates/haproxy.cfg | 58 - .../templates/openstack_https_frontend | 24 - .../templates/openstack_https_frontend.conf | 24 - .../templates/section-keystone-authtoken | 9 - .../openstack/templates/section-rabbitmq-oslo | 22 - .../openstack/templates/section-zeromq | 14 - .../contrib/openstack/templating.py | 295 ---- .../contrib/openstack/utils.py | 642 -------- .../contrib/python/__init__.py | 15 - .../contrib/python/packages.py | 96 -- .../contrib/storage/__init__.py | 15 - .../contrib/storage/linux/__init__.py | 15 - .../contrib/storage/linux/ceph.py | 444 ------ .../contrib/storage/linux/loopback.py | 78 - .../contrib/storage/linux/lvm.py | 105 -- .../contrib/storage/linux/utils.py | 70 - hooks/charmhelpers-pl/core/__init__.py | 15 - hooks/charmhelpers-pl/core/decorators.py | 57 - hooks/charmhelpers-pl/core/fstab.py | 134 -- 
hooks/charmhelpers-pl/core/hookenv.py | 667 -------- hooks/charmhelpers-pl/core/host.py | 450 ------ hooks/charmhelpers-pl/core/services/base.py | 329 ---- .../charmhelpers-pl/core/services/helpers.py | 267 ---- hooks/charmhelpers-pl/core/strutils.py | 42 - hooks/charmhelpers-pl/core/sysctl.py | 56 - hooks/charmhelpers-pl/core/templating.py | 68 - hooks/charmhelpers-pl/core/unitdata.py | 477 ------ hooks/charmhelpers-pl/fetch/__init__.py | 439 ------ hooks/charmhelpers-pl/fetch/archiveurl.py | 161 -- hooks/charmhelpers-pl/fetch/bzrurl.py | 78 - hooks/charmhelpers-pl/fetch/giturl.py | 71 - hooks/charmhelpers-pl/payload/__init__.py | 17 - hooks/charmhelpers-pl/payload/execd.py | 66 - .../contrib/amulet}/__init__.py | 0 .../charmhelpers/contrib/amulet/deployment.py | 93 ++ hooks/charmhelpers/contrib/amulet/utils.py | 533 +++++++ .../charmhelpers/contrib/ansible/__init__.py | 254 ++++ .../contrib/benchmark/__init__.py | 126 ++ .../contrib/charmhelpers/__init__.py | 208 +++ .../contrib/charmsupport}/__init__.py | 0 .../charmhelpers/contrib/charmsupport/nrpe.py | 360 +++++ .../contrib/charmsupport/volumes.py | 175 +++ .../charmhelpers/contrib/database/__init__.py | 0 hooks/charmhelpers/contrib/database/mysql.py | 412 +++++ .../charmhelpers/contrib/hahelpers/cluster.py | 29 +- hooks/charmhelpers/contrib/network/ufw.py | 319 ++++ .../contrib/openstack/amulet/deployment.py | 47 +- .../contrib/openstack/amulet/utils.py | 400 ++++- .../charmhelpers/contrib/openstack/context.py | 21 +- .../contrib/openstack/files/check_haproxy.sh | 32 - .../files/check_haproxy_queue_depth.sh | 30 - hooks/charmhelpers/contrib/openstack/ip.py | 93 +- .../charmhelpers/contrib/openstack/neutron.py | 28 +- .../contrib/openstack/templates/ceph.conf | 15 - .../contrib/openstack/templates/git.upstart | 17 - .../contrib/openstack/templates/haproxy.cfg | 58 - .../templates/openstack_https_frontend | 24 - .../templates/openstack_https_frontend.conf | 24 - .../templates/section-keystone-authtoken | 9 - 
.../openstack/templates/section-rabbitmq-oslo | 22 - .../openstack/templates/section-zeromq | 14 - .../contrib/openstack/templating.py | 4 +- hooks/charmhelpers/contrib/openstack/utils.py | 110 +- .../contrib/peerstorage/__init__.py | 268 ++++ hooks/charmhelpers/contrib/python/debug.py | 56 + hooks/charmhelpers/contrib/python/packages.py | 35 +- hooks/charmhelpers/contrib/python/rpdb.py | 58 + .../contrib/python/version.py} | 20 +- .../contrib/saltstack/__init__.py | 118 ++ hooks/charmhelpers/contrib/ssl/__init__.py | 94 ++ hooks/charmhelpers/contrib/ssl/service.py | 279 ++++ .../contrib/storage/linux/ceph.py | 12 +- .../contrib/storage/linux/utils.py | 2 +- .../contrib/templating}/__init__.py | 0 .../contrib/templating/contexts.py | 139 ++ .../contrib/templating/jinja.py} | 36 +- .../contrib/templating/pyformat.py} | 15 +- hooks/charmhelpers/contrib/unison/__init__.py | 313 ++++ hooks/charmhelpers/core/files.py | 45 + hooks/charmhelpers/core/hookenv.py | 189 ++- hooks/charmhelpers/core/host.py | 66 +- hooks/charmhelpers/core/services/base.py | 58 +- hooks/charmhelpers/core/services/helpers.py | 4 +- hooks/charmhelpers/fetch/__init__.py | 23 +- hooks/charmhelpers/fetch/archiveurl.py | 8 +- hooks/charmhelpers/fetch/giturl.py | 14 +- hooks/charmhelpers/payload/archive.py | 73 + hooks/neutron-plugin-api-relation-changed | 1 - hooks/neutron-plugin-api-relation-departed | 1 - hooks/neutron-plugin-api-relation-joined | 1 - hooks/pg_dir_context.py | 54 +- hooks/pg_dir_hooks.py | 65 +- hooks/pg_dir_utils.py | 171 ++- hooks/plumgrid-plugin-relation-broken | 1 - hooks/plumgrid-plugin-relation-changed | 1 - hooks/plumgrid-plugin-relation-departed | 1 - hooks/plumgrid-relation-broken | 1 - hooks/plumgrid-relation-changed | 1 - hooks/plumgrid-relation-departed | 1 - ...utron-plugin-api-relation-broken => start} | 0 hooks/upgrade-charm | 1 - metadata.yaml | 3 - 124 files changed, 5115 insertions(+), 9544 deletions(-) delete mode 100644 bin/charm_helpers_sync.py delete mode 
100644 hooks/charmhelpers-pl/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/hahelpers/apache.py delete mode 100644 hooks/charmhelpers-pl/contrib/hahelpers/cluster.py delete mode 100644 hooks/charmhelpers-pl/contrib/network/ip.py delete mode 100644 hooks/charmhelpers-pl/contrib/network/ovs/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/amulet/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/context.py delete mode 100755 hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh delete mode 100755 hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/ip.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/neutron.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/templating.py delete mode 100644 hooks/charmhelpers-pl/contrib/openstack/utils.py delete mode 100644 hooks/charmhelpers-pl/contrib/python/__init__.py 
delete mode 100644 hooks/charmhelpers-pl/contrib/python/packages.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/linux/__init__.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/linux/ceph.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/linux/loopback.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/linux/lvm.py delete mode 100644 hooks/charmhelpers-pl/contrib/storage/linux/utils.py delete mode 100644 hooks/charmhelpers-pl/core/__init__.py delete mode 100644 hooks/charmhelpers-pl/core/decorators.py delete mode 100644 hooks/charmhelpers-pl/core/fstab.py delete mode 100644 hooks/charmhelpers-pl/core/hookenv.py delete mode 100644 hooks/charmhelpers-pl/core/host.py delete mode 100644 hooks/charmhelpers-pl/core/services/base.py delete mode 100644 hooks/charmhelpers-pl/core/services/helpers.py delete mode 100644 hooks/charmhelpers-pl/core/strutils.py delete mode 100644 hooks/charmhelpers-pl/core/sysctl.py delete mode 100644 hooks/charmhelpers-pl/core/templating.py delete mode 100644 hooks/charmhelpers-pl/core/unitdata.py delete mode 100644 hooks/charmhelpers-pl/fetch/__init__.py delete mode 100644 hooks/charmhelpers-pl/fetch/archiveurl.py delete mode 100644 hooks/charmhelpers-pl/fetch/bzrurl.py delete mode 100644 hooks/charmhelpers-pl/fetch/giturl.py delete mode 100644 hooks/charmhelpers-pl/payload/__init__.py delete mode 100644 hooks/charmhelpers-pl/payload/execd.py rename hooks/{charmhelpers-pl/contrib => charmhelpers/contrib/amulet}/__init__.py (100%) create mode 100644 hooks/charmhelpers/contrib/amulet/deployment.py create mode 100644 hooks/charmhelpers/contrib/amulet/utils.py create mode 100644 hooks/charmhelpers/contrib/ansible/__init__.py create mode 100644 hooks/charmhelpers/contrib/benchmark/__init__.py create mode 100644 hooks/charmhelpers/contrib/charmhelpers/__init__.py rename hooks/{charmhelpers-pl/contrib/hahelpers => 
charmhelpers/contrib/charmsupport}/__init__.py (100%) create mode 100644 hooks/charmhelpers/contrib/charmsupport/nrpe.py create mode 100644 hooks/charmhelpers/contrib/charmsupport/volumes.py create mode 100644 hooks/charmhelpers/contrib/database/__init__.py create mode 100644 hooks/charmhelpers/contrib/database/mysql.py create mode 100644 hooks/charmhelpers/contrib/network/ufw.py delete mode 100755 hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh delete mode 100755 hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/ceph.conf delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/git.upstart delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo delete mode 100644 hooks/charmhelpers/contrib/openstack/templates/section-zeromq create mode 100644 hooks/charmhelpers/contrib/peerstorage/__init__.py create mode 100644 hooks/charmhelpers/contrib/python/debug.py create mode 100644 hooks/charmhelpers/contrib/python/rpdb.py rename hooks/{charmhelpers-pl/contrib/openstack/files/__init__.py => charmhelpers/contrib/python/version.py} (58%) create mode 100644 hooks/charmhelpers/contrib/saltstack/__init__.py create mode 100644 hooks/charmhelpers/contrib/ssl/__init__.py create mode 100644 hooks/charmhelpers/contrib/ssl/service.py rename hooks/{charmhelpers-pl/contrib/network => charmhelpers/contrib/templating}/__init__.py (100%) create mode 100644 hooks/charmhelpers/contrib/templating/contexts.py rename hooks/{charmhelpers-pl/contrib/openstack/alternatives.py => 
charmhelpers/contrib/templating/jinja.py} (54%) rename hooks/{charmhelpers-pl/core/services/__init__.py => charmhelpers/contrib/templating/pyformat.py} (65%) create mode 100644 hooks/charmhelpers/contrib/unison/__init__.py create mode 100644 hooks/charmhelpers/core/files.py create mode 100644 hooks/charmhelpers/payload/archive.py delete mode 120000 hooks/neutron-plugin-api-relation-changed delete mode 120000 hooks/neutron-plugin-api-relation-departed delete mode 120000 hooks/neutron-plugin-api-relation-joined delete mode 120000 hooks/plumgrid-plugin-relation-broken delete mode 120000 hooks/plumgrid-plugin-relation-changed delete mode 120000 hooks/plumgrid-plugin-relation-departed delete mode 120000 hooks/plumgrid-relation-broken delete mode 120000 hooks/plumgrid-relation-changed delete mode 120000 hooks/plumgrid-relation-departed rename hooks/{neutron-plugin-api-relation-broken => start} (100%) delete mode 120000 hooks/upgrade-charm diff --git a/README.ex b/README.ex index 49dc7c8..00ebec1 100644 --- a/README.ex +++ b/README.ex @@ -7,19 +7,17 @@ Once deployed this charm performs the configurations required for a PLUMgrid Dir Step by step instructions on using the charm: juju deploy neutron-api - juju deploy neutron-plumgrid-plugin neutron-api - juju deploy neutron-iovisor - juju deploy plumgrid-director --to + juju deploy neutron-api-plumgrid + juju deploy plumgrid-director - juju add-relation neutron-api neutron-plumgrid-plugin - juju add-relation neutron-plumgrid-plugin neutron-iovisor - juju add-relation neutron-iovisor plumgrid-director + juju add-relation neutron-api neutron-api-plumgrid + juju add-relation neutron-api-plumgrid plumgrid-director -For plumgrid-director to work make the configuration in the neutron-api, neutron-plumgrid-plugin and neutron-iovisor charms as specified in the configuration section below. 
+For plumgrid-director to work make the configuration in the neutron-api and neutron-api-plumgrid charms as specified in the configuration section below. # Known Limitations and Issues -This is an early access version of the PLUMgrid Director charm and it is not meant for production deployments. The charm only works with JUNO for now. This charm needs to be deployed on a node where a unit of neutron-iovisor charm exists. Also plumgrid-edge and plumgrid-gateway charms should not be deployed on the same node. +This is an early access version of the PLUMgrid Director charm and it is not meant for production deployments. The charm only supports Kilo Openstack Release. # Configuration @@ -27,10 +25,9 @@ Example Config plumgrid-director: plumgrid-virtual-ip: "192.168.100.250" - neutron-iovisor: install_sources: 'ppa:plumgrid-team/stable' install_keys: 'null' - neutron-plumgrid-plugin: + neutron-api-plumgrid: install_sources: 'ppa:plumgrid-team/stable' install_keys: 'null' enable-metadata: False @@ -38,9 +35,12 @@ Example Config neutron-plugin: "plumgrid" plumgrid-virtual-ip: "192.168.100.250" -The plumgrid-virtual-ip is the IP address of the PLUMgrid Director's Management interface and that the same IP is used to access PLUMgrid Console. -Ensure that the same ip is specified in the neutron-api charm configuration. -Using the example config provided above PLUMgrid Console can be accessed at https://192.168.100.250 +Provide the virtual IP you want PLUMgrid GUI to be accessible. +Make sure that it is the same IP specified in the neutron-api charm configuration for PLUMgrid. +The virtual IP passed on in the neutron-api charm has to be same as the one passed in the plumgrid-director charm. +Provide the source repo path for PLUMgrid Debs in 'install_sources' and the corresponding keys in 'install_keys'. 
+ +You can access the PG Console at https://192.168.100.250 # Contact Information diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py deleted file mode 100644 index f67fdb9..0000000 --- a/bin/charm_helpers_sync.py +++ /dev/null @@ -1,253 +0,0 @@ -#!/usr/bin/python - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# Authors: -# Adam Gandelman - -import logging -import optparse -import os -import subprocess -import shutil -import sys -import tempfile -import yaml -from fnmatch import fnmatch - -import six - -CHARM_HELPERS_BRANCH = 'lp:charm-helpers' - - -def parse_config(conf_file): - if not os.path.isfile(conf_file): - logging.error('Invalid config file: %s.' % conf_file) - return False - return yaml.load(open(conf_file).read()) - - -def clone_helpers(work_dir, branch): - dest = os.path.join(work_dir, 'charm-helpers') - logging.info('Checking out %s to %s.' 
% (branch, dest)) - cmd = ['bzr', 'checkout', '--lightweight', branch, dest] - subprocess.check_call(cmd) - return dest - - -def _module_path(module): - return os.path.join(*module.split('.')) - - -def _src_path(src, module): - return os.path.join(src, 'charmhelpers', _module_path(module)) - - -def _dest_path(dest, module): - return os.path.join(dest, _module_path(module)) - - -def _is_pyfile(path): - return os.path.isfile(path + '.py') - - -def ensure_init(path): - ''' - ensure directories leading up to path are importable, omitting - parent directory, eg path='/hooks/helpers/foo'/: - hooks/ - hooks/helpers/__init__.py - hooks/helpers/foo/__init__.py - ''' - for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])): - _i = os.path.join(d, '__init__.py') - if not os.path.exists(_i): - logging.info('Adding missing __init__.py: %s' % _i) - open(_i, 'wb').close() - - -def sync_pyfile(src, dest): - src = src + '.py' - src_dir = os.path.dirname(src) - logging.info('Syncing pyfile: %s -> %s.' 
% (src, dest)) - if not os.path.exists(dest): - os.makedirs(dest) - shutil.copy(src, dest) - if os.path.isfile(os.path.join(src_dir, '__init__.py')): - shutil.copy(os.path.join(src_dir, '__init__.py'), - dest) - ensure_init(dest) - - -def get_filter(opts=None): - opts = opts or [] - if 'inc=*' in opts: - # do not filter any files, include everything - return None - - def _filter(dir, ls): - incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt] - _filter = [] - for f in ls: - _f = os.path.join(dir, f) - - if not os.path.isdir(_f) and not _f.endswith('.py') and incs: - if True not in [fnmatch(_f, inc) for inc in incs]: - logging.debug('Not syncing %s, does not match include ' - 'filters (%s)' % (_f, incs)) - _filter.append(f) - else: - logging.debug('Including file, which matches include ' - 'filters (%s): %s' % (incs, _f)) - elif (os.path.isfile(_f) and not _f.endswith('.py')): - logging.debug('Not syncing file: %s' % f) - _filter.append(f) - elif (os.path.isdir(_f) and not - os.path.isfile(os.path.join(_f, '__init__.py'))): - logging.debug('Not syncing directory: %s' % f) - _filter.append(f) - return _filter - return _filter - - -def sync_directory(src, dest, opts=None): - if os.path.exists(dest): - logging.debug('Removing existing directory: %s' % dest) - shutil.rmtree(dest) - logging.info('Syncing directory: %s -> %s.' % (src, dest)) - - shutil.copytree(src, dest, ignore=get_filter(opts)) - ensure_init(dest) - - -def sync(src, dest, module, opts=None): - - # Sync charmhelpers/__init__.py for bootstrap code. - sync_pyfile(_src_path(src, '__init__'), dest) - - # Sync other __init__.py files in the path leading to module. - m = [] - steps = module.split('.')[:-1] - while steps: - m.append(steps.pop(0)) - init = '.'.join(m + ['__init__']) - sync_pyfile(_src_path(src, init), - os.path.dirname(_dest_path(dest, init))) - - # Sync the module, or maybe a .py file. 
- if os.path.isdir(_src_path(src, module)): - sync_directory(_src_path(src, module), _dest_path(dest, module), opts) - elif _is_pyfile(_src_path(src, module)): - sync_pyfile(_src_path(src, module), - os.path.dirname(_dest_path(dest, module))) - else: - logging.warn('Could not sync: %s. Neither a pyfile or directory, ' - 'does it even exist?' % module) - - -def parse_sync_options(options): - if not options: - return [] - return options.split(',') - - -def extract_options(inc, global_options=None): - global_options = global_options or [] - if global_options and isinstance(global_options, six.string_types): - global_options = [global_options] - if '|' not in inc: - return (inc, global_options) - inc, opts = inc.split('|') - return (inc, parse_sync_options(opts) + global_options) - - -def sync_helpers(include, src, dest, options=None): - if not os.path.isdir(dest): - os.makedirs(dest) - - global_options = parse_sync_options(options) - - for inc in include: - if isinstance(inc, str): - inc, opts = extract_options(inc, global_options) - sync(src, dest, inc, opts) - elif isinstance(inc, dict): - # could also do nested dicts here. 
- for k, v in six.iteritems(inc): - if isinstance(v, list): - for m in v: - inc, opts = extract_options(m, global_options) - sync(src, dest, '%s.%s' % (k, inc), opts) - -if __name__ == '__main__': - parser = optparse.OptionParser() - parser.add_option('-c', '--config', action='store', dest='config', - default=None, help='helper config file') - parser.add_option('-D', '--debug', action='store_true', dest='debug', - default=False, help='debug') - parser.add_option('-b', '--branch', action='store', dest='branch', - help='charm-helpers bzr branch (overrides config)') - parser.add_option('-d', '--destination', action='store', dest='dest_dir', - help='sync destination dir (overrides config)') - (opts, args) = parser.parse_args() - - if opts.debug: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - if opts.config: - logging.info('Loading charm helper config from %s.' % opts.config) - config = parse_config(opts.config) - if not config: - logging.error('Could not parse config from %s.' % opts.config) - sys.exit(1) - else: - config = {} - - if 'branch' not in config: - config['branch'] = CHARM_HELPERS_BRANCH - if opts.branch: - config['branch'] = opts.branch - if opts.dest_dir: - config['destination'] = opts.dest_dir - - if 'destination' not in config: - logging.error('No destination dir. 
specified as option or config.') - sys.exit(1) - - if 'include' not in config: - if not args: - logging.error('No modules to sync specified as option or config.') - sys.exit(1) - config['include'] = [] - [config['include'].append(a) for a in args] - - sync_options = None - if 'options' in config: - sync_options = config['options'] - tmpd = tempfile.mkdtemp() - try: - checkout = clone_helpers(tmpd, config['branch']) - sync_helpers(config['include'], checkout, config['destination'], - options=sync_options) - except Exception as e: - logging.error("Could not sync: %s" % e) - raise e - finally: - logging.debug('Cleaning up %s' % tmpd) - shutil.rmtree(tmpd) diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml index 9b5e79e..390163e 100644 --- a/charm-helpers-sync.yaml +++ b/charm-helpers-sync.yaml @@ -3,10 +3,5 @@ destination: hooks/charmhelpers include: - core - fetch - - contrib.openstack|inc=* - - contrib.hahelpers - - contrib.network.ovs - - contrib.storage.linux - - payload.execd - - contrib.network.ip - - contrib.python.packages + - contrib + - payload diff --git a/config.yaml b/config.yaml index 93641f4..547ddeb 100644 --- a/config.yaml +++ b/config.yaml @@ -2,4 +2,20 @@ options: plumgrid-virtual-ip: default: 192.168.100.250 type: string - description: The IP on which PG Console will be accessible. + description: IP address of the Director's Management interface. Same IP can be used to access PG Console. + lcm-ssh-key: + default: 'null' + type: string + description: Public SSH key of PLUMgrid LCM which is running PG-Tools + network-device-mtu: + type: string + default: '1580' + description: The MTU size for interfaces managed by director. 
+ install_sources: + default: 'ppa:plumgrid-team/stable' + type: string + description: Provide the install source from where to install the PLUMgrid debs + install_keys: + default: null + type: string + description: Provide the respective keys of the install sources diff --git a/copyright b/copyright index d44f24c..8ac66cc 100644 --- a/copyright +++ b/copyright @@ -1,7 +1,7 @@ Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0 Files: * -Copyright: 2012, Canonical Ltd. +Copyright: 2015, PLUMgrid Inc. License: GPL-3 License: GPL-3 diff --git a/hooks/charmhelpers-pl/__init__.py b/hooks/charmhelpers-pl/__init__.py deleted file mode 100644 index f72e7f8..0000000 --- a/hooks/charmhelpers-pl/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# Bootstrap charm-helpers, installing its dependencies if necessary using -# only standard libraries. 
-import subprocess -import sys - -try: - import six # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-six']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-six']) - import six # flake8: noqa - -try: - import yaml # flake8: noqa -except ImportError: - if sys.version_info.major == 2: - subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml']) - else: - subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml']) - import yaml # flake8: noqa diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/apache.py b/hooks/charmhelpers-pl/contrib/hahelpers/apache.py deleted file mode 100644 index 0091719..0000000 --- a/hooks/charmhelpers-pl/contrib/hahelpers/apache.py +++ /dev/null @@ -1,82 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# -# Copyright 2012 Canonical Ltd. 
-# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import subprocess - -from charmhelpers.core.hookenv import ( - config as config_get, - relation_get, - relation_ids, - related_units as relation_list, - log, - INFO, -) - - -def get_cert(cn=None): - # TODO: deal with multiple https endpoints via charm config - cert = config_get('ssl_cert') - key = config_get('ssl_key') - if not (cert and key): - log("Inspecting identity-service relations for SSL certificate.", - level=INFO) - cert = key = None - if cn: - ssl_cert_attr = 'ssl_cert_{}'.format(cn) - ssl_key_attr = 'ssl_key_{}'.format(cn) - else: - ssl_cert_attr = 'ssl_cert' - ssl_key_attr = 'ssl_key' - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if not cert: - cert = relation_get(ssl_cert_attr, - rid=r_id, unit=unit) - if not key: - key = relation_get(ssl_key_attr, - rid=r_id, unit=unit) - return (cert, key) - - -def get_ca_cert(): - ca_cert = config_get('ssl_ca') - if ca_cert is None: - log("Inspecting identity-service relations for CA SSL certificate.", - level=INFO) - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - if ca_cert is None: - ca_cert = relation_get('ca_cert', - rid=r_id, unit=unit) - return ca_cert - - -def install_ca_cert(ca_cert): - if ca_cert: - with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', - 'w') as crt: - crt.write(ca_cert) - subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py b/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py deleted file mode 100644 index 9333efc..0000000 --- a/hooks/charmhelpers-pl/contrib/hahelpers/cluster.py +++ /dev/null @@ -1,272 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. 
-# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# -# Copyright 2012 Canonical Ltd. -# -# Authors: -# James Page -# Adam Gandelman -# - -""" -Helpers for clustering and determining "cluster leadership" and other -clustering-related helpers. -""" - -import subprocess -import os - -from socket import gethostname as get_unit_hostname - -import six - -from charmhelpers.core.hookenv import ( - log, - relation_ids, - related_units as relation_list, - relation_get, - config as config_get, - INFO, - ERROR, - WARNING, - unit_get, -) -from charmhelpers.core.decorators import ( - retry_on_exception, -) -from charmhelpers.core.strutils import ( - bool_from_string, -) - - -class HAIncompleteConfig(Exception): - pass - - -class CRMResourceNotFound(Exception): - pass - - -def is_elected_leader(resource): - """ - Returns True if the charm executing this is the elected cluster leader. - - It relies on two mechanisms to determine leadership: - 1. If the charm is part of a corosync cluster, call corosync to - determine leadership. - 2. If the charm is not part of a corosync cluster, the leader is - determined as being "the alive unit with the lowest unit numer". In - other words, the oldest surviving unit. 
- """ - if is_clustered(): - if not is_crm_leader(resource): - log('Deferring action to CRM leader.', level=INFO) - return False - else: - peers = peer_units() - if peers and not oldest_peer(peers): - log('Deferring action to oldest service unit.', level=INFO) - return False - return True - - -def is_clustered(): - for r_id in (relation_ids('ha') or []): - for unit in (relation_list(r_id) or []): - clustered = relation_get('clustered', - rid=r_id, - unit=unit) - if clustered: - return True - return False - - -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) -def is_crm_leader(resource, retry=False): - """ - Returns True if the charm calling this is the elected corosync leader, - as returned by calling the external "crm" command. - - We allow this operation to be retried to avoid the possibility of getting a - false negative. See LP #1396246 for more info. - """ - cmd = ['crm', 'resource', 'show', resource] - try: - status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if not isinstance(status, six.text_type): - status = six.text_type(status, "utf-8") - except subprocess.CalledProcessError: - status = None - - if status and get_unit_hostname() in status: - return True - - if status and "resource %s is NOT running" % (resource) in status: - raise CRMResourceNotFound("CRM resource %s not found" % (resource)) - - return False - - -def is_leader(resource): - log("is_leader is deprecated. 
Please consider using is_crm_leader " - "instead.", level=WARNING) - return is_crm_leader(resource) - - -def peer_units(peer_relation="cluster"): - peers = [] - for r_id in (relation_ids(peer_relation) or []): - for unit in (relation_list(r_id) or []): - peers.append(unit) - return peers - - -def peer_ips(peer_relation='cluster', addr_key='private-address'): - '''Return a dict of peers and their private-address''' - peers = {} - for r_id in relation_ids(peer_relation): - for unit in relation_list(r_id): - peers[unit] = relation_get(addr_key, rid=r_id, unit=unit) - return peers - - -def oldest_peer(peers): - """Determines who the oldest peer is by comparing unit numbers.""" - local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) - for peer in peers: - remote_unit_no = int(peer.split('/')[1]) - if remote_unit_no < local_unit_no: - return False - return True - - -def eligible_leader(resource): - log("eligible_leader is deprecated. Please consider using " - "is_elected_leader instead.", level=WARNING) - return is_elected_leader(resource) - - -def https(): - ''' - Determines whether enough data has been provided in configuration - or relation data to configure HTTPS - . - returns: boolean - ''' - use_https = config_get('use-https') - if use_https and bool_from_string(use_https): - return True - if config_get('ssl_cert') and config_get('ssl_key'): - return True - for r_id in relation_ids('identity-service'): - for unit in relation_list(r_id): - # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN - rel_state = [ - relation_get('https_keystone', rid=r_id, unit=unit), - relation_get('ca_cert', rid=r_id, unit=unit), - ] - # NOTE: works around (LP: #1203241) - if (None not in rel_state) and ('' not in rel_state): - return True - return False - - -def determine_api_port(public_port, singlenode_mode=False): - ''' - Determine correct API server listening port based on - existence of HTTPS reverse proxy and/or haproxy. 
- - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the API service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - if https(): - i += 1 - return public_port - (i * 10) - - -def determine_apache_port(public_port, singlenode_mode=False): - ''' - Description: Determine correct apache listening port based on public IP + - state of the cluster. - - public_port: int: standard public port for given service - - singlenode_mode: boolean: Shuffle ports when only a single unit is present - - returns: int: the correct listening port for the HAProxy service - ''' - i = 0 - if singlenode_mode: - i += 1 - elif len(peer_units()) > 0 or is_clustered(): - i += 1 - return public_port - (i * 10) - - -def get_hacluster_config(exclude_keys=None): - ''' - Obtains all relevant configuration from charm configuration required - for initiating a relation to hacluster: - - ha-bindiface, ha-mcastport, vip - - param: exclude_keys: list of setting key(s) to be excluded. - returns: dict: A dict containing settings keyed by setting name. - raises: HAIncompleteConfig if settings are missing. - ''' - settings = ['ha-bindiface', 'ha-mcastport', 'vip'] - conf = {} - for setting in settings: - if exclude_keys and setting in exclude_keys: - continue - - conf[setting] = config_get(setting) - missing = [] - [missing.append(s) for s, v in six.iteritems(conf) if v is None] - if missing: - log('Insufficient config data to configure hacluster.', level=ERROR) - raise HAIncompleteConfig - return conf - - -def canonical_url(configs, vip_setting='vip'): - ''' - Returns the correct HTTP URL to this host given the state of HTTPS - configuration and hacluster. - - :configs : OSTemplateRenderer: A config tempating object to inspect for - a complete https context. 
- - :vip_setting: str: Setting in charm config that specifies - VIP address. - ''' - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' - if is_clustered(): - addr = config_get(vip_setting) - else: - addr = unit_get('private-address') - return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers-pl/contrib/network/ip.py b/hooks/charmhelpers-pl/contrib/network/ip.py deleted file mode 100644 index fff6d5c..0000000 --- a/hooks/charmhelpers-pl/contrib/network/ip.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import glob -import re -import subprocess -import six -import socket - -from functools import partial - -from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install -from charmhelpers.core.hookenv import ( - log, - WARNING, -) - -try: - import netifaces -except ImportError: - apt_install('python-netifaces') - import netifaces - -try: - import netaddr -except ImportError: - apt_install('python-netaddr') - import netaddr - - -def _validate_cidr(network): - try: - netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - -def no_ip_found_error_out(network): - errmsg = ("No IP address found in network: %s" % network) - raise ValueError(errmsg) - - -def get_address_in_network(network, fallback=None, fatal=False): - """Get an IPv4 or IPv6 address within the network from the host. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - :param fallback (str): If no address is found, return fallback. - :param fatal (boolean): If no address is found, fallback is not - set and fatal is True then exit(1). 
- """ - if network is None: - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - else: - return None - - _validate_cidr(network) - network = netaddr.IPNetwork(network) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if network.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - if cidr in network: - return str(cidr.ip) - - if network.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - if cidr in network: - return str(cidr.ip) - - if fallback is not None: - return fallback - - if fatal: - no_ip_found_error_out(network) - - return None - - -def is_ipv6(address): - """Determine whether provided address is IPv6 or not.""" - try: - address = netaddr.IPAddress(address) - except netaddr.AddrFormatError: - # probably a hostname - so not an address at all! - return False - - return address.version == 6 - - -def is_address_in_network(network, address): - """ - Determine whether the provided address is within a network range. - - :param network (str): CIDR presentation format. For example, - '192.168.1.0/24'. - :param address: An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :returns boolean: Flag indicating whether address is in network. 
- """ - try: - network = netaddr.IPNetwork(network) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Network (%s) is not in CIDR presentation format" % - network) - - try: - address = netaddr.IPAddress(address) - except (netaddr.core.AddrFormatError, ValueError): - raise ValueError("Address (%s) is not in correct presentation format" % - address) - - if address in network: - return True - else: - return False - - -def _get_for_address(address, key): - """Retrieve an attribute of or the physical interface that - the IP address provided could be bound to. - - :param address (str): An individual IPv4 or IPv6 address without a net - mask or subnet prefix. For example, '192.168.1.1'. - :param key: 'iface' for the physical interface name or an attribute - of the configured interface, for example 'netmask'. - :returns str: Requested attribute or None if address is not bindable. - """ - address = netaddr.IPAddress(address) - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - if address.version == 4 and netifaces.AF_INET in addresses: - addr = addresses[netifaces.AF_INET][0]['addr'] - netmask = addresses[netifaces.AF_INET][0]['netmask'] - network = netaddr.IPNetwork("%s/%s" % (addr, netmask)) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - else: - return addresses[netifaces.AF_INET][0][key] - - if address.version == 6 and netifaces.AF_INET6 in addresses: - for addr in addresses[netifaces.AF_INET6]: - if not addr['addr'].startswith('fe80'): - network = netaddr.IPNetwork("%s/%s" % (addr['addr'], - addr['netmask'])) - cidr = network.cidr - if address in cidr: - if key == 'iface': - return iface - elif key == 'netmask' and cidr: - return str(cidr).split('/')[1] - else: - return addr[key] - - return None - - -get_iface_for_address = partial(_get_for_address, key='iface') - - -get_netmask_for_address = partial(_get_for_address, key='netmask') - - -def format_ipv6_addr(address): - """If 
address is IPv6, wrap it in '[]' otherwise return None. - - This is required by most configuration files when specifying IPv6 - addresses. - """ - if is_ipv6(address): - return "[%s]" % address - - return None - - -def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False, - fatal=True, exc_list=None): - """Return the assigned IP address for a given interface, if any.""" - # Extract nic if passed /dev/ethX - if '/' in iface: - iface = iface.split('/')[-1] - - if not exc_list: - exc_list = [] - - try: - inet_num = getattr(netifaces, inet_type) - except AttributeError: - raise Exception("Unknown inet type '%s'" % str(inet_type)) - - interfaces = netifaces.interfaces() - if inc_aliases: - ifaces = [] - for _iface in interfaces: - if iface == _iface or _iface.split(':')[0] == iface: - ifaces.append(_iface) - - if fatal and not ifaces: - raise Exception("Invalid interface '%s'" % iface) - - ifaces.sort() - else: - if iface not in interfaces: - if fatal: - raise Exception("Interface '%s' not found " % (iface)) - else: - return [] - - else: - ifaces = [iface] - - addresses = [] - for netiface in ifaces: - net_info = netifaces.ifaddresses(netiface) - if inet_num in net_info: - for entry in net_info[inet_num]: - if 'addr' in entry and entry['addr'] not in exc_list: - addresses.append(entry['addr']) - - if fatal and not addresses: - raise Exception("Interface '%s' doesn't have any %s addresses." 
% - (iface, inet_type)) - - return sorted(addresses) - - -get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET') - - -def get_iface_from_addr(addr): - """Work out on which interface the provided address is configured.""" - for iface in netifaces.interfaces(): - addresses = netifaces.ifaddresses(iface) - for inet_type in addresses: - for _addr in addresses[inet_type]: - _addr = _addr['addr'] - # link local - ll_key = re.compile("(.+)%.*") - raw = re.match(ll_key, _addr) - if raw: - _addr = raw.group(1) - - if _addr == addr: - log("Address '%s' is configured on iface '%s'" % - (addr, iface)) - return iface - - msg = "Unable to infer net iface on which '%s' is configured" % (addr) - raise Exception(msg) - - -def sniff_iface(f): - """Ensure decorated function is called with a value for iface. - - If no iface provided, inject net iface inferred from unit private address. - """ - def iface_sniffer(*args, **kwargs): - if not kwargs.get('iface', None): - kwargs['iface'] = get_iface_from_addr(unit_get('private-address')) - - return f(*args, **kwargs) - - return iface_sniffer - - -@sniff_iface -def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None, - dynamic_only=True): - """Get assigned IPv6 address for a given interface. - - Returns list of addresses found. If no address found, returns empty list. - - If iface is None, we infer the current primary interface by doing a reverse - lookup on the unit private-address. - - We currently only support scope global IPv6 addresses i.e. non-temporary - addresses. If no global IPv6 address is found, return the first one found - in the ipv6 address list. 
- """ - addresses = get_iface_addr(iface=iface, inet_type='AF_INET6', - inc_aliases=inc_aliases, fatal=fatal, - exc_list=exc_list) - - if addresses: - global_addrs = [] - for addr in addresses: - key_scope_link_local = re.compile("^fe80::..(.+)%(.+)") - m = re.match(key_scope_link_local, addr) - if m: - eui_64_mac = m.group(1) - iface = m.group(2) - else: - global_addrs.append(addr) - - if global_addrs: - # Make sure any found global addresses are not temporary - cmd = ['ip', 'addr', 'show', iface] - out = subprocess.check_output(cmd).decode('UTF-8') - if dynamic_only: - key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*") - else: - key = re.compile("inet6 (.+)/[0-9]+ scope global.*") - - addrs = [] - for line in out.split('\n'): - line = line.strip() - m = re.match(key, line) - if m and 'temporary' not in line: - # Return the first valid address we find - for addr in global_addrs: - if m.group(1) == addr: - if not dynamic_only or \ - m.group(1).endswith(eui_64_mac): - addrs.append(addr) - - if addrs: - return addrs - - if fatal: - raise Exception("Interface '%s' does not have a scope global " - "non-temporary ipv6 address." % iface) - - return [] - - -def get_bridges(vnic_dir='/sys/devices/virtual/net'): - """Return a list of bridges on the system.""" - b_regex = "%s/*/bridge" % vnic_dir - return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)] - - -def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'): - """Return a list of nics comprising a given bridge on the system.""" - brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge) - return [x.split('/')[-1] for x in glob.glob(brif_regex)] - - -def is_bridge_member(nic): - """Check if a given nic is a member of a bridge.""" - for bridge in get_bridges(): - if nic in get_bridge_nics(bridge): - return True - - return False - - -def is_ip(address): - """ - Returns True if address is a valid IP address. 
- """ - try: - # Test to see if already an IPv4 address - socket.inet_aton(address) - return True - except socket.error: - return False - - -def ns_query(address): - try: - import dns.resolver - except ImportError: - apt_install('python-dnspython') - import dns.resolver - - if isinstance(address, dns.name.Name): - rtype = 'PTR' - elif isinstance(address, six.string_types): - rtype = 'A' - else: - return None - - answers = dns.resolver.query(address, rtype) - if answers: - return str(answers[0]) - return None - - -def get_host_ip(hostname, fallback=None): - """ - Resolves the IP for a given hostname, or returns - the input if it is already an IP. - """ - if is_ip(hostname): - return hostname - - ip_addr = ns_query(hostname) - if not ip_addr: - try: - ip_addr = socket.gethostbyname(hostname) - except: - log("Failed to resolve hostname '%s'" % (hostname), - level=WARNING) - return fallback - return ip_addr - - -def get_hostname(address, fqdn=True): - """ - Resolves hostname for given IP, or returns the input - if it is already a hostname. - """ - if is_ip(address): - try: - import dns.reversename - except ImportError: - apt_install("python-dnspython") - import dns.reversename - - rev = dns.reversename.from_address(address) - result = ns_query(rev) - if not result: - return None - else: - result = address - - if fqdn: - # strip trailing . - if result.endswith('.'): - return result[:-1] - else: - return result - else: - return result.split('.')[0] diff --git a/hooks/charmhelpers-pl/contrib/network/ovs/__init__.py b/hooks/charmhelpers-pl/contrib/network/ovs/__init__.py deleted file mode 100644 index 77e2db7..0000000 --- a/hooks/charmhelpers-pl/contrib/network/ovs/__init__.py +++ /dev/null @@ -1,96 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. 
-# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -''' Helpers for interacting with OpenvSwitch ''' -import subprocess -import os -from charmhelpers.core.hookenv import ( - log, WARNING -) -from charmhelpers.core.host import ( - service -) - - -def add_bridge(name): - ''' Add the named bridge to openvswitch ''' - log('Creating bridge {}'.format(name)) - subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name]) - - -def del_bridge(name): - ''' Delete the named bridge from openvswitch ''' - log('Deleting bridge {}'.format(name)) - subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name]) - - -def add_bridge_port(name, port, promisc=False): - ''' Add a port to the named openvswitch bridge ''' - log('Adding port {} to bridge {}'.format(port, name)) - subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port", - name, port]) - subprocess.check_call(["ip", "link", "set", port, "up"]) - if promisc: - subprocess.check_call(["ip", "link", "set", port, "promisc", "on"]) - else: - subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) - - -def del_bridge_port(name, port): - ''' Delete a port from the named openvswitch bridge ''' - log('Deleting port {} from bridge {}'.format(port, name)) - subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port", - name, port]) - subprocess.check_call(["ip", "link", "set", port, "down"]) - subprocess.check_call(["ip", "link", "set", port, "promisc", "off"]) - 
- -def set_manager(manager): - ''' Set the controller for the local openvswitch ''' - log('Setting manager for local ovs to {}'.format(manager)) - subprocess.check_call(['ovs-vsctl', 'set-manager', - 'ssl:{}'.format(manager)]) - - -CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem' - - -def get_certificate(): - ''' Read openvswitch certificate from disk ''' - if os.path.exists(CERT_PATH): - log('Reading ovs certificate from {}'.format(CERT_PATH)) - with open(CERT_PATH, 'r') as cert: - full_cert = cert.read() - begin_marker = "-----BEGIN CERTIFICATE-----" - end_marker = "-----END CERTIFICATE-----" - begin_index = full_cert.find(begin_marker) - end_index = full_cert.rfind(end_marker) - if end_index == -1 or begin_index == -1: - raise RuntimeError("Certificate does not contain valid begin" - " and end markers.") - full_cert = full_cert[begin_index:(end_index + len(end_marker))] - return full_cert - else: - log('Certificate not found', level=WARNING) - return None - - -def full_restart(): - ''' Full restart and reload of openvswitch ''' - if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'): - service('start', 'openvswitch-force-reload-kmod') - else: - service('force-reload-kmod', 'openvswitch-switch') diff --git a/hooks/charmhelpers-pl/contrib/openstack/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/openstack/amulet/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/amulet/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/amulet/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py deleted file mode 100644 index 461a702..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/amulet/deployment.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. 
-# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import six -from collections import OrderedDict -from charmhelpers.contrib.amulet.deployment import ( - AmuletDeployment -) - - -class OpenStackAmuletDeployment(AmuletDeployment): - """OpenStack amulet deployment. - - This class inherits from AmuletDeployment and has additional support - that is specifically for use by OpenStack charms. - """ - - def __init__(self, series=None, openstack=None, source=None, stable=True): - """Initialize the deployment environment.""" - super(OpenStackAmuletDeployment, self).__init__(series) - self.openstack = openstack - self.source = source - self.stable = stable - # Note(coreycb): this needs to be changed when new next branches come - # out. - self.current_next = "trusty" - - def _determine_branch_locations(self, other_services): - """Determine the branch locations for the other services. - - Determine if the local branch being tested is derived from its - stable or next (dev) branch, and based on this, use the corresonding - stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] - - if self.series in ['precise', 'trusty']: - base_series = self.series - else: - base_series = self.current_next - - if self.stable: - for svc in other_services: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - for svc in other_services: - if svc['name'] in base_charms: - temp = 'lp:charms/{}/{}' - svc['location'] = temp.format(base_series, - svc['name']) - else: - temp = 'lp:~openstack-charmers/charms/{}/{}/next' - svc['location'] = temp.format(self.current_next, - svc['name']) - return other_services - - def _add_services(self, this_service, other_services): - """Add services to the deployment and set openstack-origin/source.""" - other_services = self._determine_branch_locations(other_services) - - super(OpenStackAmuletDeployment, 
self)._add_services(this_service, - other_services) - - services = other_services - services.append(this_service) - use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', - 'ceph-osd', 'ceph-radosgw'] - # Openstack subordinate charms do not expose an origin option as that - # is controlled by the principle - ignore = ['neutron-openvswitch'] - - if self.openstack: - for svc in services: - if svc['name'] not in use_source + ignore: - config = {'openstack-origin': self.openstack} - self.d.configure(svc['name'], config) - - if self.source: - for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: - config = {'source': self.source} - self.d.configure(svc['name'], config) - - def _configure_services(self, configs): - """Configure all of the services.""" - for service, config in six.iteritems(configs): - self.d.configure(service, config) - - def _get_openstack_release(self): - """Get openstack release. - - Return an integer representing the enum value of the openstack - release. - """ - # Must be ordered by OpenStack release (not by Ubuntu release): - (self.precise_essex, self.precise_folsom, self.precise_grizzly, - self.precise_havana, self.precise_icehouse, - self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) - - releases = { - ('precise', None): self.precise_essex, - ('precise', 'cloud:precise-folsom'): self.precise_folsom, - ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, - ('precise', 'cloud:precise-havana'): self.precise_havana, - ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, - ('trusty', None): self.trusty_icehouse, - ('trusty', 'cloud:trusty-juno'): self.trusty_juno, - ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, - ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} - return releases[(self.series, self.openstack)] - - def _get_openstack_release_string(self): - """Get openstack release string. 
- - Return a string representing the openstack release. - """ - releases = OrderedDict([ - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), - ]) - if self.openstack: - os_origin = self.openstack.split(':')[1] - return os_origin.split('%s-' % self.series)[1].split('/')[0] - else: - return releases[self.series] diff --git a/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py b/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py deleted file mode 100644 index 9c3d918..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/amulet/utils.py +++ /dev/null @@ -1,294 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import logging -import os -import time -import urllib - -import glanceclient.v1.client as glance_client -import keystoneclient.v2_0 as keystone_client -import novaclient.v1_1.client as nova_client - -import six - -from charmhelpers.contrib.amulet.utils import ( - AmuletUtils -) - -DEBUG = logging.DEBUG -ERROR = logging.ERROR - - -class OpenStackAmuletUtils(AmuletUtils): - """OpenStack amulet utilities. - - This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. 
- """ - - def __init__(self, log_level=ERROR): - """Initialize the deployment environment.""" - super(OpenStackAmuletUtils, self).__init__(log_level) - - def validate_endpoint_data(self, endpoints, admin_port, internal_port, - public_port, expected): - """Validate endpoint data. - - Validate actual endpoint data vs expected endpoint data. The ports - are used to find the matching endpoint. - """ - found = False - for ep in endpoints: - self.log.debug('endpoint: {}'.format(repr(ep))) - if (admin_port in ep.adminurl and - internal_port in ep.internalurl and - public_port in ep.publicurl): - found = True - actual = {'id': ep.id, - 'region': ep.region, - 'adminurl': ep.adminurl, - 'internalurl': ep.internalurl, - 'publicurl': ep.publicurl, - 'service_id': ep.service_id} - ret = self._validate_dict_data(expected, actual) - if ret: - return 'unexpected endpoint data - {}'.format(ret) - - if not found: - return 'endpoint not found' - - def validate_svc_catalog_endpoint_data(self, expected, actual): - """Validate service catalog endpoint data. - - Validate a list of actual service catalog endpoints vs a list of - expected service catalog endpoints. - """ - self.log.debug('actual: {}'.format(repr(actual))) - for k, v in six.iteritems(expected): - if k in actual: - ret = self._validate_dict_data(expected[k][0], actual[k][0]) - if ret: - return self.endpoint_error(k, ret) - else: - return "endpoint {} does not exist".format(k) - return ret - - def validate_tenant_data(self, expected, actual): - """Validate tenant data. - - Validate a list of actual tenant data vs list of expected tenant - data. 
- """ - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'description': act.description, - 'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected tenant data - {}".format(ret) - if not found: - return "tenant {} does not exist".format(e['name']) - return ret - - def validate_role_data(self, expected, actual): - """Validate role data. - - Validate a list of actual role data vs a list of expected role - data. - """ - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'name': act.name, 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected role data - {}".format(ret) - if not found: - return "role {} does not exist".format(e['name']) - return ret - - def validate_user_data(self, expected, actual): - """Validate user data. - - Validate a list of actual user data vs a list of expected user - data. - """ - self.log.debug('actual: {}'.format(repr(actual))) - for e in expected: - found = False - for act in actual: - a = {'enabled': act.enabled, 'name': act.name, - 'email': act.email, 'tenantId': act.tenantId, - 'id': act.id} - if e['name'] == a['name']: - found = True - ret = self._validate_dict_data(e, a) - if ret: - return "unexpected user data - {}".format(ret) - if not found: - return "user {} does not exist".format(e['name']) - return ret - - def validate_flavor_data(self, expected, actual): - """Validate flavor data. - - Validate a list of actual flavors vs a list of expected flavors. 
- """ - self.log.debug('actual: {}'.format(repr(actual))) - act = [a.name for a in actual] - return self._validate_list_data(expected, act) - - def tenant_exists(self, keystone, tenant): - """Return True if tenant exists.""" - return tenant in [t.name for t in keystone.tenants.list()] - - def authenticate_keystone_admin(self, keystone_sentry, user, password, - tenant): - """Authenticates admin user with the keystone admin endpoint.""" - unit = keystone_sentry - service_ip = unit.relation('shared-db', - 'mysql:shared-db')['private-address'] - ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8')) - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - - def authenticate_keystone_user(self, keystone, user, password, tenant): - """Authenticates a regular user with the keystone public endpoint.""" - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return keystone_client.Client(username=user, password=password, - tenant_name=tenant, auth_url=ep) - - def authenticate_glance_admin(self, keystone): - """Authenticates admin user with glance.""" - ep = keystone.service_catalog.url_for(service_type='image', - endpoint_type='adminURL') - return glance_client.Client(ep, token=keystone.auth_token) - - def authenticate_nova_user(self, keystone, user, password, tenant): - """Authenticates a regular user with nova-api.""" - ep = keystone.service_catalog.url_for(service_type='identity', - endpoint_type='publicURL') - return nova_client.Client(username=user, api_key=password, - project_id=tenant, auth_url=ep) - - def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" - http_proxy = os.getenv('AMULET_HTTP_PROXY') - self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) - if http_proxy: - proxies = {'http': http_proxy} - opener = urllib.FancyURLopener(proxies) - else: - opener = urllib.FancyURLopener() - - f = 
opener.open("http://download.cirros-cloud.net/version/released") - version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) - local_path = os.path.join('tests', cirros_img) - - if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", - version, cirros_img) - opener.retrieve(cirros_url, local_path) - f.close() - - with open(local_path) as f: - image = glance.images.create(name=image_name, is_public=True, - disk_format='qcow2', - container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - - if status != 'active': - self.log.error('image creation timed out') - return None - - return image - - def delete_image(self, glance, image): - """Delete the specified image.""" - num_before = len(list(glance.images.list())) - glance.images.delete(image) - - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True - - def create_instance(self, nova, image_name, instance_name, flavor): - """Create the specified instance.""" - image = nova.images.find(name=image_name) - flavor = nova.flavors.find(name=flavor) - instance = nova.servers.create(name=instance_name, image=image, - flavor=flavor) - - count = 1 - status = instance.status - while status != 'ACTIVE' and count < 60: - time.sleep(3) - instance = nova.servers.get(instance.id) - status = instance.status - self.log.debug('instance status: {}'.format(status)) - count += 1 - - if status != 'ACTIVE': - self.log.error('instance creation timed out') - return None - - 
return instance - - def delete_instance(self, nova, instance): - """Delete the specified instance.""" - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') - return False - - return True diff --git a/hooks/charmhelpers-pl/contrib/openstack/context.py b/hooks/charmhelpers-pl/contrib/openstack/context.py deleted file mode 100644 index 76ffb27..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/context.py +++ /dev/null @@ -1,1343 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import json -import os -import re -import time -from base64 import b64decode -from subprocess import check_call - -import six -import yaml - -from charmhelpers.fetch import ( - apt_install, - filter_installed_packages, -) -from charmhelpers.core.hookenv import ( - config, - is_relation_made, - local_unit, - log, - relation_get, - relation_ids, - related_units, - relation_set, - unit_get, - unit_private_ip, - charm_name, - DEBUG, - INFO, - WARNING, - ERROR, -) - -from charmhelpers.core.sysctl import create as sysctl_create -from charmhelpers.core.strutils import bool_from_string - -from charmhelpers.core.host import ( - list_nics, - get_nic_hwaddr, - mkdir, - write_file, -) -from charmhelpers.contrib.hahelpers.cluster import ( - determine_apache_port, - determine_api_port, - https, - is_clustered, -) -from charmhelpers.contrib.hahelpers.apache import ( - get_cert, - get_ca_cert, - install_ca_cert, -) -from charmhelpers.contrib.openstack.neutron import ( - neutron_plugin_attribute, - parse_data_port_mappings, -) -from charmhelpers.contrib.openstack.ip import ( - resolve_address, - INTERNAL, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - get_ipv4_addr, - get_ipv6_addr, - get_netmask_for_address, - format_ipv6_addr, - is_address_in_network, - is_bridge_member, -) -from charmhelpers.contrib.openstack.utils import get_host_ip -CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' -ADDRESS_TYPES = ['admin', 'internal', 'public'] - - -class OSContextError(Exception): - pass - - -def ensure_packages(packages): - """Install but do not upgrade required plugin packages.""" - required = filter_installed_packages(packages) - if required: - apt_install(required, fatal=True) - - -def context_complete(ctxt): - _missing = [] - for k, v in six.iteritems(ctxt): - if v is None or v == '': - _missing.append(k) - - if _missing: - log('Missing required data: %s' % ' '.join(_missing), level=INFO) - return False - - return True - - 
-def config_flags_parser(config_flags): - """Parses config flags string into dict. - - This parsing method supports a few different formats for the config - flag values to be parsed: - - 1. A string in the simple format of key=value pairs, with the possibility - of specifying multiple key value pairs within the same string. For - example, a string in the format of 'key1=value1, key2=value2' will - return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. - - 2. A string in the above format, but supporting a comma-delimited list - of values for the same key. For example, a string in the format of - 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} - - 3. A string containing a colon character (:) prior to an equal - character (=) will be treated as yaml and parsed as such. This can be - used to specify more complex key value pairs. For example, - a string in the format of 'key1: subkey1=value1, subkey2=value2' will - return a dict of: - {'key1', 'subkey1=value1, subkey2=value2'} - - The provided config_flags string may be a list of comma-separated values - which themselves may be comma-separated list of values. - """ - # If we find a colon before an equals sign then treat it as yaml. - # Note: limit it to finding the colon first since this indicates assignment - # for inline yaml. - colon = config_flags.find(':') - equals = config_flags.find('=') - if colon > 0: - if colon < equals or equals < 0: - return yaml.safe_load(config_flags) - - if config_flags.find('==') >= 0: - log("config_flags is not in expected format (key=value)", level=ERROR) - raise OSContextError - - # strip the following from each value. - post_strippers = ' ,' - # we strip any leading/trailing '=' or ' ' from the string then - # split on '='. 
- split = config_flags.strip(' =').split('=') - limit = len(split) - flags = {} - for i in range(0, limit - 1): - current = split[i] - next = split[i + 1] - vindex = next.rfind(',') - if (i == limit - 2) or (vindex < 0): - value = next - else: - value = next[:vindex] - - if i == 0: - key = current - else: - # if this not the first entry, expect an embedded key. - index = current.rfind(',') - if index < 0: - log("Invalid config value(s) at index %s" % (i), level=ERROR) - raise OSContextError - key = current[index + 1:] - - # Add to collection. - flags[key.strip(post_strippers)] = value.rstrip(post_strippers) - - return flags - - -class OSContextGenerator(object): - """Base class for all context generators.""" - interfaces = [] - - def __call__(self): - raise NotImplementedError - - -class SharedDBContext(OSContextGenerator): - interfaces = ['shared-db'] - - def __init__(self, - database=None, user=None, relation_prefix=None, ssl_dir=None): - """Allows inspecting relation for settings prefixed with - relation_prefix. This is useful for parsing access for multiple - databases returned via the shared-db interface (eg, nova_password, - quantum_password) - """ - self.relation_prefix = relation_prefix - self.database = database - self.user = user - self.ssl_dir = ssl_dir - - def __call__(self): - self.database = self.database or config('database') - self.user = self.user or config('database-user') - if None in [self.database, self.user]: - log("Could not generate shared_db context. Missing required charm " - "config options. 
(database name and user)", level=ERROR) - raise OSContextError - - ctxt = {} - - # NOTE(jamespage) if mysql charm provides a network upon which - # access to the database should be made, reconfigure relation - # with the service units local address and defer execution - access_network = relation_get('access-network') - if access_network is not None: - if self.relation_prefix is not None: - hostname_key = "{}_hostname".format(self.relation_prefix) - else: - hostname_key = "hostname" - access_hostname = get_address_in_network(access_network, - unit_get('private-address')) - set_hostname = relation_get(attribute=hostname_key, - unit=local_unit()) - if set_hostname != access_hostname: - relation_set(relation_settings={hostname_key: access_hostname}) - return None # Defer any further hook execution for now.... - - password_setting = 'password' - if self.relation_prefix: - password_setting = self.relation_prefix + '_password' - - for rid in relation_ids('shared-db'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - host = rdata.get('db_host') - host = format_ipv6_addr(host) or host - ctxt = { - 'database_host': host, - 'database': self.database, - 'database_user': self.user, - 'database_password': rdata.get(password_setting), - 'database_type': 'mysql' - } - if context_complete(ctxt): - db_ssl(rdata, ctxt, self.ssl_dir) - return ctxt - return {} - - -class PostgresqlDBContext(OSContextGenerator): - interfaces = ['pgsql-db'] - - def __init__(self, database=None): - self.database = database - - def __call__(self): - self.database = self.database or config('database') - if self.database is None: - log('Could not generate postgresql_db context. Missing required ' - 'charm config options. 
(database name)', level=ERROR) - raise OSContextError - - ctxt = {} - for rid in relation_ids(self.interfaces[0]): - for unit in related_units(rid): - rel_host = relation_get('host', rid=rid, unit=unit) - rel_user = relation_get('user', rid=rid, unit=unit) - rel_passwd = relation_get('password', rid=rid, unit=unit) - ctxt = {'database_host': rel_host, - 'database': self.database, - 'database_user': rel_user, - 'database_password': rel_passwd, - 'database_type': 'postgresql'} - if context_complete(ctxt): - return ctxt - - return {} - - -def db_ssl(rdata, ctxt, ssl_dir): - if 'ssl_ca' in rdata and ssl_dir: - ca_path = os.path.join(ssl_dir, 'db-client.ca') - with open(ca_path, 'w') as fh: - fh.write(b64decode(rdata['ssl_ca'])) - - ctxt['database_ssl_ca'] = ca_path - elif 'ssl_ca' in rdata: - log("Charm not setup for ssl support but ssl ca found", level=INFO) - return ctxt - - if 'ssl_cert' in rdata: - cert_path = os.path.join( - ssl_dir, 'db-client.cert') - if not os.path.exists(cert_path): - log("Waiting 1m for ssl client cert validity", level=INFO) - time.sleep(60) - - with open(cert_path, 'w') as fh: - fh.write(b64decode(rdata['ssl_cert'])) - - ctxt['database_ssl_cert'] = cert_path - key_path = os.path.join(ssl_dir, 'db-client.key') - with open(key_path, 'w') as fh: - fh.write(b64decode(rdata['ssl_key'])) - - ctxt['database_ssl_key'] = key_path - - return ctxt - - -class IdentityServiceContext(OSContextGenerator): - - def __init__(self, service=None, service_user=None, rel_name='identity-service'): - self.service = service - self.service_user = service_user - self.rel_name = rel_name - self.interfaces = [self.rel_name] - - def __call__(self): - log('Generating template context for ' + self.rel_name, level=DEBUG) - ctxt = {} - - if self.service and self.service_user: - # This is required for pki token signing if we don't want /tmp to - # be used. 
- cachedir = '/var/cache/%s' % (self.service) - if not os.path.isdir(cachedir): - log("Creating service cache dir %s" % (cachedir), level=DEBUG) - mkdir(path=cachedir, owner=self.service_user, - group=self.service_user, perms=0o700) - - ctxt['signing_dir'] = cachedir - - for rid in relation_ids(self.rel_name): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - serv_host = rdata.get('service_host') - serv_host = format_ipv6_addr(serv_host) or serv_host - auth_host = rdata.get('auth_host') - auth_host = format_ipv6_addr(auth_host) or auth_host - svc_protocol = rdata.get('service_protocol') or 'http' - auth_protocol = rdata.get('auth_protocol') or 'http' - ctxt.update({'service_port': rdata.get('service_port'), - 'service_host': serv_host, - 'auth_host': auth_host, - 'auth_port': rdata.get('auth_port'), - 'admin_tenant_name': rdata.get('service_tenant'), - 'admin_user': rdata.get('service_username'), - 'admin_password': rdata.get('service_password'), - 'service_protocol': svc_protocol, - 'auth_protocol': auth_protocol}) - - if context_complete(ctxt): - # NOTE(jamespage) this is required for >= icehouse - # so a missing value just indicates keystone needs - # upgrading - ctxt['admin_tenant_id'] = rdata.get('service_tenant_id') - return ctxt - - return {} - - -class AMQPContext(OSContextGenerator): - - def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None): - self.ssl_dir = ssl_dir - self.rel_name = rel_name - self.relation_prefix = relation_prefix - self.interfaces = [rel_name] - - def __call__(self): - log('Generating template context for amqp', level=DEBUG) - conf = config() - if self.relation_prefix: - user_setting = '%s-rabbit-user' % (self.relation_prefix) - vhost_setting = '%s-rabbit-vhost' % (self.relation_prefix) - else: - user_setting = 'rabbit-user' - vhost_setting = 'rabbit-vhost' - - try: - username = conf[user_setting] - vhost = conf[vhost_setting] - except KeyError as e: - log('Could not generate shared_db 
context. Missing required charm ' - 'config options: %s.' % e, level=ERROR) - raise OSContextError - - ctxt = {} - for rid in relation_ids(self.rel_name): - ha_vip_only = False - for unit in related_units(rid): - if relation_get('clustered', rid=rid, unit=unit): - ctxt['clustered'] = True - vip = relation_get('vip', rid=rid, unit=unit) - vip = format_ipv6_addr(vip) or vip - ctxt['rabbitmq_host'] = vip - else: - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - ctxt['rabbitmq_host'] = host - - ctxt.update({ - 'rabbitmq_user': username, - 'rabbitmq_password': relation_get('password', rid=rid, - unit=unit), - 'rabbitmq_virtual_host': vhost, - }) - - ssl_port = relation_get('ssl_port', rid=rid, unit=unit) - if ssl_port: - ctxt['rabbit_ssl_port'] = ssl_port - - ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit) - if ssl_ca: - ctxt['rabbit_ssl_ca'] = ssl_ca - - if relation_get('ha_queues', rid=rid, unit=unit) is not None: - ctxt['rabbitmq_ha_queues'] = True - - ha_vip_only = relation_get('ha-vip-only', - rid=rid, unit=unit) is not None - - if context_complete(ctxt): - if 'rabbit_ssl_ca' in ctxt: - if not self.ssl_dir: - log("Charm not setup for ssl support but ssl ca " - "found", level=INFO) - break - - ca_path = os.path.join( - self.ssl_dir, 'rabbit-client-ca.pem') - with open(ca_path, 'w') as fh: - fh.write(b64decode(ctxt['rabbit_ssl_ca'])) - ctxt['rabbit_ssl_ca'] = ca_path - - # Sufficient information found = break out! 
- break - - # Used for active/active rabbitmq >= grizzly - if (('clustered' not in ctxt or ha_vip_only) and - len(related_units(rid)) > 1): - rabbitmq_hosts = [] - for unit in related_units(rid): - host = relation_get('private-address', rid=rid, unit=unit) - host = format_ipv6_addr(host) or host - rabbitmq_hosts.append(host) - - ctxt['rabbitmq_hosts'] = ','.join(sorted(rabbitmq_hosts)) - - oslo_messaging_flags = conf.get('oslo-messaging-flags', None) - if oslo_messaging_flags: - ctxt['oslo_messaging_flags'] = config_flags_parser( - oslo_messaging_flags) - - if not context_complete(ctxt): - return {} - - return ctxt - - -class CephContext(OSContextGenerator): - """Generates context for /etc/ceph/ceph.conf templates.""" - interfaces = ['ceph'] - - def __call__(self): - if not relation_ids('ceph'): - return {} - - log('Generating template context for ceph', level=DEBUG) - mon_hosts = [] - auth = None - key = None - use_syslog = str(config('use-syslog')).lower() - for rid in relation_ids('ceph'): - for unit in related_units(rid): - auth = relation_get('auth', rid=rid, unit=unit) - key = relation_get('key', rid=rid, unit=unit) - ceph_pub_addr = relation_get('ceph-public-address', rid=rid, - unit=unit) - unit_priv_addr = relation_get('private-address', rid=rid, - unit=unit) - ceph_addr = ceph_pub_addr or unit_priv_addr - ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr - mon_hosts.append(ceph_addr) - - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog} - - if not os.path.isdir('/etc/ceph'): - os.mkdir('/etc/ceph') - - if not context_complete(ctxt): - return {} - - ensure_packages(['ceph-common']) - return ctxt - - -class HAProxyContext(OSContextGenerator): - """Provides half a context for the haproxy template, which describes - all peers to be included in the cluster. Each charm needs to include - its own context generator that describes the port mapping. 
- """ - interfaces = ['cluster'] - - def __init__(self, singlenode_mode=False): - self.singlenode_mode = singlenode_mode - - def __call__(self): - if not relation_ids('cluster') and not self.singlenode_mode: - return {} - - if config('prefer-ipv6'): - addr = get_ipv6_addr(exc_list=[config('vip')])[0] - else: - addr = get_host_ip(unit_get('private-address')) - - l_unit = local_unit().replace('/', '-') - cluster_hosts = {} - - # NOTE(jamespage): build out map of configured network endpoints - # and associated backends - for addr_type in ADDRESS_TYPES: - cfg_opt = 'os-{}-network'.format(addr_type) - laddr = get_address_in_network(config(cfg_opt)) - if laddr: - netmask = get_netmask_for_address(laddr) - cluster_hosts[laddr] = {'network': "{}/{}".format(laddr, - netmask), - 'backends': {l_unit: laddr}} - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _laddr = relation_get('{}-address'.format(addr_type), - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[laddr]['backends'][_unit] = _laddr - - # NOTE(jamespage) add backend based on private address - this - # with either be the only backend or the fallback if no acls - # match in the frontend - cluster_hosts[addr] = {} - netmask = get_netmask_for_address(addr) - cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask), - 'backends': {l_unit: addr}} - for rid in relation_ids('cluster'): - for unit in related_units(rid): - _laddr = relation_get('private-address', - rid=rid, unit=unit) - if _laddr: - _unit = unit.replace('/', '-') - cluster_hosts[addr]['backends'][_unit] = _laddr - - ctxt = { - 'frontends': cluster_hosts, - 'default_backend': addr - } - - if config('haproxy-server-timeout'): - ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout') - - if config('haproxy-client-timeout'): - ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout') - - if config('prefer-ipv6'): - ctxt['ipv6'] = True - ctxt['local_host'] = 'ip6-localhost' - 
ctxt['haproxy_host'] = '::' - ctxt['stat_port'] = ':::8888' - else: - ctxt['local_host'] = '127.0.0.1' - ctxt['haproxy_host'] = '0.0.0.0' - ctxt['stat_port'] = ':8888' - - for frontend in cluster_hosts: - if (len(cluster_hosts[frontend]['backends']) > 1 or - self.singlenode_mode): - # Enable haproxy when we have enough peers. - log('Ensuring haproxy enabled in /etc/default/haproxy.', - level=DEBUG) - with open('/etc/default/haproxy', 'w') as out: - out.write('ENABLED=1\n') - - return ctxt - - log('HAProxy context is incomplete, this unit has no peers.', - level=INFO) - return {} - - -class ImageServiceContext(OSContextGenerator): - interfaces = ['image-service'] - - def __call__(self): - """Obtains the glance API server from the image-service relation. - Useful in nova and cinder (currently). - """ - log('Generating template context for image-service.', level=DEBUG) - rids = relation_ids('image-service') - if not rids: - return {} - - for rid in rids: - for unit in related_units(rid): - api_server = relation_get('glance-api-server', - rid=rid, unit=unit) - if api_server: - return {'glance_api_servers': api_server} - - log("ImageService context is incomplete. Missing required relation " - "data.", level=INFO) - return {} - - -class ApacheSSLContext(OSContextGenerator): - """Generates a context for an apache vhost configuration that configures - HTTPS reverse proxying for one or many endpoints. Generated context - looks something like:: - - { - 'namespace': 'cinder', - 'private_address': 'iscsi.mycinderhost.com', - 'endpoints': [(8776, 8766), (8777, 8767)] - } - - The endpoints list consists of a tuples mapping external ports - to internal ports. - """ - interfaces = ['https'] - - # charms should inherit this context and set external ports - # and service namespace accordingly. 
- external_ports = [] - service_namespace = None - - def enable_modules(self): - cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] - check_call(cmd) - - def configure_cert(self, cn=None): - ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) - mkdir(path=ssl_dir) - cert, key = get_cert(cn) - if cn: - cert_filename = 'cert_{}'.format(cn) - key_filename = 'key_{}'.format(cn) - else: - cert_filename = 'cert' - key_filename = 'key' - - write_file(path=os.path.join(ssl_dir, cert_filename), - content=b64decode(cert)) - write_file(path=os.path.join(ssl_dir, key_filename), - content=b64decode(key)) - - def configure_ca(self): - ca_cert = get_ca_cert() - if ca_cert: - install_ca_cert(b64decode(ca_cert)) - - def canonical_names(self): - """Figure out which canonical names clients will access this service. - """ - cns = [] - for r_id in relation_ids('identity-service'): - for unit in related_units(r_id): - rdata = relation_get(rid=r_id, unit=unit) - for k in rdata: - if k.startswith('ssl_key_'): - cns.append(k.lstrip('ssl_key_')) - - return sorted(list(set(cns))) - - def get_network_addresses(self): - """For each network configured, return corresponding address and vip - (if available). - - Returns a list of tuples of the form: - - [(address_in_net_a, vip_in_net_a), - (address_in_net_b, vip_in_net_b), - ...] - - or, if no vip(s) available: - - [(address_in_net_a, address_in_net_a), - (address_in_net_b, address_in_net_b), - ...] - """ - addresses = [] - if config('vip'): - vips = config('vip').split() - else: - vips = [] - - for net_type in ['os-internal-network', 'os-admin-network', - 'os-public-network']: - addr = get_address_in_network(config(net_type), - unit_get('private-address')) - if len(vips) > 1 and is_clustered(): - if not config(net_type): - log("Multiple networks configured but net_type " - "is None (%s)." 
% net_type, level=WARNING) - continue - - for vip in vips: - if is_address_in_network(config(net_type), vip): - addresses.append((addr, vip)) - break - - elif is_clustered() and config('vip'): - addresses.append((addr, config('vip'))) - else: - addresses.append((addr, addr)) - - return sorted(addresses) - - def __call__(self): - if isinstance(self.external_ports, six.string_types): - self.external_ports = [self.external_ports] - - if not self.external_ports or not https(): - return {} - - self.configure_ca() - self.enable_modules() - - ctxt = {'namespace': self.service_namespace, - 'endpoints': [], - 'ext_ports': []} - - cns = self.canonical_names() - if cns: - for cn in cns: - self.configure_cert(cn) - else: - # Expect cert/key provided in config (currently assumed that ca - # uses ip for cn) - cn = resolve_address(endpoint_type=INTERNAL) - self.configure_cert(cn) - - addresses = self.get_network_addresses() - for address, endpoint in sorted(set(addresses)): - for api_port in self.external_ports: - ext_port = determine_apache_port(api_port, - singlenode_mode=True) - int_port = determine_api_port(api_port, singlenode_mode=True) - portmap = (address, endpoint, int(ext_port), int(int_port)) - ctxt['endpoints'].append(portmap) - ctxt['ext_ports'].append(int(ext_port)) - - ctxt['ext_ports'] = sorted(list(set(ctxt['ext_ports']))) - return ctxt - - -class NeutronContext(OSContextGenerator): - interfaces = [] - - @property - def plugin(self): - return None - - @property - def network_manager(self): - return None - - @property - def packages(self): - return neutron_plugin_attribute(self.plugin, 'packages', - self.network_manager) - - @property - def neutron_security_groups(self): - return None - - def _ensure_packages(self): - for pkgs in self.packages: - ensure_packages(pkgs) - - def _save_flag_file(self): - if self.network_manager == 'quantum': - _file = '/etc/nova/quantum_plugin.conf' - else: - _file = '/etc/nova/neutron_plugin.conf' - - with open(_file, 'wb') as out: - 
out.write(self.plugin + '\n') - - def ovs_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'ovs', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return ovs_ctxt - - def nuage_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nuage_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'vsp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nuage_ctxt - - def nvp_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - nvp_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'nvp', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return nvp_ctxt - - def n1kv_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - n1kv_config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - n1kv_user_config_flags = config('n1kv-config-flags') - restrict_policy_profiles = config('n1kv-restrict-policy-profiles') - n1kv_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'n1kv', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': n1kv_config, - 'vsm_ip': config('n1kv-vsm-ip'), - 'vsm_username': config('n1kv-vsm-username'), - 'vsm_password': config('n1kv-vsm-password'), - 'restrict_policy_profiles': restrict_policy_profiles} - - if n1kv_user_config_flags: - flags = config_flags_parser(n1kv_user_config_flags) - n1kv_ctxt['user_config_flags'] 
= flags - - return n1kv_ctxt - - def calico_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - calico_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'Calico', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return calico_ctxt - - def pg_ctxt(self): - driver = neutron_plugin_attribute(self.plugin, 'driver', - self.network_manager) - config = neutron_plugin_attribute(self.plugin, 'config', - self.network_manager) - ovs_ctxt = {'core_plugin': driver, - 'neutron_plugin': 'plumgrid', - 'neutron_security_groups': self.neutron_security_groups, - 'local_ip': unit_private_ip(), - 'config': config} - - return ovs_ctxt - - def neutron_ctxt(self): - if https(): - proto = 'https' - else: - proto = 'http' - - if is_clustered(): - host = config('vip') - else: - host = unit_get('private-address') - - ctxt = {'network_manager': self.network_manager, - 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} - return ctxt - - def __call__(self): - self._ensure_packages() - - if self.network_manager not in ['quantum', 'neutron']: - return {} - - if not self.plugin: - return {} - - ctxt = self.neutron_ctxt() - - if self.plugin == 'ovs': - ctxt.update(self.ovs_ctxt()) - elif self.plugin in ['nvp', 'nsx']: - ctxt.update(self.nvp_ctxt()) - elif self.plugin == 'n1kv': - ctxt.update(self.n1kv_ctxt()) - elif self.plugin == 'Calico': - ctxt.update(self.calico_ctxt()) - elif self.plugin == 'vsp': - ctxt.update(self.nuage_ctxt()) - elif self.plugin == 'plumgrid': - ctxt.update(self.pg_ctxt()) - - alchemy_flags = config('neutron-alchemy-flags') - if alchemy_flags: - flags = config_flags_parser(alchemy_flags) - ctxt['neutron_alchemy_flags'] = flags - - self._save_flag_file() - return ctxt - - -class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] - - def resolve_ports(self, 
ports): - """Resolve NICs not yet bound to bridge(s) - - If hwaddress provided then returns resolved hwaddress otherwise NIC. - """ - if not ports: - return None - - hwaddr_to_nic = {} - hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): - hwaddr = get_nic_hwaddr(nic) - hwaddr_to_nic[hwaddr] = nic - addresses = get_ipv4_addr(nic, fatal=False) - addresses += get_ipv6_addr(iface=nic, fatal=False) - hwaddr_to_ip[hwaddr] = addresses - - resolved = [] - mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I) - for entry in ports: - if re.match(mac_regex, entry): - # NIC is in known NICs and does NOT hace an IP address - if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]: - # If the nic is part of a bridge then don't use it - if is_bridge_member(hwaddr_to_nic[entry]): - continue - - # Entry is a MAC address for a valid interface that doesn't - # have an IP address assigned yet. - resolved.append(hwaddr_to_nic[entry]) - else: - # If the passed entry is not a MAC address, assume it's a valid - # interface, and that the user put it there on purpose (we can - # trust it to be the real external network). - resolved.append(entry) - - return resolved - - -class OSConfigFlagContext(OSContextGenerator): - """Provides support for user-defined config flags. - - Users can define a comma-seperated list of key=value pairs - in the charm configuration and apply them at any point in - any file by using a template flag. - - Sometimes users might want config flags inserted within a - specific section so this class allows users to specify the - template flag name, allowing for multiple template flags - (sections) within the same context. - - NOTE: the value of config-flags may be a comma-separated list of - key=value pairs and some Openstack config files support - comma-separated lists as values. - """ - - def __init__(self, charm_flag='config-flags', - template_flag='user_config_flags'): - """ - :param charm_flag: config flags in charm configuration. 
- :param template_flag: insert point for user-defined flags in template - file. - """ - super(OSConfigFlagContext, self).__init__() - self._charm_flag = charm_flag - self._template_flag = template_flag - - def __call__(self): - config_flags = config(self._charm_flag) - if not config_flags: - return {} - - return {self._template_flag: - config_flags_parser(config_flags)} - - -class SubordinateConfigContext(OSContextGenerator): - - """ - Responsible for inspecting relations to subordinates that - may be exporting required config via a json blob. - - The subordinate interface allows subordinates to export their - configuration requirements to the principle for multiple config - files and multiple serivces. Ie, a subordinate that has interfaces - to both glance and nova may export to following yaml blob as json:: - - glance: - /etc/glance/glance-api.conf: - sections: - DEFAULT: - - [key1, value1] - /etc/glance/glance-registry.conf: - MYSECTION: - - [key2, value2] - nova: - /etc/nova/nova.conf: - sections: - DEFAULT: - - [key3, value3] - - - It is then up to the principle charms to subscribe this context to - the service+config file it is interestd in. Configuration data will - be available in the template context, in glance's case, as:: - - ctxt = { - ... other context ... 
- 'subordinate_config': { - 'DEFAULT': { - 'key1': 'value1', - }, - 'MYSECTION': { - 'key2': 'value2', - }, - } - } - """ - - def __init__(self, service, config_file, interface): - """ - :param service : Service name key to query in any subordinate - data found - :param config_file : Service's config file to query sections - :param interface : Subordinate interface to inspect - """ - self.service = service - self.config_file = config_file - self.interface = interface - - def __call__(self): - ctxt = {'sections': {}} - for rid in relation_ids(self.interface): - for unit in related_units(rid): - sub_config = relation_get('subordinate_configuration', - rid=rid, unit=unit) - if sub_config and sub_config != '': - try: - sub_config = json.loads(sub_config) - except: - log('Could not parse JSON from subordinate_config ' - 'setting from %s' % rid, level=ERROR) - continue - - if self.service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service), - level=INFO) - continue - - sub_config = sub_config[self.service] - if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) - continue - - sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): - if k == 'sections': - for section, config_dict in six.iteritems(v): - log("adding section '%s'" % (section), - level=DEBUG) - ctxt[k][section] = config_dict - else: - ctxt[k] = v - - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) - return ctxt - - -class LogLevelContext(OSContextGenerator): - - def __call__(self): - ctxt = {} - ctxt['debug'] = \ - False if config('debug') is None else config('debug') - ctxt['verbose'] = \ - False if config('verbose') is None else config('verbose') - - return ctxt - - -class SyslogContext(OSContextGenerator): - - def __call__(self): - ctxt = {'use_syslog': config('use-syslog')} - return ctxt - - 
-class BindHostContext(OSContextGenerator): - - def __call__(self): - if config('prefer-ipv6'): - return {'bind_host': '::'} - else: - return {'bind_host': '0.0.0.0'} - - -class WorkerConfigContext(OSContextGenerator): - - @property - def num_cpus(self): - try: - from psutil import NUM_CPUS - except ImportError: - apt_install('python-psutil', fatal=True) - from psutil import NUM_CPUS - - return NUM_CPUS - - def __call__(self): - multiplier = config('worker-multiplier') or 0 - ctxt = {"workers": self.num_cpus * multiplier} - return ctxt - - -class ZeroMQContext(OSContextGenerator): - interfaces = ['zeromq-configuration'] - - def __call__(self): - ctxt = {} - if is_relation_made('zeromq-configuration', 'host'): - for rid in relation_ids('zeromq-configuration'): - for unit in related_units(rid): - ctxt['zmq_nonce'] = relation_get('nonce', unit, rid) - ctxt['zmq_host'] = relation_get('host', unit, rid) - ctxt['zmq_redis_address'] = relation_get( - 'zmq_redis_address', unit, rid) - - return ctxt - - -class NotificationDriverContext(OSContextGenerator): - - def __init__(self, zmq_relation='zeromq-configuration', - amqp_relation='amqp'): - """ - :param zmq_relation: Name of Zeromq relation to check - """ - self.zmq_relation = zmq_relation - self.amqp_relation = amqp_relation - - def __call__(self): - ctxt = {'notifications': 'False'} - if is_relation_made(self.amqp_relation): - ctxt['notifications'] = "True" - - return ctxt - - -class SysctlContext(OSContextGenerator): - """This context check if the 'sysctl' option exists on configuration - then creates a file with the loaded contents""" - def __call__(self): - sysctl_dict = config('sysctl') - if sysctl_dict: - sysctl_create(sysctl_dict, - '/etc/sysctl.d/50-{0}.conf'.format(charm_name())) - return {'sysctl': sysctl_dict} - - -class NeutronAPIContext(OSContextGenerator): - ''' - Inspects current neutron-plugin-api relation for neutron settings. Return - defaults if it is not present. 
- ''' - interfaces = ['neutron-plugin-api'] - - def __call__(self): - self.neutron_defaults = { - 'l2_population': { - 'rel_key': 'l2-population', - 'default': False, - }, - 'overlay_network_type': { - 'rel_key': 'overlay-network-type', - 'default': 'gre', - }, - 'neutron_security_groups': { - 'rel_key': 'neutron-security-groups', - 'default': False, - }, - 'network_device_mtu': { - 'rel_key': 'network-device-mtu', - 'default': None, - }, - 'enable_dvr': { - 'rel_key': 'enable-dvr', - 'default': False, - }, - 'enable_l3ha': { - 'rel_key': 'enable-l3ha', - 'default': False, - }, - } - ctxt = self.get_neutron_options({}) - for rid in relation_ids('neutron-plugin-api'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if 'l2-population' in rdata: - ctxt.update(self.get_neutron_options(rdata)) - - return ctxt - - def get_neutron_options(self, rdata): - settings = {} - for nkey in self.neutron_defaults.keys(): - defv = self.neutron_defaults[nkey]['default'] - rkey = self.neutron_defaults[nkey]['rel_key'] - if rkey in rdata.keys(): - if type(defv) is bool: - settings[nkey] = bool_from_string(rdata[rkey]) - else: - settings[nkey] = rdata[rkey] - else: - settings[nkey] = defv - return settings - - -class ExternalPortContext(NeutronPortContext): - - def __call__(self): - ctxt = {} - ports = config('ext-port') - if ports: - ports = [p.strip() for p in ports.split()] - ports = self.resolve_ports(ports) - if ports: - ctxt = {"ext_port": ports[0]} - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - if mtu: - ctxt['ext_port_mtu'] = mtu - - return ctxt - - -class DataPortContext(NeutronPortContext): - - def __call__(self): - ports = config('data-port') - if ports: - portmap = parse_data_port_mappings(ports) - ports = portmap.values() - resolved = self.resolve_ports(ports) - normalized = {get_nic_hwaddr(port): port for port in resolved - if port not in ports} - normalized.update({port: port for port in resolved - 
if port in ports}) - if resolved: - return {bridge: normalized[port] for bridge, port in - six.iteritems(portmap) if port in normalized.keys()} - - return None - - -class PhyNICMTUContext(DataPortContext): - - def __call__(self): - ctxt = {} - mappings = super(PhyNICMTUContext, self).__call__() - if mappings and mappings.values(): - ports = mappings.values() - napi_settings = NeutronAPIContext()() - mtu = napi_settings.get('network_device_mtu') - if mtu: - ctxt["devs"] = '\\n'.join(ports) - ctxt['mtu'] = mtu - - return ctxt - - -class NetworkServiceContext(OSContextGenerator): - - def __init__(self, rel_name='quantum-network-service'): - self.rel_name = rel_name - self.interfaces = [rel_name] - - def __call__(self): - for rid in relation_ids(self.rel_name): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - ctxt = { - 'keystone_host': rdata.get('keystone_host'), - 'service_port': rdata.get('service_port'), - 'auth_port': rdata.get('auth_port'), - 'service_tenant': rdata.get('service_tenant'), - 'service_username': rdata.get('service_username'), - 'service_password': rdata.get('service_password'), - 'quantum_host': rdata.get('quantum_host'), - 'quantum_port': rdata.get('quantum_port'), - 'quantum_url': rdata.get('quantum_url'), - 'region': rdata.get('region'), - 'service_protocol': - rdata.get('service_protocol') or 'http', - 'auth_protocol': - rdata.get('auth_protocol') or 'http', - } - if context_complete(ctxt): - return ctxt - return {} diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh deleted file mode 100755 index eb8527f..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. 
-# Author: Tom Haddon - -CRITICAL=0 -NOTACTIVE='' -LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') - -for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); -do - output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') - if [ $? != 0 ]; then - date >> $LOGFILE - echo $output >> $LOGFILE - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 - CRITICAL=1 - NOTACTIVE="${NOTACTIVE} $appserver" - fi -done - -if [ $CRITICAL = 1 ]; then - echo "CRITICAL:${NOTACTIVE}" - exit 2 -fi - -echo "OK: All haproxy instances looking good" -exit 0 diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh deleted file mode 100755 index 3ebb532..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/files/check_haproxy_queue_depth.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. 
-# Author: Tom Haddon - -# These should be config options at some stage -CURRQthrsh=0 -MAXQthrsh=100 - -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') - -HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) - -for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') -do - CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) - MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) - - if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then - echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" - exit 2 - fi -done - -echo "OK: All haproxy queue depths looking good" -exit 0 - diff --git a/hooks/charmhelpers-pl/contrib/openstack/ip.py b/hooks/charmhelpers-pl/contrib/openstack/ip.py deleted file mode 100644 index 29bbddc..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/ip.py +++ /dev/null @@ -1,146 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -from charmhelpers.core.hookenv import ( - config, - unit_get, -) -from charmhelpers.contrib.network.ip import ( - get_address_in_network, - is_address_in_network, - is_ipv6, - get_ipv6_addr, -) -from charmhelpers.contrib.hahelpers.cluster import is_clustered - -from functools import partial - -PUBLIC = 'public' -INTERNAL = 'int' -ADMIN = 'admin' - -ADDRESS_MAP = { - PUBLIC: { - 'config': 'os-public-network', - 'fallback': 'public-address' - }, - INTERNAL: { - 'config': 'os-internal-network', - 'fallback': 'private-address' - }, - ADMIN: { - 'config': 'os-admin-network', - 'fallback': 'private-address' - } -} - - -def canonical_url(configs, endpoint_type=PUBLIC): - """Returns the correct HTTP URL to this host given the state of HTTPS - configuration, hacluster and charm configuration. - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param endpoint_type: str endpoint type to resolve. - :param returns: str base URL for services on the current service unit. - """ - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' - address = resolve_address(endpoint_type) - if is_ipv6(address): - address = "[{}]".format(address) - return '%s://%s' % (scheme, address) - - -def resolve_address(endpoint_type=PUBLIC): - """Return unit address depending on net config. - - If unit is clustered with vip(s) and has net splits defined, return vip on - correct network. If clustered with no nets defined, return primary vip. - - If not clustered, return unit address ensuring address is on configured net - split if one is configured. 
- - :param endpoint_type: Network endpoing type - """ - resolved_address = None - vips = config('vip') - if vips: - vips = vips.split() - - net_type = ADDRESS_MAP[endpoint_type]['config'] - net_addr = config(net_type) - net_fallback = ADDRESS_MAP[endpoint_type]['fallback'] - clustered = is_clustered() - if clustered: - if not net_addr: - # If no net-splits defined, we expect a single vip - resolved_address = vips[0] - else: - for vip in vips: - if is_address_in_network(net_addr, vip): - resolved_address = vip - break - else: - if config('prefer-ipv6'): - fallback_addr = get_ipv6_addr(exc_list=vips)[0] - else: - fallback_addr = unit_get(net_fallback) - - resolved_address = get_address_in_network(net_addr, fallback_addr) - - if resolved_address is None: - raise ValueError("Unable to resolve a suitable IP address based on " - "charm state and configuration. (net_type=%s, " - "clustered=%s)" % (net_type, clustered)) - - return resolved_address - - -def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, - override=None): - """Returns the correct endpoint URL to advertise to Keystone. - - This method provides the correct endpoint URL which should be advertised to - the keystone charm for endpoint creation. This method allows for the url to - be overridden to force a keystone endpoint to have specific URL for any of - the defined scopes (admin, internal, public). - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param url_template: str format string for creating the url template. Only - two values will be passed - the scheme+hostname - returned by the canonical_url and the port. - :param endpoint_type: str endpoint type to resolve. - :param override: str the name of the config option which overrides the - endpoint URL defined by the charm itself. None will - disable any overrides (default). - """ - if override: - # Return any user-defined overrides for the keystone endpoint URL. 
- user_value = config(override) - if user_value: - return user_value.strip() - - return url_template % (canonical_url(configs, endpoint_type), port) - - -public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) - -internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) - -admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/hooks/charmhelpers-pl/contrib/openstack/neutron.py b/hooks/charmhelpers-pl/contrib/openstack/neutron.py deleted file mode 100644 index fa036e4..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/neutron.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# Various utilies for dealing with Neutron and the renaming from Quantum. - -import six -from subprocess import check_output - -from charmhelpers.core.hookenv import ( - config, - log, - ERROR, -) - -from charmhelpers.contrib.openstack.utils import os_release - - -def headers_package(): - """Ensures correct linux-headers for running kernel are installed, - for building DKMS package""" - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - return 'linux-headers-%s' % kver - -QUANTUM_CONF_DIR = '/etc/quantum' - - -def kernel_version(): - """ Retrieve the current major kernel version as a tuple e.g. 
(3, 13) """ - kver = check_output(['uname', '-r']).decode('UTF-8').strip() - kver = kver.split('.') - return (int(kver[0]), int(kver[1])) - - -def determine_dkms_package(): - """ Determine which DKMS package should be used based on kernel version """ - # NOTE: 3.13 kernels have support for GRE and VXLAN native - if kernel_version() >= (3, 13): - return [] - else: - return ['openvswitch-datapath-dkms'] - - -# legacy - - -def quantum_plugins(): - from charmhelpers.contrib.openstack import context - return { - 'ovs': { - 'config': '/etc/quantum/plugins/openvswitch/' - 'ovs_quantum_plugin.ini', - 'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.' - 'OVSQuantumPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], - 'services': ['quantum-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), - ['quantum-plugin-openvswitch-agent']], - 'server_packages': ['quantum-server', - 'quantum-plugin-openvswitch'], - 'server_services': ['quantum-server'] - }, - 'nvp': { - 'config': '/etc/quantum/plugins/nicira/nvp.ini', - 'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.' - 'QuantumPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=QUANTUM_CONF_DIR)], - 'services': [], - 'packages': [], - 'server_packages': ['quantum-server', - 'quantum-plugin-nicira'], - 'server_services': ['quantum-server'] - } - } - -NEUTRON_CONF_DIR = '/etc/neutron' - - -def neutron_plugins(): - from charmhelpers.contrib.openstack import context - release = os_release('nova-common') - plugins = { - 'ovs': { - 'config': '/etc/neutron/plugins/openvswitch/' - 'ovs_neutron_plugin.ini', - 'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.' 
- 'OVSNeutronPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['neutron-plugin-openvswitch-agent'], - 'packages': [[headers_package()] + determine_dkms_package(), - ['neutron-plugin-openvswitch-agent']], - 'server_packages': ['neutron-server', - 'neutron-plugin-openvswitch'], - 'server_services': ['neutron-server'] - }, - 'nvp': { - 'config': '/etc/neutron/plugins/nicira/nvp.ini', - 'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.' - 'NeutronPlugin.NvpPluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-nicira'], - 'server_services': ['neutron-server'] - }, - 'nsx': { - 'config': '/etc/neutron/plugins/vmware/nsx.ini', - 'driver': 'vmware', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', - 'neutron-plugin-vmware'], - 'server_services': ['neutron-server'] - }, - 'n1kv': { - 'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini', - 'driver': 'neutron.plugins.cisco.network_plugin.PluginV2', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': [], - 'packages': [[headers_package()] + determine_dkms_package(), - ['neutron-plugin-cisco']], - 'server_packages': ['neutron-server', - 'neutron-plugin-cisco'], - 'server_services': ['neutron-server'] - }, - 'Calico': { - 'config': '/etc/neutron/plugins/ml2/ml2_conf.ini', - 'driver': 
'neutron.plugins.ml2.plugin.Ml2Plugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': ['calico-felix', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata'], - 'packages': [[headers_package()] + determine_dkms_package(), - ['calico-compute', - 'bird', - 'neutron-dhcp-agent', - 'nova-api-metadata']], - 'server_packages': ['neutron-server', 'calico-control'], - 'server_services': ['neutron-server'] - }, - 'vsp': { - 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', - 'driver': 'neutron.plugins.nuage.plugin.NuagePlugin', - 'contexts': [ - context.SharedDBContext(user=config('neutron-database-user'), - database=config('neutron-database'), - relation_prefix='neutron', - ssl_dir=NEUTRON_CONF_DIR)], - 'services': [], - 'packages': [], - 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], - 'server_services': ['neutron-server'] - }, - 'plumgrid': { - 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', - 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', - 'contexts': [ - context.SharedDBContext(user=config('database-user'), - database=config('database'), - ssl_dir=NEUTRON_CONF_DIR)], - 'services': [], - 'packages': [['plumgrid-lxc'], - ['iovisor-dkms'], - ['plumgrid-puppet']], - 'server_packages': ['neutron-server', - 'neutron-plugin-plumgrid'], - 'server_services': ['neutron-server'] - } - } - if release >= 'icehouse': - # NOTE: patch in ml2 plugin for icehouse onwards - plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini' - plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin' - plugins['ovs']['server_packages'] = ['neutron-server', - 'neutron-plugin-ml2'] - # NOTE: patch in vmware renames nvp->nsx for icehouse onwards - plugins['nvp'] = plugins['nsx'] - return plugins - - -def neutron_plugin_attribute(plugin, attr, net_manager=None): 
- manager = net_manager or network_manager() - if manager == 'quantum': - plugins = quantum_plugins() - elif manager == 'neutron': - plugins = neutron_plugins() - else: - log("Network manager '%s' does not support plugins." % (manager), - level=ERROR) - raise Exception - - try: - _plugin = plugins[plugin] - except KeyError: - log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR) - raise Exception - - try: - return _plugin[attr] - except KeyError: - return None - - -def network_manager(): - ''' - Deals with the renaming of Quantum to Neutron in H and any situations - that require compatability (eg, deploying H with network-manager=quantum, - upgrading from G). - ''' - release = os_release('nova-common') - manager = config('network-manager').lower() - - if manager not in ['quantum', 'neutron']: - return manager - - if release in ['essex']: - # E does not support neutron - log('Neutron networking not supported in Essex.', level=ERROR) - raise Exception - elif release in ['folsom', 'grizzly']: - # neutron is named quantum in F and G - return 'quantum' - else: - # ensure accurate naming for all releases post-H - return 'neutron' - - -def parse_mappings(mappings): - parsed = {} - if mappings: - mappings = mappings.split(' ') - for m in mappings: - p = m.partition(':') - if p[1] == ':': - parsed[p[0].strip()] = p[2].strip() - - return parsed - - -def parse_bridge_mappings(mappings): - """Parse bridge mappings. - - Mappings must be a space-delimited list of provider:bridge mappings. - - Returns dict of the form {provider:bridge}. - """ - return parse_mappings(mappings) - - -def parse_data_port_mappings(mappings, default_bridge='br-data'): - """Parse data port mappings. - - Mappings must be a space-delimited list of bridge:port mappings. - - Returns dict of the form {bridge:port}. 
- """ - _mappings = parse_mappings(mappings) - if not _mappings: - if not mappings: - return {} - - # For backwards-compatibility we need to support port-only provided in - # config. - _mappings = {default_bridge: mappings.split(' ')[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") - - if len(set(ports)) != len(ports): - raise Exception("It is not allowed to have the same port configured " - "on more than one bridge") - - return _mappings - - -def parse_vlan_range_mappings(mappings): - """Parse vlan range mappings. - - Mappings must be a space-delimited list of provider:start:end mappings. - - Returns dict of the form {provider: (start, end)}. - """ - _mappings = parse_mappings(mappings) - if not _mappings: - return {} - - mappings = {} - for p, r in six.iteritems(_mappings): - mappings[p] = tuple(r.split(':')) - - return mappings diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py b/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py deleted file mode 100644 index 7587679..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf deleted file mode 100644 index 81a9719..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/ceph.conf +++ /dev/null @@ -1,15 +0,0 @@ -############################################################################### -# [ WARNING ] -# cinder configuration file maintained by Juju -# local changes may be overwritten. -############################################################################### -[global] -{% if auth -%} - auth_supported = {{ auth }} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} -{% endif -%} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} - diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart b/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart deleted file mode 100644 index 4bed404..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/git.upstart +++ /dev/null @@ -1,17 +0,0 @@ -description "{{ service_description }}" -author "Juju {{ service_name }} Charm " - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -exec start-stop-daemon --start --chuid {{ user_name }} \ - --chdir {{ start_dir }} --name {{ process_name }} \ - --exec {{ executable_name }} -- \ - {% for config_file in config_files -%} - --config-file={{ config_file }} \ - {% endfor -%} - {% if log_file -%} - --log-file={{ log_file }} - {% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg deleted file mode 100644 index ad875f1..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log {{ local_host }} local0 - log {{ local_host }} local1 notice - maxconn 20000 
- user haproxy - group haproxy - spread-checks 0 - -defaults - log global - mode tcp - option tcplog - option dontlognull - retries 3 - timeout queue 1000 - timeout connect 1000 -{% if haproxy_client_timeout -%} - timeout client {{ haproxy_client_timeout }} -{% else -%} - timeout client 30000 -{% endif -%} - -{% if haproxy_server_timeout -%} - timeout server {{ haproxy_server_timeout }} -{% else -%} - timeout server 30000 -{% endif -%} - -listen stats {{ stat_port }} - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / - stats auth admin:password - -{% if frontends -%} -{% for service, ports in service_ports.items() -%} -frontend tcp-in_{{ service }} - bind *:{{ ports[0] }} - {% if ipv6 -%} - bind :::{{ ports[0] }} - {% endif -%} - {% for frontend in frontends -%} - acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} - use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor -%} - default_backend {{ service }}_{{ default_backend }} - -{% for frontend in frontends -%} -backend {{ service }}_{{ frontend }} - balance leastconn - {% for unit, address in frontends[frontend]['backends'].items() -%} - server {{ unit }} {{ address }}:{{ ports[1] }} check - {% endfor %} -{% endfor -%} -{% endfor -%} -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend deleted file mode 100644 index ce28fa3..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend +++ /dev/null @@ -1,24 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} -Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ 
int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf deleted file mode 100644 index ce28fa3..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/openstack_https_frontend.conf +++ /dev/null @@ -1,24 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} -Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken deleted file mode 100644 index 2a37edd..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/section-keystone-authtoken +++ /dev/null @@ -1,9 +0,0 @@ -{% if auth_host -%} -[keystone_authtoken] -identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} -admin_tenant_name = {{ admin_tenant_name }} -admin_user = {{ admin_user }} -admin_password = {{ admin_password }} -signing_dir = {{ signing_dir }} -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo b/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo deleted file 
mode 100644 index b444c9c..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/section-rabbitmq-oslo +++ /dev/null @@ -1,22 +0,0 @@ -{% if rabbitmq_host or rabbitmq_hosts -%} -[oslo_messaging_rabbit] -rabbit_userid = {{ rabbitmq_user }} -rabbit_virtual_host = {{ rabbitmq_virtual_host }} -rabbit_password = {{ rabbitmq_password }} -{% if rabbitmq_hosts -%} -rabbit_hosts = {{ rabbitmq_hosts }} -{% if rabbitmq_ha_queues -%} -rabbit_ha_queues = True -rabbit_durable_queues = False -{% endif -%} -{% else -%} -rabbit_host = {{ rabbitmq_host }} -{% endif -%} -{% if rabbit_ssl_port -%} -rabbit_use_ssl = True -rabbit_port = {{ rabbit_ssl_port }} -{% if rabbit_ssl_ca -%} -kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} -{% endif -%} -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq b/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq deleted file mode 100644 index 95f1a76..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templates/section-zeromq +++ /dev/null @@ -1,14 +0,0 @@ -{% if zmq_host -%} -# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) -rpc_backend = zmq -rpc_zmq_host = {{ zmq_host }} -{% if zmq_redis_address -%} -rpc_zmq_matchmaker = redis -matchmaker_heartbeat_freq = 15 -matchmaker_heartbeat_ttl = 30 -[matchmaker_redis] -host = {{ zmq_redis_address }} -{% else -%} -rpc_zmq_matchmaker = ring -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers-pl/contrib/openstack/templating.py b/hooks/charmhelpers-pl/contrib/openstack/templating.py deleted file mode 100644 index 24cb272..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/templating.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os - -import six - -from charmhelpers.fetch import apt_install -from charmhelpers.core.hookenv import ( - log, - ERROR, - INFO -) -from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES - -try: - from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions -except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. - FileSystemLoader = ChoiceLoader = Environment = exceptions = None - - -class OSConfigException(Exception): - pass - - -def get_loader(templates_dir, os_release): - """ - Create a jinja2.ChoiceLoader containing template dirs up to - and including os_release. If directory template directory - is missing at templates_dir, it will be omitted from the loader. - templates_dir is added to the bottom of the search list as a base - loading dir. - - A charm may also ship a templates dir with this module - and it will be appended to the bottom of the search list, eg:: - - hooks/charmhelpers/contrib/openstack/templates - - :param templates_dir (str): Base template directory containing release - sub-directories. - :param os_release (str): OpenStack release codename to construct template - loader. - :returns: jinja2.ChoiceLoader constructed with a list of - jinja2.FilesystemLoaders, ordered in descending - order by OpenStack release. - """ - tmpl_dirs = [(rel, os.path.join(templates_dir, rel)) - for rel in six.itervalues(OPENSTACK_CODENAMES)] - - if not os.path.isdir(templates_dir): - log('Templates directory not found @ %s.' 
% templates_dir, - level=ERROR) - raise OSConfigException - - # the bottom contains tempaltes_dir and possibly a common templates dir - # shipped with the helper. - loaders = [FileSystemLoader(templates_dir)] - helper_templates = os.path.join(os.path.dirname(__file__), 'templates') - if os.path.isdir(helper_templates): - loaders.append(FileSystemLoader(helper_templates)) - - for rel, tmpl_dir in tmpl_dirs: - if os.path.isdir(tmpl_dir): - loaders.insert(0, FileSystemLoader(tmpl_dir)) - if rel == os_release: - break - log('Creating choice loader with dirs: %s' % - [l.searchpath for l in loaders], level=INFO) - return ChoiceLoader(loaders) - - -class OSConfigTemplate(object): - """ - Associates a config file template with a list of context generators. - Responsible for constructing a template context based on those generators. - """ - def __init__(self, config_file, contexts): - self.config_file = config_file - - if hasattr(contexts, '__call__'): - self.contexts = [contexts] - else: - self.contexts = contexts - - self._complete_contexts = [] - - def context(self): - ctxt = {} - for context in self.contexts: - _ctxt = context() - if _ctxt: - ctxt.update(_ctxt) - # track interfaces for every complete context. - [self._complete_contexts.append(interface) - for interface in context.interfaces - if interface not in self._complete_contexts] - return ctxt - - def complete_contexts(self): - ''' - Return a list of interfaces that have atisfied contexts. - ''' - if self._complete_contexts: - return self._complete_contexts - self.context() - return self._complete_contexts - - -class OSConfigRenderer(object): - """ - This class provides a common templating system to be used by OpenStack - charms. It is intended to help charms share common code and templates, - and ease the burden of managing config templates across multiple OpenStack - releases. 
- - Basic usage:: - - # import some common context generates from charmhelpers - from charmhelpers.contrib.openstack import context - - # Create a renderer object for a specific OS release. - configs = OSConfigRenderer(templates_dir='/tmp/templates', - openstack_release='folsom') - # register some config files with context generators. - configs.register(config_file='/etc/nova/nova.conf', - contexts=[context.SharedDBContext(), - context.AMQPContext()]) - configs.register(config_file='/etc/nova/api-paste.ini', - contexts=[context.IdentityServiceContext()]) - configs.register(config_file='/etc/haproxy/haproxy.conf', - contexts=[context.HAProxyContext()]) - # write out a single config - configs.write('/etc/nova/nova.conf') - # write out all registered configs - configs.write_all() - - **OpenStack Releases and template loading** - - When the object is instantiated, it is associated with a specific OS - release. This dictates how the template loader will be constructed. - - The constructed loader attempts to load the template from several places - in the following order: - - from the most recent OS release-specific template dir (if one exists) - - the base templates_dir - - a template directory shipped in the charm with this helper file. - - For the example above, '/tmp/templates' contains the following structure:: - - /tmp/templates/nova.conf - /tmp/templates/api-paste.ini - /tmp/templates/grizzly/api-paste.ini - /tmp/templates/havana/api-paste.ini - - Since it was registered with the grizzly release, it first seraches - the grizzly directory for nova.conf, then the templates dir. - - When writing api-paste.ini, it will find the template in the grizzly - directory. - - If the object were created with folsom, it would fall back to the - base templates dir for its api-paste.ini template. 
- - This system should help manage changes in config files through - openstack releases, allowing charms to fall back to the most recently - updated config template for a given release - - The haproxy.conf, since it is not shipped in the templates dir, will - be loaded from the module directory's template directory, eg - $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows - us to ship common templates (haproxy, apache) with the helpers. - - **Context generators** - - Context generators are used to generate template contexts during hook - execution. Doing so may require inspecting service relations, charm - config, etc. When registered, a config file is associated with a list - of generators. When a template is rendered and written, all context - generates are called in a chain to generate the context dictionary - passed to the jinja2 template. See context.py for more info. - """ - def __init__(self, templates_dir, openstack_release): - if not os.path.isdir(templates_dir): - log('Could not locate templates dir %s' % templates_dir, - level=ERROR) - raise OSConfigException - - self.templates_dir = templates_dir - self.openstack_release = openstack_release - self.templates = {} - self._tmpl_env = None - - if None in [Environment, ChoiceLoader, FileSystemLoader]: - # if this code is running, the object is created pre-install hook. - # jinja2 shouldn't get touched until the module is reloaded on next - # hook execution, with proper jinja2 bits successfully imported. - apt_install('python-jinja2') - - def register(self, config_file, contexts): - """ - Register a config file with a list of context generators to be called - during rendering. 
- """ - self.templates[config_file] = OSConfigTemplate(config_file=config_file, - contexts=contexts) - log('Registered config file: %s' % config_file, level=INFO) - - def _get_tmpl_env(self): - if not self._tmpl_env: - loader = get_loader(self.templates_dir, self.openstack_release) - self._tmpl_env = Environment(loader=loader) - - def _get_template(self, template): - self._get_tmpl_env() - template = self._tmpl_env.get_template(template) - log('Loaded template from %s' % template.filename, level=INFO) - return template - - def render(self, config_file): - if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) - raise OSConfigException - ctxt = self.templates[config_file].context() - - _tmpl = os.path.basename(config_file) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound: - # if no template is found with basename, try looking for it - # using a munged full path, eg: - # /etc/apache2/apache2.conf -> etc_apache2_apache2.conf - _tmpl = '_'.join(config_file.split('/')[1:]) - try: - template = self._get_template(_tmpl) - except exceptions.TemplateNotFound as e: - log('Could not load template from %s by %s or %s.' % - (self.templates_dir, os.path.basename(config_file), _tmpl), - level=ERROR) - raise e - - log('Rendering from template: %s' % _tmpl, level=INFO) - return template.render(ctxt) - - def write(self, config_file): - """ - Write a single config file, raises if config file is not registered. - """ - if config_file not in self.templates: - log('Config not registered: %s' % config_file, level=ERROR) - raise OSConfigException - - _out = self.render(config_file) - - with open(config_file, 'wb') as out: - out.write(_out) - - log('Wrote template %s.' % config_file, level=INFO) - - def write_all(self): - """ - Write out all registered config files. 
- """ - [self.write(k) for k in six.iterkeys(self.templates)] - - def set_release(self, openstack_release): - """ - Resets the template environment and generates a new template loader - based on a the new openstack release. - """ - self._tmpl_env = None - self.openstack_release = openstack_release - self._get_tmpl_env() - - def complete_contexts(self): - ''' - Returns a list of context interfaces that yield a complete context. - ''' - interfaces = [] - [interfaces.extend(i.complete_contexts()) - for i in six.itervalues(self.templates)] - return interfaces diff --git a/hooks/charmhelpers-pl/contrib/openstack/utils.py b/hooks/charmhelpers-pl/contrib/openstack/utils.py deleted file mode 100644 index f90a028..0000000 --- a/hooks/charmhelpers-pl/contrib/openstack/utils.py +++ /dev/null @@ -1,642 +0,0 @@ -#!/usr/bin/python - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# Common python helper functions used for OpenStack charms. 
-from collections import OrderedDict -from functools import wraps - -import subprocess -import json -import os -import sys - -import six -import yaml - -from charmhelpers.contrib.network import ip - -from charmhelpers.core import ( - unitdata, -) - -from charmhelpers.core.hookenv import ( - config, - log as juju_log, - charm_dir, - INFO, - relation_ids, - relation_set -) - -from charmhelpers.contrib.storage.linux.lvm import ( - deactivate_lvm_volume_group, - is_lvm_physical_volume, - remove_lvm_physical_volume, -) - -from charmhelpers.contrib.network.ip import ( - get_ipv6_addr -) - -from charmhelpers.core.host import lsb_release, mounts, umount -from charmhelpers.fetch import apt_install, apt_cache, install_remote -from charmhelpers.contrib.python.packages import pip_install -from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk -from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device - -CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" -CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' - -DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' - 'restricted main multiverse universe') - - -UBUNTU_OPENSTACK_RELEASE = OrderedDict([ - ('oneiric', 'diablo'), - ('precise', 'essex'), - ('quantal', 'folsom'), - ('raring', 'grizzly'), - ('saucy', 'havana'), - ('trusty', 'icehouse'), - ('utopic', 'juno'), - ('vivid', 'kilo'), -]) - - -OPENSTACK_CODENAMES = OrderedDict([ - ('2011.2', 'diablo'), - ('2012.1', 'essex'), - ('2012.2', 'folsom'), - ('2013.1', 'grizzly'), - ('2013.2', 'havana'), - ('2014.1', 'icehouse'), - ('2014.2', 'juno'), - ('2015.1', 'kilo'), -]) - -# The ugly duckling -SWIFT_CODENAMES = OrderedDict([ - ('1.4.3', 'diablo'), - ('1.4.8', 'essex'), - ('1.7.4', 'folsom'), - ('1.8.0', 'grizzly'), - ('1.7.7', 'grizzly'), - ('1.7.6', 'grizzly'), - ('1.10.0', 'havana'), - ('1.9.1', 'havana'), - ('1.9.0', 'havana'), - ('1.13.1', 'icehouse'), - ('1.13.0', 'icehouse'), - ('1.12.0', 'icehouse'), 
- ('1.11.0', 'icehouse'), - ('2.0.0', 'juno'), - ('2.1.0', 'juno'), - ('2.2.0', 'juno'), - ('2.2.1', 'kilo'), - ('2.2.2', 'kilo'), -]) - -DEFAULT_LOOPBACK_SIZE = '5G' - - -def error_out(msg): - juju_log("FATAL ERROR: %s" % msg, level='ERROR') - sys.exit(1) - - -def get_os_codename_install_source(src): - '''Derive OpenStack release codename from a given installation source.''' - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = '' - if src is None: - return rel - if src in ['distro', 'distro-proposed']: - try: - rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] - except KeyError: - e = 'Could not derive openstack release for '\ - 'this Ubuntu release: %s' % ubuntu_rel - error_out(e) - return rel - - if src.startswith('cloud:'): - ca_rel = src.split(':')[1] - ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] - return ca_rel - - # Best guess match based on deb string provided - if src.startswith('deb') or src.startswith('ppa'): - for k, v in six.iteritems(OPENSTACK_CODENAMES): - if v in src: - return v - - -def get_os_version_install_source(src): - codename = get_os_codename_install_source(src) - return get_os_version_codename(codename) - - -def get_os_codename_version(vers): - '''Determine OpenStack codename from version number.''' - try: - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_codename(codename): - '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): - if v == codename: - return k - e = 'Could not derive OpenStack version for '\ - 'codename: %s' % codename - error_out(e) - - -def get_os_codename_package(package, fatal=True): - '''Derive OpenStack release codename from an installed package.''' - import apt_pkg as apt - - cache = apt_cache() - - try: - pkg = cache[package] - except: - if not fatal: - return None - # the package is unknown to the current apt cache. 
- e = 'Could not determine version of package with no installation '\ - 'candidate: %s' % package - error_out(e) - - if not pkg.current_ver: - if not fatal: - return None - # package is known, but no version is currently installed. - e = 'Could not determine version of uninstalled package: %s' % package - error_out(e) - - vers = apt.upstream_version(pkg.current_ver.ver_str) - - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) - - -def get_os_version_package(pkg, fatal=True): - '''Derive OpenStack version number from an installed package.''' - codename = get_os_codename_package(pkg, fatal=fatal) - - if not codename: - return None - - if 'swift' in pkg: - vers_map = SWIFT_CODENAMES - else: - vers_map = OPENSTACK_CODENAMES - - for version, cname in six.iteritems(vers_map): - if cname == codename: - return version - # e = "Could not determine OpenStack version for package: %s" % pkg - # error_out(e) - - -os_rel = None - - -def os_release(package, base='essex'): - ''' - Returns OpenStack release codename from a cached global. - If the codename can not be determined from either an installed package or - the installation source, the earliest release supported by the charm should - be returned. 
- ''' - global os_rel - if os_rel: - return os_rel - os_rel = (get_os_codename_package(package, fatal=False) or - get_os_codename_install_source(config('openstack-origin')) or - base) - return os_rel - - -def import_key(keyid): - cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \ - "--recv-keys %s" % keyid - try: - subprocess.check_call(cmd.split(' ')) - except subprocess.CalledProcessError: - error_out("Error importing repo key %s" % keyid) - - -def configure_installation_source(rel): - '''Configure apt installation source.''' - if rel == 'distro': - return - elif rel == 'distro-proposed': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(DISTRO_PROPOSED % ubuntu_rel) - elif rel[:4] == "ppa:": - src = rel - subprocess.check_call(["add-apt-repository", "-y", src]) - elif rel[:3] == "deb": - l = len(rel.split('|')) - if l == 2: - src, key = rel.split('|') - juju_log("Importing PPA key from keyserver for %s" % src) - import_key(key) - elif l == 1: - src = rel - with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: - f.write(src) - elif rel[:6] == 'cloud:': - ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] - rel = rel.split(':')[1] - u_rel = rel.split('-')[0] - ca_rel = rel.split('-')[1] - - if u_rel != ubuntu_rel: - e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ - 'version (%s)' % (ca_rel, ubuntu_rel) - error_out(e) - - if 'staging' in ca_rel: - # staging is just a regular PPA. - os_rel = ca_rel.split('/')[0] - ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel - cmd = 'add-apt-repository -y %s' % ppa - subprocess.check_call(cmd.split(' ')) - return - - # map charm config options to actual archive pockets. 
- pockets = { - 'folsom': 'precise-updates/folsom', - 'folsom/updates': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'grizzly': 'precise-updates/grizzly', - 'grizzly/updates': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'havana': 'precise-updates/havana', - 'havana/updates': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'icehouse': 'precise-updates/icehouse', - 'icehouse/updates': 'precise-updates/icehouse', - 'icehouse/proposed': 'precise-proposed/icehouse', - 'juno': 'trusty-updates/juno', - 'juno/updates': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'kilo': 'trusty-updates/kilo', - 'kilo/updates': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - } - - try: - pocket = pockets[ca_rel] - except KeyError: - e = 'Invalid Cloud Archive release specified: %s' % rel - error_out(e) - - src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) - apt_install('ubuntu-cloud-keyring', fatal=True) - - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: - f.write(src) - else: - error_out("Invalid openstack-release specified: %s" % rel) - - -def config_value_changed(option): - """ - Determine if config value changed since last call to this function. - """ - hook_data = unitdata.HookData() - with hook_data(): - db = unitdata.kv() - current = config(option) - saved = db.get(option) - db.set(option, current) - if saved is None: - return False - return current != saved - - -def save_script_rc(script_path="scripts/scriptrc", **env_vars): - """ - Write an rc file in the charm-delivered directory containing - exported environment variables provided by env_vars. Any charm scripts run - outside the juju hook environment can source this scriptrc to obtain - updated config information necessary to perform health checks or - service changes. 
- """ - juju_rc_path = "%s/%s" % (charm_dir(), script_path) - if not os.path.exists(os.path.dirname(juju_rc_path)): - os.mkdir(os.path.dirname(juju_rc_path)) - with open(juju_rc_path, 'wb') as rc_script: - rc_script.write( - "#!/bin/bash\n") - [rc_script.write('export %s=%s\n' % (u, p)) - for u, p in six.iteritems(env_vars) if u != "script_path"] - - -def openstack_upgrade_available(package): - """ - Determines if an OpenStack upgrade is available from installation - source, based on version of installed package. - - :param package: str: Name of installed package. - - :returns: bool: : Returns True if configured installation source offers - a newer version of package. - - """ - - import apt_pkg as apt - src = config('openstack-origin') - cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) - apt.init() - return apt.version_compare(available_vers, cur_vers) == 1 - - -def ensure_block_device(block_device): - ''' - Confirm block_device, create as loopback if necessary. - - :param block_device: str: Full path of block device to ensure. - - :returns: str: Full path of ensured block device. - ''' - _none = ['None', 'none', None] - if (block_device in _none): - error_out('prepare_storage(): Missing required input: block_device=%s.' - % block_device) - - if block_device.startswith('/dev/'): - bdev = block_device - elif block_device.startswith('/'): - _bd = block_device.split('|') - if len(_bd) == 2: - bdev, size = _bd - else: - bdev = block_device - size = DEFAULT_LOOPBACK_SIZE - bdev = ensure_loopback_device(bdev, size) - else: - bdev = '/dev/%s' % block_device - - if not is_block_device(bdev): - error_out('Failed to locate valid block device at %s' % bdev) - - return bdev - - -def clean_storage(block_device): - ''' - Ensures a block device is clean. 
That is: - - unmounted - - any lvm volume groups are deactivated - - any lvm physical device signatures removed - - partition table wiped - - :param block_device: str: Full path to block device to clean. - ''' - for mp, d in mounts(): - if d == block_device: - juju_log('clean_storage(): %s is mounted @ %s, unmounting.' % - (d, mp), level=INFO) - umount(mp, persist=True) - - if is_lvm_physical_volume(block_device): - deactivate_lvm_volume_group(block_device) - remove_lvm_physical_volume(block_device) - else: - zap_disk(block_device) - -is_ip = ip.is_ip -ns_query = ip.ns_query -get_host_ip = ip.get_host_ip -get_hostname = ip.get_hostname - - -def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'): - mm_map = {} - if os.path.isfile(mm_file): - with open(mm_file, 'r') as f: - mm_map = json.load(f) - return mm_map - - -def sync_db_with_multi_ipv6_addresses(database, database_user, - relation_prefix=None): - hosts = get_ipv6_addr(dynamic_only=False) - - kwargs = {'database': database, - 'username': database_user, - 'hostname': json.dumps(hosts)} - - if relation_prefix: - for key in list(kwargs.keys()): - kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key] - del kwargs[key] - - for rid in relation_ids('shared-db'): - relation_set(relation_id=rid, **kwargs) - - -def os_requires_version(ostack_release, pkg): - """ - Decorator for hook to specify minimum supported release - """ - def wrap(f): - @wraps(f) - def wrapped_f(*args): - if os_release(pkg) < ostack_release: - raise Exception("This hook is not supported on releases" - " before %s" % ostack_release) - f(*args) - return wrapped_f - return wrap - - -def git_install_requested(): - """ - Returns true if openstack-origin-git is specified. - """ - return config('openstack-origin-git') is not None - - -requirements_dir = None - - -def git_clone_and_install(projects_yaml, core_project): - """ - Clone/install all specified OpenStack repositories. 
- - The expected format of projects_yaml is: - repositories: - - {name: keystone, - repository: 'git://git.openstack.org/openstack/keystone.git', - branch: 'stable/icehouse'} - - {name: requirements, - repository: 'git://git.openstack.org/openstack/requirements.git', - branch: 'stable/icehouse'} - directory: /mnt/openstack-git - http_proxy: http://squid.internal:3128 - https_proxy: https://squid.internal:3128 - - The directory, http_proxy, and https_proxy keys are optional. - """ - global requirements_dir - parent_dir = '/mnt/openstack-git' - - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) - _git_validate_projects_yaml(projects, core_project) - - old_environ = dict(os.environ) - - if 'http_proxy' in projects.keys(): - os.environ['http_proxy'] = projects['http_proxy'] - if 'https_proxy' in projects.keys(): - os.environ['https_proxy'] = projects['https_proxy'] - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - repo = p['repository'] - branch = p['branch'] - if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, - update_requirements=False) - requirements_dir = repo_dir - else: - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, - update_requirements=True) - - os.environ = old_environ - - -def _git_validate_projects_yaml(projects, core_project): - """ - Validate the projects yaml. 
- """ - _git_ensure_key_exists('repositories', projects) - - for project in projects['repositories']: - _git_ensure_key_exists('name', project.keys()) - _git_ensure_key_exists('repository', project.keys()) - _git_ensure_key_exists('branch', project.keys()) - - if projects['repositories'][0]['name'] != 'requirements': - error_out('{} git repo must be specified first'.format('requirements')) - - if projects['repositories'][-1]['name'] != core_project: - error_out('{} git repo must be specified last'.format(core_project)) - - -def _git_ensure_key_exists(key, keys): - """ - Ensure that key exists in keys. - """ - if key not in keys: - error_out('openstack-origin-git key \'{}\' is missing'.format(key)) - - -def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): - """ - Clone and install a single git repository. - """ - dest_dir = os.path.join(parent_dir, os.path.basename(repo)) - - if not os.path.exists(parent_dir): - juju_log('Directory already exists at {}. ' - 'No need to create directory.'.format(parent_dir)) - os.mkdir(parent_dir) - - if not os.path.exists(dest_dir): - juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch) - else: - repo_dir = dest_dir - - if update_requirements: - if not requirements_dir: - error_out('requirements repo must be cloned before ' - 'updating from global requirements.') - _git_update_requirements(repo_dir, requirements_dir) - - juju_log('Installing git repo from dir: {}'.format(repo_dir)) - pip_install(repo_dir) - - return repo_dir - - -def _git_update_requirements(package_dir, reqs_dir): - """ - Update from global requirements. - - Update an OpenStack git directory's requirements.txt and - test-requirements.txt from global-requirements.txt. 
- """ - orig_dir = os.getcwd() - os.chdir(reqs_dir) - cmd = ['python', 'update.py', package_dir] - try: - subprocess.check_call(cmd) - except subprocess.CalledProcessError: - package = os.path.basename(package_dir) - error_out("Error updating {} from global-requirements.txt".format(package)) - os.chdir(orig_dir) - - -def git_src_dir(projects_yaml, project): - """ - Return the directory where the specified project's source is located. - """ - parent_dir = '/mnt/openstack-git' - - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) - - if 'directory' in projects.keys(): - parent_dir = projects['directory'] - - for p in projects['repositories']: - if p['name'] == project: - return os.path.join(parent_dir, os.path.basename(p['repository'])) - - return None diff --git a/hooks/charmhelpers-pl/contrib/python/__init__.py b/hooks/charmhelpers-pl/contrib/python/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/contrib/python/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
diff --git a/hooks/charmhelpers-pl/contrib/python/packages.py b/hooks/charmhelpers-pl/contrib/python/packages.py deleted file mode 100644 index 8659516..0000000 --- a/hooks/charmhelpers-pl/contrib/python/packages.py +++ /dev/null @@ -1,96 +0,0 @@ -#!/usr/bin/env python -# coding: utf-8 - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import log - -try: - from pip import main as pip_execute -except ImportError: - apt_update() - apt_install('python-pip') - from pip import main as pip_execute - -__author__ = "Jorge Niedbalski " - - -def parse_options(given, available): - """Given a set of options, check if available""" - for key, value in sorted(given.items()): - if key in available: - yield "--{0}={1}".format(key, value) - - -def pip_install_requirements(requirements, **options): - """Install a requirements file """ - command = ["install"] - - available_options = ('proxy', 'src', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - command.append("-r {0}".format(requirements)) - log("Installing from file: {} with options: {}".format(requirements, - command)) - pip_execute(command) - - -def pip_install(package, fatal=False, upgrade=False, **options): - """Install a python package""" - command = ["install"] - - available_options = 
('proxy', 'src', 'log', "index-url", ) - for option in parse_options(options, available_options): - command.append(option) - - if upgrade: - command.append('--upgrade') - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Installing {} package with options: {}".format(package, - command)) - pip_execute(command) - - -def pip_uninstall(package, **options): - """Uninstall a python package""" - command = ["uninstall", "-q", "-y"] - - available_options = ('proxy', 'log', ) - for option in parse_options(options, available_options): - command.append(option) - - if isinstance(package, list): - command.extend(package) - else: - command.append(package) - - log("Uninstalling {} package with options: {}".format(package, - command)) - pip_execute(command) - - -def pip_list(): - """Returns the list of current python installed packages - """ - return pip_execute(["list"]) diff --git a/hooks/charmhelpers-pl/contrib/storage/__init__.py b/hooks/charmhelpers-pl/contrib/storage/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py b/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/linux/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py b/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py deleted file mode 100644 index 31ea7f9..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/linux/ceph.py +++ /dev/null @@ -1,444 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -# -# Copyright 2012 Canonical Ltd. 
-# -# This file is sourced from lp:openstack-charm-helpers -# -# Authors: -# James Page -# Adam Gandelman -# - -import os -import shutil -import json -import time - -from subprocess import ( - check_call, - check_output, - CalledProcessError, -) -from charmhelpers.core.hookenv import ( - relation_get, - relation_ids, - related_units, - log, - DEBUG, - INFO, - WARNING, - ERROR, -) -from charmhelpers.core.host import ( - mount, - mounts, - service_start, - service_stop, - service_running, - umount, -) -from charmhelpers.fetch import ( - apt_install, -) - -KEYRING = '/etc/ceph/ceph.client.{}.keyring' -KEYFILE = '/etc/ceph/ceph.client.{}.key' - -CEPH_CONF = """[global] - auth supported = {auth} - keyring = {keyring} - mon host = {mon_hosts} - log to syslog = {use_syslog} - err to syslog = {use_syslog} - clog to syslog = {use_syslog} -""" - - -def install(): - """Basic Ceph client installation.""" - ceph_dir = "/etc/ceph" - if not os.path.exists(ceph_dir): - os.mkdir(ceph_dir) - - apt_install('ceph-common', fatal=True) - - -def rbd_exists(service, pool, rbd_img): - """Check to see if a RADOS block device exists.""" - try: - out = check_output(['rbd', 'list', '--id', - service, '--pool', pool]).decode('UTF-8') - except CalledProcessError: - return False - - return rbd_img in out - - -def create_rbd_image(service, pool, image, sizemb): - """Create a new RADOS block device.""" - cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service, - '--pool', pool] - check_call(cmd) - - -def pool_exists(service, name): - """Check to see if a RADOS pool already exists.""" - try: - out = check_output(['rados', '--id', service, - 'lspools']).decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def get_osds(service): - """Return a list of all Ceph Object Storage Daemons currently in the - cluster. 
- """ - version = ceph_version() - if version and version >= '0.56': - return json.loads(check_output(['ceph', '--id', service, - 'osd', 'ls', - '--format=json']).decode('UTF-8')) - - return None - - -def create_pool(service, name, replicas=3): - """Create a new RADOS pool.""" - if pool_exists(service, name): - log("Ceph pool {} already exists, skipping creation".format(name), - level=WARNING) - return - - # Calculate the number of placement groups based - # on upstream recommended best practices. - osds = get_osds(service) - if osds: - pgnum = (len(osds) * 100 // replicas) - else: - # NOTE(james-page): Default to 200 for older ceph versions - # which don't support OSD query from cli - pgnum = 200 - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)] - check_call(cmd) - - cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size', - str(replicas)] - check_call(cmd) - - -def delete_pool(service, name): - """Delete a RADOS pool from ceph.""" - cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name, - '--yes-i-really-really-mean-it'] - check_call(cmd) - - -def _keyfile_path(service): - return KEYFILE.format(service) - - -def _keyring_path(service): - return KEYRING.format(service) - - -def create_keyring(service, key): - """Create a new Ceph keyring containing key.""" - keyring = _keyring_path(service) - if os.path.exists(keyring): - log('Ceph keyring exists at %s.' % keyring, level=WARNING) - return - - cmd = ['ceph-authtool', keyring, '--create-keyring', - '--name=client.{}'.format(service), '--add-key={}'.format(key)] - check_call(cmd) - log('Created new ceph keyring at %s.' % keyring, level=DEBUG) - - -def delete_keyring(service): - """Delete an existing Ceph keyring.""" - keyring = _keyring_path(service) - if not os.path.exists(keyring): - log('Keyring does not exist at %s' % keyring, level=WARNING) - return - - os.remove(keyring) - log('Deleted ring at %s.' 
% keyring, level=INFO) - - -def create_key_file(service, key): - """Create a file containing key.""" - keyfile = _keyfile_path(service) - if os.path.exists(keyfile): - log('Keyfile exists at %s.' % keyfile, level=WARNING) - return - - with open(keyfile, 'w') as fd: - fd.write(key) - - log('Created new keyfile at %s.' % keyfile, level=INFO) - - -def get_ceph_nodes(): - """Query named relation 'ceph' to determine current nodes.""" - hosts = [] - for r_id in relation_ids('ceph'): - for unit in related_units(r_id): - hosts.append(relation_get('private-address', unit=unit, rid=r_id)) - - return hosts - - -def configure(service, key, auth, use_syslog): - """Perform basic configuration of Ceph.""" - create_keyring(service, key) - create_key_file(service, key) - hosts = get_ceph_nodes() - with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: - ceph_conf.write(CEPH_CONF.format(auth=auth, - keyring=_keyring_path(service), - mon_hosts=",".join(map(str, hosts)), - use_syslog=use_syslog)) - modprobe('rbd') - - -def image_mapped(name): - """Determine whether a RADOS block device is mapped locally.""" - try: - out = check_output(['rbd', 'showmapped']).decode('UTF-8') - except CalledProcessError: - return False - - return name in out - - -def map_block_storage(service, pool, image): - """Map a RADOS block device for local use.""" - cmd = [ - 'rbd', - 'map', - '{}/{}'.format(pool, image), - '--user', - service, - '--secret', - _keyfile_path(service), - ] - check_call(cmd) - - -def filesystem_mounted(fs): - """Determine whether a filesytems is already mounted.""" - return fs in [f for f, m in mounts()] - - -def make_filesystem(blk_device, fstype='ext4', timeout=10): - """Make a new filesystem on the specified block device.""" - count = 0 - e_noent = os.errno.ENOENT - while not os.path.exists(blk_device): - if count >= timeout: - log('Gave up waiting on block device %s' % blk_device, - level=ERROR) - raise IOError(e_noent, os.strerror(e_noent), blk_device) - - log('Waiting for block 
device %s to appear' % blk_device, - level=DEBUG) - count += 1 - time.sleep(1) - else: - log('Formatting block device %s as filesystem %s.' % - (blk_device, fstype), level=INFO) - check_call(['mkfs', '-t', fstype, blk_device]) - - -def place_data_on_block_device(blk_device, data_src_dst): - """Migrate data in data_src_dst to blk_device and then remount.""" - # mount block device into /mnt - mount(blk_device, '/mnt') - # copy data to /mnt - copy_files(data_src_dst, '/mnt') - # umount block device - umount('/mnt') - # Grab user/group ID's from original source - _dir = os.stat(data_src_dst) - uid = _dir.st_uid - gid = _dir.st_gid - # re-mount where the data should originally be - # TODO: persist is currently a NO-OP in core.host - mount(blk_device, data_src_dst, persist=True) - # ensure original ownership of new mount. - os.chown(data_src_dst, uid, gid) - - -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - -def copy_files(src, dst, symlinks=False, ignore=None): - """Copy files from src to dst.""" - for item in os.listdir(src): - s = os.path.join(src, item) - d = os.path.join(dst, item) - if os.path.isdir(s): - shutil.copytree(s, d, symlinks, ignore) - else: - shutil.copy2(s, d) - - -def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, - blk_device, fstype, system_services=[], - replicas=3): - """NOTE: This function must only be called from a single service unit for - the same rbd_img otherwise data loss will occur. - - Ensures given pool and RBD image exists, is mapped to a block device, - and the device is formatted and mounted at the given mount_point. - - If formatting a device for the first time, data existing at mount_point - will be migrated to the RBD device before being re-mounted. 
- - All services listed in system_services will be stopped prior to data - migration and restarted when complete. - """ - # Ensure pool, RBD image, RBD mappings are in place. - if not pool_exists(service, pool): - log('Creating new pool {}.'.format(pool), level=INFO) - create_pool(service, pool, replicas=replicas) - - if not rbd_exists(service, pool, rbd_img): - log('Creating RBD image ({}).'.format(rbd_img), level=INFO) - create_rbd_image(service, pool, rbd_img, sizemb) - - if not image_mapped(rbd_img): - log('Mapping RBD Image {} as a Block Device.'.format(rbd_img), - level=INFO) - map_block_storage(service, pool, rbd_img) - - # make file system - # TODO: What happens if for whatever reason this is run again and - # the data is already in the rbd device and/or is mounted?? - # When it is mounted already, it will fail to make the fs - # XXX: This is really sketchy! Need to at least add an fstab entry - # otherwise this hook will blow away existing data if its executed - # after a reboot. - if not filesystem_mounted(mount_point): - make_filesystem(blk_device, fstype) - - for svc in system_services: - if service_running(svc): - log('Stopping services {} prior to migrating data.' - .format(svc), level=DEBUG) - service_stop(svc) - - place_data_on_block_device(blk_device, mount_point) - - for svc in system_services: - log('Starting service {} after migrating data.' - .format(svc), level=DEBUG) - service_start(svc) - - -def ensure_ceph_keyring(service, user=None, group=None): - """Ensures a ceph keyring is created for a named service and optionally - ensures user and group ownership. - - Returns False if no ceph key is available in relation state. 
- """ - key = None - for rid in relation_ids('ceph'): - for unit in related_units(rid): - key = relation_get('key', rid=rid, unit=unit) - if key: - break - - if not key: - return False - - create_keyring(service=service, key=key) - keyring = _keyring_path(service) - if user and group: - check_call(['chown', '%s.%s' % (user, group), keyring]) - - return True - - -def ceph_version(): - """Retrieve the local version of ceph.""" - if os.path.exists('/usr/bin/ceph'): - cmd = ['ceph', '-v'] - output = check_output(cmd).decode('US-ASCII') - output = output.split() - if len(output) > 3: - return output[2] - else: - return None - else: - return None - - -class CephBrokerRq(object): - """Ceph broker request. - - Multiple operations can be added to a request and sent to the Ceph broker - to be executed. - - Request is json-encoded for sending over the wire. - - The API is versioned and defaults to version 1. - """ - def __init__(self, api_version=1): - self.api_version = api_version - self.ops = [] - - def add_op_create_pool(self, name, replica_count=3): - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count}) - - @property - def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) - - -class CephBrokerRsp(object): - """Ceph broker response. - - Response is json-decoded and contents provided as methods/properties. - - The API is versioned and defaults to version 1. - """ - def __init__(self, encoded_rsp): - self.api_version = None - self.rsp = json.loads(encoded_rsp) - - @property - def exit_code(self): - return self.rsp.get('exit-code') - - @property - def exit_msg(self): - return self.rsp.get('stderr') diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py b/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py deleted file mode 100644 index c296f09..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/linux/loopback.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. 
-# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os -import re -from subprocess import ( - check_call, - check_output, -) - -import six - - -################################################## -# loopback device helpers. -################################################## -def loopback_devices(): - ''' - Parse through 'losetup -a' output to determine currently mapped - loopback devices. Output is expected to look like: - - /dev/loop0: [0807]:961814 (/tmp/my.img) - - :returns: dict: a dict mapping {loopback_dev: backing_file} - ''' - loopbacks = {} - cmd = ['losetup', '-a'] - devs = [d.strip().split(' ') for d in - check_output(cmd).splitlines() if d != ''] - for dev, _, f in devs: - loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] - return loopbacks - - -def create_loopback(file_path): - ''' - Create a loopback device for a given backing file. - - :returns: str: Full path to new loopback device (eg, /dev/loop0) - ''' - file_path = os.path.abspath(file_path) - check_call(['losetup', '--find', file_path]) - for d, f in six.iteritems(loopback_devices()): - if f == file_path: - return d - - -def ensure_loopback_device(path, size): - ''' - Ensure a loopback device exists for a given backing file path and size. - If it a loopback device is not mapped to file, a new one will be created. - - TODO: Confirm size of found loopback device. 
- - :returns: str: Full path to the ensured loopback device (eg, /dev/loop0) - ''' - for d, f in six.iteritems(loopback_devices()): - if f == path: - return d - - if not os.path.exists(path): - cmd = ['truncate', '--size', size, path] - check_call(cmd) - - return create_loopback(path) diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py b/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py deleted file mode 100644 index 34b5f71..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/linux/lvm.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -from subprocess import ( - CalledProcessError, - check_call, - check_output, - Popen, - PIPE, -) - - -################################################## -# LVM helpers. -################################################## -def deactivate_lvm_volume_group(block_device): - ''' - Deactivate any volume gruop associated with an LVM physical volume. - - :param block_device: str: Full path to LVM physical volume - ''' - vg = list_lvm_volume_group(block_device) - if vg: - cmd = ['vgchange', '-an', vg] - check_call(cmd) - - -def is_lvm_physical_volume(block_device): - ''' - Determine whether a block device is initialized as an LVM PV. - - :param block_device: str: Full path of block device to inspect. - - :returns: boolean: True if block device is a PV, False if not. 
- ''' - try: - check_output(['pvdisplay', block_device]) - return True - except CalledProcessError: - return False - - -def remove_lvm_physical_volume(block_device): - ''' - Remove LVM PV signatures from a given block device. - - :param block_device: str: Full path of block device to scrub. - ''' - p = Popen(['pvremove', '-ff', block_device], - stdin=PIPE) - p.communicate(input='y\n') - - -def list_lvm_volume_group(block_device): - ''' - List LVM volume group associated with a given block device. - - Assumes block device is a valid LVM PV. - - :param block_device: str: Full path of block device to inspect. - - :returns: str: Name of volume group associated with block device or None - ''' - vg = None - pvd = check_output(['pvdisplay', block_device]).splitlines() - for l in pvd: - l = l.decode('UTF-8') - if l.strip().startswith('VG Name'): - vg = ' '.join(l.strip().split()[2:]) - return vg - - -def create_lvm_physical_volume(block_device): - ''' - Initialize a block device as an LVM physical volume. - - :param block_device: str: Full path of block device to initialize. - - ''' - check_call(['pvcreate', block_device]) - - -def create_lvm_volume_group(volume_group, block_device): - ''' - Create an LVM volume group backed by a given block device. - - Assumes block device has already been initialized as an LVM PV. - - :param volume_group: str: Name of volume group to create. - :block_device: str: Full path of PV-initialized block device. - ''' - check_call(['vgcreate', volume_group, block_device]) diff --git a/hooks/charmhelpers-pl/contrib/storage/linux/utils.py b/hooks/charmhelpers-pl/contrib/storage/linux/utils.py deleted file mode 100644 index c8373b7..0000000 --- a/hooks/charmhelpers-pl/contrib/storage/linux/utils.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. 
-# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os -import re -from stat import S_ISBLK - -from subprocess import ( - check_call, - check_output, - call -) - - -def is_block_device(path): - ''' - Confirm device at path is a valid block device node. - - :returns: boolean: True if path is a block device, False if not. - ''' - if not os.path.exists(path): - return False - return S_ISBLK(os.stat(path).st_mode) - - -def zap_disk(block_device): - ''' - Clear a block device of partition table. Relies on sgdisk, which is - installed as pat of the 'gdisk' package in Ubuntu. - - :param block_device: str: Full path of block device to clean. - ''' - # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) - dev_end = check_output(['blockdev', '--getsz', - block_device]).decode('UTF-8') - gpt_end = int(dev_end.split()[0]) - 100 - check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), - 'bs=1M', 'count=1']) - check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device), - 'bs=512', 'count=100', 'seek=%s' % (gpt_end)]) - - -def is_device_mounted(device): - '''Given a device path, return True if that device is mounted, and False - if it isn't. - - :param device: str: Full path of the device to check. - :returns: boolean: True if the path represents a mounted device, False if - it doesn't. 
- ''' - is_partition = bool(re.search(r".*[0-9]+\b", device)) - out = check_output(['mount']).decode('UTF-8') - if is_partition: - return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) diff --git a/hooks/charmhelpers-pl/core/__init__.py b/hooks/charmhelpers-pl/core/__init__.py deleted file mode 100644 index d1400a0..0000000 --- a/hooks/charmhelpers-pl/core/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . diff --git a/hooks/charmhelpers-pl/core/decorators.py b/hooks/charmhelpers-pl/core/decorators.py deleted file mode 100644 index bb05620..0000000 --- a/hooks/charmhelpers-pl/core/decorators.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -# -# Copyright 2014 Canonical Ltd. -# -# Authors: -# Edward Hope-Morley -# - -import time - -from charmhelpers.core.hookenv import ( - log, - INFO, -) - - -def retry_on_exception(num_retries, base_delay=0, exc_type=Exception): - """If the decorated function raises exception exc_type, allow num_retries - retry attempts before raise the exception. - """ - def _retry_on_exception_inner_1(f): - def _retry_on_exception_inner_2(*args, **kwargs): - retries = num_retries - multiplier = 1 - while True: - try: - return f(*args, **kwargs) - except exc_type: - if not retries: - raise - - delay = base_delay * multiplier - multiplier += 1 - log("Retrying '%s' %d more times (delay=%s)" % - (f.__name__, retries, delay), level=INFO) - retries -= 1 - if delay: - time.sleep(delay) - - return _retry_on_exception_inner_2 - - return _retry_on_exception_inner_1 diff --git a/hooks/charmhelpers-pl/core/fstab.py b/hooks/charmhelpers-pl/core/fstab.py deleted file mode 100644 index 3056fba..0000000 --- a/hooks/charmhelpers-pl/core/fstab.py +++ /dev/null @@ -1,134 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import io -import os - -__author__ = 'Jorge Niedbalski R. 
' - - -class Fstab(io.FileIO): - """This class extends file in order to implement a file reader/writer - for file `/etc/fstab` - """ - - class Entry(object): - """Entry class represents a non-comment line on the `/etc/fstab` file - """ - def __init__(self, device, mountpoint, filesystem, - options, d=0, p=0): - self.device = device - self.mountpoint = mountpoint - self.filesystem = filesystem - - if not options: - options = "defaults" - - self.options = options - self.d = int(d) - self.p = int(p) - - def __eq__(self, o): - return str(self) == str(o) - - def __str__(self): - return "{} {} {} {} {} {}".format(self.device, - self.mountpoint, - self.filesystem, - self.options, - self.d, - self.p) - - DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab') - - def __init__(self, path=None): - if path: - self._path = path - else: - self._path = self.DEFAULT_PATH - super(Fstab, self).__init__(self._path, 'rb+') - - def _hydrate_entry(self, line): - # NOTE: use split with no arguments to split on any - # whitespace including tabs - return Fstab.Entry(*filter( - lambda x: x not in ('', None), - line.strip("\n").split())) - - @property - def entries(self): - self.seek(0) - for line in self.readlines(): - line = line.decode('us-ascii') - try: - if line.strip() and not line.strip().startswith("#"): - yield self._hydrate_entry(line) - except ValueError: - pass - - def get_entry_by_attr(self, attr, value): - for entry in self.entries: - e_attr = getattr(entry, attr) - if e_attr == value: - return entry - return None - - def add_entry(self, entry): - if self.get_entry_by_attr('device', entry.device): - return False - - self.write((str(entry) + '\n').encode('us-ascii')) - self.truncate() - return entry - - def remove_entry(self, entry): - self.seek(0) - - lines = [l.decode('us-ascii') for l in self.readlines()] - - found = False - for index, line in enumerate(lines): - if line.strip() and not line.strip().startswith("#"): - if self._hydrate_entry(line) == entry: - found = True - 
break - - if not found: - return False - - lines.remove(line) - - self.seek(0) - self.write(''.join(lines).encode('us-ascii')) - self.truncate() - return True - - @classmethod - def remove_by_mountpoint(cls, mountpoint, path=None): - fstab = cls(path=path) - entry = fstab.get_entry_by_attr('mountpoint', mountpoint) - if entry: - return fstab.remove_entry(entry) - return False - - @classmethod - def add(cls, device, mountpoint, filesystem, options=None, path=None): - return cls(path=path).add_entry(Fstab.Entry(device, - mountpoint, filesystem, - options=options)) diff --git a/hooks/charmhelpers-pl/core/hookenv.py b/hooks/charmhelpers-pl/core/hookenv.py deleted file mode 100644 index fa6193f..0000000 --- a/hooks/charmhelpers-pl/core/hookenv.py +++ /dev/null @@ -1,667 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -"Interactions with the Juju environment" -# Copyright 2013 Canonical Ltd. 
-# -# Authors: -# Charm Helpers Developers - -from __future__ import print_function -from functools import wraps -import os -import json -import yaml -import subprocess -import sys -import errno -from subprocess import CalledProcessError - -import six -if not six.PY3: - from UserDict import UserDict -else: - from collections import UserDict - -CRITICAL = "CRITICAL" -ERROR = "ERROR" -WARNING = "WARNING" -INFO = "INFO" -DEBUG = "DEBUG" -MARKER = object() - -cache = {} - - -def cached(func): - """Cache return values for multiple executions of func + args - - For example:: - - @cached - def unit_get(attribute): - pass - - unit_get('test') - - will cache the result of unit_get + 'test' for future calls. - """ - @wraps(func) - def wrapper(*args, **kwargs): - global cache - key = str((func, args, kwargs)) - try: - return cache[key] - except KeyError: - pass # Drop out of the exception handler scope. - res = func(*args, **kwargs) - cache[key] = res - return res - return wrapper - - -def flush(key): - """Flushes any entries from function cache where the - key is found in the function+args """ - flush_list = [] - for item in cache: - if key in item: - flush_list.append(item) - for item in flush_list: - del cache[item] - - -def log(message, level=None): - """Write a message to the juju log""" - command = ['juju-log'] - if level: - command += ['-l', level] - if not isinstance(message, six.string_types): - message = repr(message) - command += [message] - # Missing juju-log should not cause failures in unit tests - # Send log output to stderr - try: - subprocess.call(command) - except OSError as e: - if e.errno == errno.ENOENT: - if level: - message = "{}: {}".format(level, message) - message = "juju-log: {}".format(message) - print(message, file=sys.stderr) - else: - raise - - -class Serializable(UserDict): - """Wrapper, an object that can be serialized to yaml or json""" - - def __init__(self, obj): - # wrap the object - UserDict.__init__(self) - self.data = obj - - def 
__getattr__(self, attr): - # See if this object has attribute. - if attr in ("json", "yaml", "data"): - return self.__dict__[attr] - # Check for attribute in wrapped object. - got = getattr(self.data, attr, MARKER) - if got is not MARKER: - return got - # Proxy to the wrapped object via dict interface. - try: - return self.data[attr] - except KeyError: - raise AttributeError(attr) - - def __getstate__(self): - # Pickle as a standard dictionary. - return self.data - - def __setstate__(self, state): - # Unpickle into our wrapper. - self.data = state - - def json(self): - """Serialize the object to json""" - return json.dumps(self.data) - - def yaml(self): - """Serialize the object to yaml""" - return yaml.dump(self.data) - - -def execution_environment(): - """A convenient bundling of the current execution context""" - context = {} - context['conf'] = config() - if relation_id(): - context['reltype'] = relation_type() - context['relid'] = relation_id() - context['rel'] = relation_get() - context['unit'] = local_unit() - context['rels'] = relations() - context['env'] = os.environ - return context - - -def in_relation_hook(): - """Determine whether we're running in a relation hook""" - return 'JUJU_RELATION' in os.environ - - -def relation_type(): - """The scope for the current relation hook""" - return os.environ.get('JUJU_RELATION', None) - - -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) - - -def local_unit(): - """Local unit ID""" - return os.environ['JUJU_UNIT_NAME'] - - -def remote_unit(): - """The remote unit for the current relation hook""" - return os.environ.get('JUJU_REMOTE_UNIT', None) - - -def service_name(): - """The name service group this unit belongs to""" - return local_unit().split('/')[0] - - -def hook_name(): - """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) - - -class Config(dict): - """A dictionary representation of the charm's 
config.yaml, with some - extra features: - - - See which values in the dictionary have changed since the previous hook. - - For values that have changed, see what the previous value was. - - Store arbitrary data for use in a later hook. - - NOTE: Do not instantiate this object directly - instead call - ``hookenv.config()``, which will return an instance of :class:`Config`. - - Example usage:: - - >>> # inside a hook - >>> from charmhelpers.core import hookenv - >>> config = hookenv.config() - >>> config['foo'] - 'bar' - >>> # store a new key/value for later use - >>> config['mykey'] = 'myval' - - - >>> # user runs `juju set mycharm foo=baz` - >>> # now we're inside subsequent config-changed hook - >>> config = hookenv.config() - >>> config['foo'] - 'baz' - >>> # test to see if this val has changed since last hook - >>> config.changed('foo') - True - >>> # what was the previous value? - >>> config.previous('foo') - 'bar' - >>> # keys/values that we add are preserved across hooks - >>> config['mykey'] - 'myval' - - """ - CONFIG_FILE_NAME = '.juju-persistent-config' - - def __init__(self, *args, **kw): - super(Config, self).__init__(*args, **kw) - self.implicit_save = True - self._prev_dict = None - self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): - self.load_previous() - - def __getitem__(self, key): - """For regular dict lookups, check the current juju config first, - then the previous (saved) copy. This ensures that user-saved values - will be returned by a dict lookup. - - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) - - def load_previous(self, path=None): - """Load previous copy of config from disk. 
- - In normal usage you don't need to call this method directly - it - is called automatically at object initialization. - - :param path: - - File path from which to load the previous config. If `None`, - config is loaded from the default location. If `path` is - specified, subsequent `save()` calls will write to the same - path. - - """ - self.path = path or self.path - with open(self.path) as f: - self._prev_dict = json.load(f) - - def changed(self, key): - """Return True if the current value for this key is different from - the previous value. - - """ - if self._prev_dict is None: - return True - return self.previous(key) != self.get(key) - - def previous(self, key): - """Return previous value for this key, or None if there - is no previous value. - - """ - if self._prev_dict: - return self._prev_dict.get(key) - return None - - def save(self): - """Save this config to disk. - - If the charm is using the :mod:`Services Framework ` - or :meth:'@hook ' decorator, this - is called automatically at the end of successful hook execution. - Otherwise, it should be called directly by user code. - - To disable automatic saves, set ``implicit_save=False`` on this - instance. 
- - """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v - with open(self.path, 'w') as f: - json.dump(self, f) - - -@cached -def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - config_cmd_line.append('--format=json') - try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) - if scope is not None: - return config_data - return Config(config_data) - except ValueError: - return None - - -@cached -def relation_get(attribute=None, unit=None, rid=None): - """Get relation information""" - _args = ['relation-get', '--format=json'] - if rid: - _args.append('-r') - _args.append(rid) - _args.append(attribute or '-') - if unit: - _args.append(unit) - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - except CalledProcessError as e: - if e.returncode == 2: - return None - raise - - -def relation_set(relation_id=None, relation_settings=None, **kwargs): - """Set relation information for the current unit""" - relation_settings = relation_settings if relation_settings else {} - relation_cmd_line = ['relation-set'] - if relation_id is not None: - relation_cmd_line.extend(('-r', relation_id)) - for k, v in (list(relation_settings.items()) + list(kwargs.items())): - if v is None: - relation_cmd_line.append('{}='.format(k)) - else: - relation_cmd_line.append('{}={}'.format(k, v)) - subprocess.check_call(relation_cmd_line) - # Flush cache of any relation-gets for local unit - flush(local_unit()) - - -@cached -def relation_ids(reltype=None): - """A list of relation_ids""" - reltype = reltype or relation_type() - relid_cmd_line = ['relation-ids', '--format=json'] - if reltype is not None: - relid_cmd_line.append(reltype) - return json.loads( - subprocess.check_output(relid_cmd_line).decode('UTF-8')) or [] - return [] - - -@cached -def 
related_units(relid=None): - """A list of related units""" - relid = relid or relation_id() - units_cmd_line = ['relation-list', '--format=json'] - if relid is not None: - units_cmd_line.extend(('-r', relid)) - return json.loads( - subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] - - -@cached -def relation_for_unit(unit=None, rid=None): - """Get the json represenation of a unit's relation""" - unit = unit or remote_unit() - relation = relation_get(unit=unit, rid=rid) - for key in relation: - if key.endswith('-list'): - relation[key] = relation[key].split() - relation['__unit__'] = unit - return relation - - -@cached -def relations_for_id(relid=None): - """Get relations of a specific relation ID""" - relation_data = [] - relid = relid or relation_ids() - for unit in related_units(relid): - unit_data = relation_for_unit(unit, relid) - unit_data['__relid__'] = relid - relation_data.append(unit_data) - return relation_data - - -@cached -def relations_of_type(reltype=None): - """Get relations of a specific type""" - relation_data = [] - reltype = reltype or relation_type() - for relid in relation_ids(reltype): - for relation in relations_for_id(relid): - relation['__relid__'] = relid - relation_data.append(relation) - return relation_data - - -@cached -def metadata(): - """Get the current charm metadata.yaml contents as a python object""" - with open(os.path.join(charm_dir(), 'metadata.yaml')) as md: - return yaml.safe_load(md) - - -@cached -def relation_types(): - """Get a list of relation types supported by this charm""" - rel_types = [] - md = metadata() - for key in ('provides', 'requires', 'peers'): - section = md.get(key) - if section: - rel_types.extend(section.keys()) - return rel_types - - -@cached -def charm_name(): - """Get the name of the current charm as is specified on metadata.yaml""" - return metadata().get('name') - - -@cached -def relations(): - """Get a nested dictionary of relation data for all related units""" - rels = {} - for reltype 
in relation_types(): - relids = {} - for relid in relation_ids(reltype): - units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} - for unit in related_units(relid): - reldata = relation_get(unit=unit, rid=relid) - units[unit] = reldata - relids[relid] = units - rels[reltype] = relids - return rels - - -@cached -def is_relation_made(relation, keys='private-address'): - ''' - Determine whether a relation is established by checking for - presence of key(s). If a list of keys is provided, they - must all be present for the relation to be identified as made - ''' - if isinstance(keys, str): - keys = [keys] - for r_id in relation_ids(relation): - for unit in related_units(r_id): - context = {} - for k in keys: - context[k] = relation_get(k, rid=r_id, - unit=unit) - if None not in context.values(): - return True - return False - - -def open_port(port, protocol="TCP"): - """Open a service network port""" - _args = ['open-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) - - -def close_port(port, protocol="TCP"): - """Close a service network port""" - _args = ['close-port'] - _args.append('{}/{}'.format(port, protocol)) - subprocess.check_call(_args) - - -@cached -def unit_get(attribute): - """Get the unit ID for the remote unit""" - _args = ['unit-get', '--format=json', attribute] - try: - return json.loads(subprocess.check_output(_args).decode('UTF-8')) - except ValueError: - return None - - -def unit_public_ip(): - """Get this unit's public IP address""" - return unit_get('public-address') - - -def unit_private_ip(): - """Get this unit's private IP address""" - return unit_get('private-address') - - -class UnregisteredHookError(Exception): - """Raised when an undefined hook is called""" - pass - - -class Hooks(object): - """A convenient handler for hook functions. 
- - Example:: - - hooks = Hooks() - - # register a hook, taking its name from the function name - @hooks.hook() - def install(): - pass # your code here - - # register a hook, providing a custom hook name - @hooks.hook("config-changed") - def config_changed(): - pass # your code here - - if __name__ == "__main__": - # execute a hook based on the name the program is called by - hooks.execute(sys.argv) - """ - - def __init__(self, config_save=True): - super(Hooks, self).__init__() - self._hooks = {} - self._config_save = config_save - - def register(self, name, function): - """Register a hook""" - self._hooks[name] = function - - def execute(self, args): - """Execute a registered hook based on args[0]""" - hook_name = os.path.basename(args[0]) - if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() - else: - raise UnregisteredHookError(hook_name) - - def hook(self, *hook_names): - """Decorator, registering them as hooks""" - def wrapper(decorated): - for hook_name in hook_names: - self.register(hook_name, decorated) - else: - self.register(decorated.__name__, decorated) - if '_' in decorated.__name__: - self.register( - decorated.__name__.replace('_', '-'), decorated) - return decorated - return wrapper - - -def charm_dir(): - """Return the root directory of the current charm""" - return os.environ.get('CHARM_DIR') - - -@cached -def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" - cmd = ['action-get'] - if key is not None: - cmd.append(key) - cmd.append('--format=json') - action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) - return action_data - - -def action_set(values): - """Sets the values to be returned after the action finishes""" - cmd = ['action-set'] - for k, v in list(values.items()): - cmd.append('{}={}'.format(k, v)) - subprocess.check_call(cmd) - - -def action_fail(message): - """Sets the action status to 
failed and sets the error message. - - The results set by action_set are preserved.""" - subprocess.check_call(['action-fail', message]) - - -def status_set(workload_state, message): - """Set the workload state with a message - - Use status-set to set the workload state with a message which is visible - to the user via juju status. If the status-set command is not found then - assume this is juju < 1.23 and juju-log the message unstead. - - workload_state -- valid juju workload state. - message -- status update message - """ - valid_states = ['maintenance', 'blocked', 'waiting', 'active'] - if workload_state not in valid_states: - raise ValueError( - '{!r} is not a valid workload state'.format(workload_state) - ) - cmd = ['status-set', workload_state, message] - try: - ret = subprocess.call(cmd) - if ret == 0: - return - except OSError as e: - if e.errno != errno.ENOENT: - raise - log_message = 'status-set failed: {} {}'.format(workload_state, - message) - log(log_message, level='INFO') - - -def status_get(): - """Retrieve the previously set juju workload state - - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' - """ - cmd = ['status-get'] - try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status - except OSError as e: - if e.errno == errno.ENOENT: - return 'unknown' - else: - raise diff --git a/hooks/charmhelpers-pl/core/host.py b/hooks/charmhelpers-pl/core/host.py deleted file mode 100644 index 0d2ab4b..0000000 --- a/hooks/charmhelpers-pl/core/host.py +++ /dev/null @@ -1,450 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -"""Tools for working with the host system""" -# Copyright 2012 Canonical Ltd. -# -# Authors: -# Nick Moffitt -# Matthew Wedgwood - -import os -import re -import pwd -import grp -import random -import string -import subprocess -import hashlib -from contextlib import contextmanager -from collections import OrderedDict - -import six - -from .hookenv import log -from .fstab import Fstab - - -def service_start(service_name): - """Start a system service""" - return service('start', service_name) - - -def service_stop(service_name): - """Stop a system service""" - return service('stop', service_name) - - -def service_restart(service_name): - """Restart a system service""" - return service('restart', service_name) - - -def service_reload(service_name, restart_on_failure=False): - """Reload a system service, optionally falling back to restart if - reload fails""" - service_result = service('reload', service_name) - if not service_result and restart_on_failure: - service_result = service('restart', service_name) - return service_result - - -def service(action, service_name): - """Control a system service""" - cmd = ['service', service_name, action] - return subprocess.call(cmd) == 0 - - -def service_running(service): - """Determine whether a system service is running""" - try: - output = subprocess.check_output( - ['service', service, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError: - return False - else: - if ("start/running" in output or "is running" in output): - return True - else: - return False - - -def service_available(service_name): - """Determine 
whether a system service is available""" - try: - subprocess.check_output( - ['service', service_name, 'status'], - stderr=subprocess.STDOUT).decode('UTF-8') - except subprocess.CalledProcessError as e: - return b'unrecognized service' not in e.output - else: - return True - - -def adduser(username, password=None, shell='/bin/bash', system_user=False): - """Add a user to the system""" - try: - user_info = pwd.getpwnam(username) - log('user {0} already exists!'.format(username)) - except KeyError: - log('creating user {0}'.format(username)) - cmd = ['useradd'] - if system_user or password is None: - cmd.append('--system') - else: - cmd.extend([ - '--create-home', - '--shell', shell, - '--password', password, - ]) - cmd.append(username) - subprocess.check_call(cmd) - user_info = pwd.getpwnam(username) - return user_info - - -def add_group(group_name, system_group=False): - """Add a group to the system""" - try: - group_info = grp.getgrnam(group_name) - log('group {0} already exists!'.format(group_name)) - except KeyError: - log('creating group {0}'.format(group_name)) - cmd = ['addgroup'] - if system_group: - cmd.append('--system') - else: - cmd.extend([ - '--group', - ]) - cmd.append(group_name) - subprocess.check_call(cmd) - group_info = grp.getgrnam(group_name) - return group_info - - -def add_user_to_group(username, group): - """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] - log("Adding user {} to group {}".format(username, group)) - subprocess.check_call(cmd) - - -def rsync(from_path, to_path, flags='-r', options=None): - """Replicate the contents of a path""" - options = options or ['--delete', '--executability'] - cmd = ['/usr/bin/rsync', flags] - cmd.extend(options) - cmd.append(from_path) - cmd.append(to_path) - log(" ".join(cmd)) - return subprocess.check_output(cmd).decode('UTF-8').strip() - - -def symlink(source, destination): - """Create a symbolic link""" - log("Symlinking {} as {}".format(source, destination)) - cmd = [ 
- 'ln', - '-sf', - source, - destination, - ] - subprocess.check_call(cmd) - - -def mkdir(path, owner='root', group='root', perms=0o555, force=False): - """Create a directory""" - log("Making dir {} {}:{} {:o}".format(path, owner, group, - perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - realpath = os.path.abspath(path) - path_exists = os.path.exists(realpath) - if path_exists and force: - if not os.path.isdir(realpath): - log("Removing non-directory file {} prior to mkdir()".format(path)) - os.unlink(realpath) - os.makedirs(realpath, perms) - elif not path_exists: - os.makedirs(realpath, perms) - os.chown(realpath, uid, gid) - os.chmod(realpath, perms) - - -def write_file(path, content, owner='root', group='root', perms=0o444): - """Create or overwrite a file with the contents of a byte string.""" - log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - with open(path, 'wb') as target: - os.fchown(target.fileno(), uid, gid) - os.fchmod(target.fileno(), perms) - target.write(content) - - -def fstab_remove(mp): - """Remove the given mountpoint entry from /etc/fstab - """ - return Fstab.remove_by_mountpoint(mp) - - -def fstab_add(dev, mp, fs, options=None): - """Adds the given device entry to the /etc/fstab file - """ - return Fstab.add(dev, mp, fs, options=options) - - -def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"): - """Mount a filesystem at a particular mountpoint""" - cmd_args = ['mount'] - if options is not None: - cmd_args.extend(['-o', options]) - cmd_args.extend([device, mountpoint]) - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output)) - return False - - if persist: - return fstab_add(device, mountpoint, filesystem, options=options) - return True - - -def umount(mountpoint, persist=False): - """Unmount a 
filesystem""" - cmd_args = ['umount', mountpoint] - try: - subprocess.check_output(cmd_args) - except subprocess.CalledProcessError as e: - log('Error unmounting {}\n{}'.format(mountpoint, e.output)) - return False - - if persist: - return fstab_remove(mountpoint) - return True - - -def mounts(): - """Get a list of all mounted volumes as [[mountpoint,device],[...]]""" - with open('/proc/mounts') as f: - # [['/mount/point','/dev/path'],[...]] - system_mounts = [m[1::-1] for m in [l.strip().split() - for l in f.readlines()]] - return system_mounts - - -def file_hash(path, hash_type='md5'): - """ - Generate a hash checksum of the contents of 'path' or None if not found. - - :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. - """ - if os.path.exists(path): - h = getattr(hashlib, hash_type)() - with open(path, 'rb') as source: - h.update(source.read()) - return h.hexdigest() - else: - return None - - -def check_hash(path, checksum, hash_type='md5'): - """ - Validate a file using a cryptographic checksum. - - :param str checksum: Value of the checksum used to validate the file. - :param str hash_type: Hash algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
- :raises ChecksumError: If the file fails the checksum - - """ - actual_checksum = file_hash(path, hash_type) - if checksum != actual_checksum: - raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum)) - - -class ChecksumError(ValueError): - pass - - -def restart_on_change(restart_map, stopstart=False): - """Restart services based on configuration files changing - - This function is used a decorator, for example:: - - @restart_on_change({ - '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] - }) - def ceph_client_changed(): - pass # your code here - - In this example, the cinder-api and cinder-volume services - would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. - """ - def wrap(f): - def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) - f(*args, **kwargs) - restarts = [] - for path in restart_map: - if checksums[path] != file_hash(path): - restarts += restart_map[path] - services_list = list(OrderedDict.fromkeys(restarts)) - if not stopstart: - for service_name in services_list: - service('restart', service_name) - else: - for action in ['stop', 'start']: - for service_name in services_list: - service(action, service_name) - return wrapped_f - return wrap - - -def lsb_release(): - """Return /etc/lsb-release in a dict""" - d = {} - with open('/etc/lsb-release', 'r') as lsb: - for l in lsb: - k, v = l.split('=') - d[k.strip()] = v.strip() - return d - - -def pwgen(length=None): - """Generate a random pasword.""" - if length is None: - # A random length is ok to use a weak PRNG - length = random.choice(range(35, 45)) - alphanumeric_chars = [ - l for l in (string.ascii_letters + string.digits) - if l not in 'l0QD1vAEIOUaeiou'] - # Use a crypto-friendly PRNG (e.g. 
/dev/urandom) for making the - # actual password - random_generator = random.SystemRandom() - random_chars = [ - random_generator.choice(alphanumeric_chars) for _ in range(length)] - return(''.join(random_chars)) - - -def list_nics(nic_type): - '''Return a list of nics of given type(s)''' - if isinstance(nic_type, six.string_types): - int_types = [nic_type] - else: - int_types = nic_type - interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) - for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) - - return interfaces - - -def set_nic_mtu(nic, mtu): - '''Set MTU on a network interface''' - cmd = ['ip', 'link', 'set', nic, 'mtu', mtu] - subprocess.check_call(cmd) - - -def get_nic_mtu(nic): - cmd = ['ip', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - mtu = "" - for line in ip_output: - words = line.split() - if 'mtu' in words: - mtu = words[words.index("mtu") + 1] - return mtu - - -def get_nic_hwaddr(nic): - cmd = ['ip', '-o', '-0', 'addr', 'show', nic] - ip_output = subprocess.check_output(cmd).decode('UTF-8') - hwaddr = "" - words = ip_output.split() - if 'link/ether' in words: - hwaddr = words[words.index('link/ether') + 1] - return hwaddr - - -def cmp_pkgrevno(package, revno, pkgcache=None): - '''Compare supplied revno with the revno of the installed package - - * 1 => Installed revno is greater than supplied arg - * 0 => Installed revno is the same as supplied arg - * -1 => Installed revno is less than supplied arg - - This function imports apt_cache function from charmhelpers.fetch if - the pkgcache argument is None. 
Be sure to add charmhelpers.fetch if - you call this function, or pass an apt_pkg.Cache() instance. - ''' - import apt_pkg - if not pkgcache: - from charmhelpers.fetch import apt_cache - pkgcache = apt_cache() - pkg = pkgcache[package] - return apt_pkg.version_compare(pkg.current_ver.ver_str, revno) - - -@contextmanager -def chdir(d): - cur = os.getcwd() - try: - yield os.chdir(d) - finally: - os.chdir(cur) - - -def chownr(path, owner, group, follow_links=True): - uid = pwd.getpwnam(owner).pw_uid - gid = grp.getgrnam(group).gr_gid - if follow_links: - chown = os.chown - else: - chown = os.lchown - - for root, dirs, files in os.walk(path): - for name in dirs + files: - full = os.path.join(root, name) - broken_symlink = os.path.lexists(full) and not os.path.exists(full) - if not broken_symlink: - chown(full, uid, gid) - - -def lchownr(path, owner, group): - chownr(path, owner, group, follow_links=False) diff --git a/hooks/charmhelpers-pl/core/services/base.py b/hooks/charmhelpers-pl/core/services/base.py deleted file mode 100644 index 2ff5338..0000000 --- a/hooks/charmhelpers-pl/core/services/base.py +++ /dev/null @@ -1,329 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import os -import re -import json -from collections import Iterable, OrderedDict - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -__all__ = ['ServiceManager', 'ManagerCallback', - 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports', - 'service_restart', 'service_stop'] - - -class ServiceManager(object): - def __init__(self, services=None): - """ - Register a list of services, given their definitions. - - Service definitions are dicts in the following formats (all keys except - 'service' are optional):: - - { - "service": , - "required_data": , - "provided_data": , - "data_ready": , - "data_lost": , - "start": , - "stop": , - "ports": , - } - - The 'required_data' list should contain dicts of required data (or - dependency managers that act like dicts and know how to collect the data). - Only when all items in the 'required_data' list are populated are the list - of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more - information. - - The 'provided_data' list should contain relation data providers, most likely - a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`, - that will indicate a set of data to set on a given relation. - - The 'data_ready' value should be either a single callback, or a list of - callbacks, to be called when all items in 'required_data' pass `is_ready()`. - Each callback will be called with the service name as the only parameter. - After all of the 'data_ready' callbacks are called, the 'start' callbacks - are fired. - - The 'data_lost' value should be either a single callback, or a list of - callbacks, to be called when a 'required_data' item no longer passes - `is_ready()`. Each callback will be called with the service name as the - only parameter. After all of the 'data_lost' callbacks are called, - the 'stop' callbacks are fired. 
- - The 'start' value should be either a single callback, or a list of - callbacks, to be called when starting the service, after the 'data_ready' - callbacks are complete. Each callback will be called with the service - name as the only parameter. This defaults to - `[host.service_start, services.open_ports]`. - - The 'stop' value should be either a single callback, or a list of - callbacks, to be called when stopping the service. If the service is - being stopped because it no longer has all of its 'required_data', this - will be called after all of the 'data_lost' callbacks are complete. - Each callback will be called with the service name as the only parameter. - This defaults to `[services.close_ports, host.service_stop]`. - - The 'ports' value should be a list of ports to manage. The default - 'start' handler will open the ports after the service is started, - and the default 'stop' handler will close the ports prior to stopping - the service. - - - Examples: - - The following registers an Upstart service called bingod that depends on - a mongodb relation and which runs a custom `db_migrate` function prior to - restarting the service, and a Runit service called spadesd:: - - manager = services.ServiceManager([ - { - 'service': 'bingod', - 'ports': [80, 443], - 'required_data': [MongoRelation(), config(), {'my': 'data'}], - 'data_ready': [ - services.template(source='bingod.conf'), - services.template(source='bingod.ini', - target='/etc/bingod.ini', - owner='bingo', perms=0400), - ], - }, - { - 'service': 'spadesd', - 'data_ready': services.template(source='spadesd_run.j2', - target='/etc/sv/spadesd/run', - perms=0555), - 'start': runit_start, - 'stop': runit_stop, - }, - ]) - manager.manage() - """ - self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json') - self._ready = None - self.services = OrderedDict() - for service in services or []: - service_name = service['service'] - self.services[service_name] = service - - def manage(self): - 
""" - Handle the current hook by doing The Right Thing with the registered services. - """ - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.provide_data() - self.reconfigure_services() - cfg = hookenv.config() - if cfg.implicit_save: - cfg.save() - - def provide_data(self): - """ - Set the relation data for each provider in the ``provided_data`` list. - - A provider must have a `name` attribute, which indicates which relation - to set data on, and a `provide_data()` method, which returns a dict of - data to set. - """ - hook_name = hookenv.hook_name() - for service in self.services.values(): - for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) - - def reconfigure_services(self, *service_names): - """ - Update all files for one or more registered services, and, - if ready, optionally restart them. - - If no service names are given, reconfigures all registered services. - """ - for service_name in service_names or self.services.keys(): - if self.is_ready(service_name): - self.fire_event('data_ready', service_name) - self.fire_event('start', service_name, default=[ - service_restart, - manage_ports]) - self.save_ready(service_name) - else: - if self.was_ready(service_name): - self.fire_event('data_lost', service_name) - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - self.save_lost(service_name) - - def stop_services(self, *service_names): - """ - Stop one or more registered services, by name. - - If no service names are given, stops all registered services. 
- """ - for service_name in service_names or self.services.keys(): - self.fire_event('stop', service_name, default=[ - manage_ports, - service_stop]) - - def get_service(self, service_name): - """ - Given the name of a registered service, return its service definition. - """ - service = self.services.get(service_name) - if not service: - raise KeyError('Service not registered: %s' % service_name) - return service - - def fire_event(self, event_name, service_name, default=None): - """ - Fire a data_ready, data_lost, start, or stop event on a given service. - """ - service = self.get_service(service_name) - callbacks = service.get(event_name, default) - if not callbacks: - return - if not isinstance(callbacks, Iterable): - callbacks = [callbacks] - for callback in callbacks: - if isinstance(callback, ManagerCallback): - callback(self, service_name, event_name) - else: - callback(service_name) - - def is_ready(self, service_name): - """ - Determine if a registered service is ready, by checking its 'required_data'. - - A 'required_data' item can be any mapping type, and is considered ready - if `bool(item)` evaluates as True. - """ - service = self.get_service(service_name) - reqs = service.get('required_data', []) - return all(bool(req) for req in reqs) - - def _load_ready_file(self): - if self._ready is not None: - return - if os.path.exists(self._ready_file): - with open(self._ready_file) as fp: - self._ready = set(json.load(fp)) - else: - self._ready = set() - - def _save_ready_file(self): - if self._ready is None: - return - with open(self._ready_file, 'w') as fp: - json.dump(list(self._ready), fp) - - def save_ready(self, service_name): - """ - Save an indicator that the given service is now data_ready. - """ - self._load_ready_file() - self._ready.add(service_name) - self._save_ready_file() - - def save_lost(self, service_name): - """ - Save an indicator that the given service is no longer data_ready. 
- """ - self._load_ready_file() - self._ready.discard(service_name) - self._save_ready_file() - - def was_ready(self, service_name): - """ - Determine if the given service was previously data_ready. - """ - self._load_ready_file() - return service_name in self._ready - - -class ManagerCallback(object): - """ - Special case of a callback that takes the `ServiceManager` instance - in addition to the service name. - - Subclasses should implement `__call__` which should accept three parameters: - - * `manager` The `ServiceManager` instance - * `service_name` The name of the service it's being triggered for - * `event_name` The name of the event that this callback is handling - """ - def __call__(self, manager, service_name, event_name): - raise NotImplementedError() - - -class PortManagerCallback(ManagerCallback): - """ - Callback class that will open or close ports, for use as either - a start or stop action. - """ - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - new_ports = service.get('ports', []) - port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) - if os.path.exists(port_file): - with open(port_file) as fp: - old_ports = fp.read().split(',') - for old_port in old_ports: - if bool(old_port): - old_port = int(old_port) - if old_port not in new_ports: - hookenv.close_port(old_port) - with open(port_file, 'w') as fp: - fp.write(','.join(str(port) for port in new_ports)) - for port in new_ports: - if event_name == 'start': - hookenv.open_port(port) - elif event_name == 'stop': - hookenv.close_port(port) - - -def service_stop(service_name): - """ - Wrapper around host.service_stop to prevent spurious "unknown service" - messages in the logs. - """ - if host.service_running(service_name): - host.service_stop(service_name) - - -def service_restart(service_name): - """ - Wrapper around host.service_restart to prevent spurious "unknown service" - messages in the logs. 
- """ - if host.service_available(service_name): - if host.service_running(service_name): - host.service_restart(service_name) - else: - host.service_start(service_name) - - -# Convenience aliases -open_ports = close_ports = manage_ports = PortManagerCallback() diff --git a/hooks/charmhelpers-pl/core/services/helpers.py b/hooks/charmhelpers-pl/core/services/helpers.py deleted file mode 100644 index 3eb5fb4..0000000 --- a/hooks/charmhelpers-pl/core/services/helpers.py +++ /dev/null @@ -1,267 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os -import yaml -from charmhelpers.core import hookenv -from charmhelpers.core import templating - -from charmhelpers.core.services.base import ManagerCallback - - -__all__ = ['RelationContext', 'TemplateCallback', - 'render_template', 'template'] - - -class RelationContext(dict): - """ - Base class for a context generator that gets relation data from juju. - - Subclasses must provide the attributes `name`, which is the name of the - interface of interest, `interface`, which is the type of the interface of - interest, and `required_keys`, which is the set of keys required for the - relation to be considered complete. The data for all interfaces matching - the `name` attribute that are complete will used to populate the dictionary - values (see `get_data`, below). 
- - The generated context will be namespaced under the relation :attr:`name`, - to prevent potential naming conflicts. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = None - interface = None - - def __init__(self, name=None, additional_required_keys=None): - if not hasattr(self, 'required_keys'): - self.required_keys = [] - - if name is not None: - self.name = name - if additional_required_keys: - self.required_keys.extend(additional_required_keys) - self.get_data() - - def __bool__(self): - """ - Returns True if all of the required_keys are available. - """ - return self.is_ready() - - __nonzero__ = __bool__ - - def __repr__(self): - return super(RelationContext, self).__repr__() - - def is_ready(self): - """ - Returns True if all of the `required_keys` are available from any units. - """ - ready = len(self.get(self.name, [])) > 0 - if not ready: - hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG) - return ready - - def _is_ready(self, unit_data): - """ - Helper method that tests a set of relation data and returns True if - all of the `required_keys` are present. - """ - return set(unit_data.keys()).issuperset(set(self.required_keys)) - - def get_data(self): - """ - Retrieve the relation data for each unit involved in a relation and, - if complete, store it in a list under `self[self.name]`. This - is automatically called when the RelationContext is instantiated. - - The units are sorted lexographically first by the service ID, then by - the unit ID. Thus, if an interface has two other services, 'db:1' - and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1', - and 'db:2' having one unit, 'mediawiki/0', all of which have a complete - set of data, the relation data for the units will be stored in the - order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'. 
- - If you only care about a single unit on the relation, you can just - access it as `{{ interface[0]['key'] }}`. However, if you can at all - support multiple units on a relation, you should iterate over the list, - like:: - - {% for unit in interface -%} - {{ unit['key'] }}{% if not loop.last %},{% endif %} - {%- endfor %} - - Note that since all sets of relation data from all related services and - units are in a single list, if you need to know which service or unit a - set of data came from, you'll need to extend this class to preserve - that information. - """ - if not hookenv.relation_ids(self.name): - return - - ns = self.setdefault(self.name, []) - for rid in sorted(hookenv.relation_ids(self.name)): - for unit in sorted(hookenv.related_units(rid)): - reldata = hookenv.relation_get(rid=rid, unit=unit) - if self._is_ready(reldata): - ns.append(reldata) - - def provide_data(self): - """ - Return data to be relation_set for this interface. - """ - return {} - - -class MysqlRelation(RelationContext): - """ - Relation context for the `mysql` interface. - - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'db' - interface = 'mysql' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'user', 'password', 'database'] - RelationContext.__init__(self, *args, **kwargs) - - -class HttpRelation(RelationContext): - """ - Relation context for the `http` interface. 
- - :param str name: Override the relation :attr:`name`, since it can vary from charm to charm - :param list additional_required_keys: Extend the list of :attr:`required_keys` - """ - name = 'website' - interface = 'http' - - def __init__(self, *args, **kwargs): - self.required_keys = ['host', 'port'] - RelationContext.__init__(self, *args, **kwargs) - - def provide_data(self): - return { - 'host': hookenv.unit_get('private-address'), - 'port': 80, - } - - -class RequiredConfig(dict): - """ - Data context that loads config options with one or more mandatory options. - - Once the required options have been changed from their default values, all - config options will be available, namespaced under `config` to prevent - potential naming conflicts (for example, between a config option and a - relation property). - - :param list *args: List of options that must be changed from their default values. - """ - - def __init__(self, *args): - self.required_options = args - self['config'] = hookenv.config() - with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp: - self.config = yaml.load(fp).get('options', {}) - - def __bool__(self): - for option in self.required_options: - if option not in self['config']: - return False - current_value = self['config'][option] - default_value = self.config[option].get('default') - if current_value == default_value: - return False - if current_value in (None, '') and default_value in (None, ''): - return False - return True - - def __nonzero__(self): - return self.__bool__() - - -class StoredContext(dict): - """ - A data context that always returns the data that it was first created with. - - This is useful to do a one-time generation of things like passwords, that - will thereafter use the same value that was originally generated, instead - of generating a new value each time it is run. - """ - def __init__(self, file_name, config_data): - """ - If the file exists, populate `self` with the data from the file. 
- Otherwise, populate with the given data and persist it to the file. - """ - if os.path.exists(file_name): - self.update(self.read_context(file_name)) - else: - self.store_context(file_name, config_data) - self.update(config_data) - - def store_context(self, file_name, config_data): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'w') as file_stream: - os.fchmod(file_stream.fileno(), 0o600) - yaml.dump(config_data, file_stream) - - def read_context(self, file_name): - if not os.path.isabs(file_name): - file_name = os.path.join(hookenv.charm_dir(), file_name) - with open(file_name, 'r') as file_stream: - data = yaml.load(file_stream) - if not data: - raise OSError("%s is empty" % file_name) - return data - - -class TemplateCallback(ManagerCallback): - """ - Callback class that will render a Jinja2 template, for use as a ready - action. - - :param str source: The template source file, relative to - `$CHARM_DIR/templates` - - :param str target: The target to write the rendered template to - :param str owner: The owner of the rendered file - :param str group: The group of the rendered file - :param int perms: The permissions of the rendered file - """ - def __init__(self, source, target, - owner='root', group='root', perms=0o444): - self.source = source - self.target = target - self.owner = owner - self.group = group - self.perms = perms - - def __call__(self, manager, service_name, event_name): - service = manager.get_service(service_name) - context = {} - for ctx in service.get('required_data', []): - context.update(ctx) - templating.render(self.source, self.target, context, - self.owner, self.group, self.perms) - - -# Convenience aliases for templates -render_template = template = TemplateCallback diff --git a/hooks/charmhelpers-pl/core/strutils.py b/hooks/charmhelpers-pl/core/strutils.py deleted file mode 100644 index a2a784a..0000000 --- a/hooks/charmhelpers-pl/core/strutils.py +++ /dev/null @@ 
-1,42 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import six - - -def bool_from_string(value): - """Interpret string value as boolean. - - Returns True if value translates to True otherwise False. - """ - if isinstance(value, six.string_types): - value = six.text_type(value) - else: - msg = "Unable to interpret non-string value '%s' as boolean" % (value) - raise ValueError(msg) - - value = value.strip().lower() - - if value in ['y', 'yes', 'true', 't', 'on']: - return True - elif value in ['n', 'no', 'false', 'f', 'off']: - return False - - msg = "Unable to interpret string value '%s' as boolean" % (value) - raise ValueError(msg) diff --git a/hooks/charmhelpers-pl/core/sysctl.py b/hooks/charmhelpers-pl/core/sysctl.py deleted file mode 100644 index 21cc8ab..0000000 --- a/hooks/charmhelpers-pl/core/sysctl.py +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import yaml - -from subprocess import check_call - -from charmhelpers.core.hookenv import ( - log, - DEBUG, - ERROR, -) - -__author__ = 'Jorge Niedbalski R. ' - - -def create(sysctl_dict, sysctl_file): - """Creates a sysctl.conf file from a YAML associative array - - :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" - :type sysctl_dict: str - :param sysctl_file: path to the sysctl file to be saved - :type sysctl_file: str or unicode - :returns: None - """ - try: - sysctl_dict_parsed = yaml.safe_load(sysctl_dict) - except yaml.YAMLError: - log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), - level=ERROR) - return - - with open(sysctl_file, "w") as fd: - for key, value in sysctl_dict_parsed.items(): - fd.write("{}={}\n".format(key, value)) - - log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed), - level=DEBUG) - - check_call(["sysctl", "-p", sysctl_file]) diff --git a/hooks/charmhelpers-pl/core/templating.py b/hooks/charmhelpers-pl/core/templating.py deleted file mode 100644 index 4531999..0000000 --- a/hooks/charmhelpers-pl/core/templating.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. 
-# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os - -from charmhelpers.core import host -from charmhelpers.core import hookenv - - -def render(source, target, context, owner='root', group='root', - perms=0o444, templates_dir=None, encoding='UTF-8'): - """ - Render a template. - - The `source` path, if not absolute, is relative to the `templates_dir`. - - The `target` path should be absolute. - - The context should be a dict containing the values to be replaced in the - template. - - The `owner`, `group`, and `perms` options will be passed to `write_file`. - - If omitted, `templates_dir` defaults to the `templates` folder in the charm. - - Note: Using this requires python-jinja2; if it is not installed, calling - this will attempt to use charmhelpers.fetch.apt_install to install it. - """ - try: - from jinja2 import FileSystemLoader, Environment, exceptions - except ImportError: - try: - from charmhelpers.fetch import apt_install - except ImportError: - hookenv.log('Could not import jinja2, and could not import ' - 'charmhelpers.fetch to install it', - level=hookenv.ERROR) - raise - apt_install('python-jinja2', fatal=True) - from jinja2 import FileSystemLoader, Environment, exceptions - - if templates_dir is None: - templates_dir = os.path.join(hookenv.charm_dir(), 'templates') - loader = Environment(loader=FileSystemLoader(templates_dir)) - try: - source = source - template = loader.get_template(source) - except exceptions.TemplateNotFound as e: - hookenv.log('Could not load template %s from %s.' 
% - (source, templates_dir), - level=hookenv.ERROR) - raise e - content = template.render(context) - host.mkdir(os.path.dirname(target), owner, group, perms=0o755) - host.write_file(target, content.encode(encoding), owner, group, perms) diff --git a/hooks/charmhelpers-pl/core/unitdata.py b/hooks/charmhelpers-pl/core/unitdata.py deleted file mode 100644 index 406a35c..0000000 --- a/hooks/charmhelpers-pl/core/unitdata.py +++ /dev/null @@ -1,477 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . -# -# -# Authors: -# Kapil Thangavelu -# -""" -Intro ------ - -A simple way to store state in units. This provides a key value -storage with support for versioned, transactional operation, -and can calculate deltas from previous values to simplify unit logic -when processing changes. - - -Hook Integration ----------------- - -There are several extant frameworks for hook execution, including - - - charmhelpers.core.hookenv.Hooks - - charmhelpers.core.services.ServiceManager - -The storage classes are framework agnostic, one simple integration is -via the HookData contextmanager. It will record the current hook -execution environment (including relation data, config data, etc.), -setup a transaction and allow easy access to the changes from -previously seen values. 
One consequence of the integration is the -reservation of particular keys ('rels', 'unit', 'env', 'config', -'charm_revisions') for their respective values. - -Here's a fully worked integration example using hookenv.Hooks:: - - from charmhelper.core import hookenv, unitdata - - hook_data = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # Print all changes to configuration from previously seen - # values. - for changed, (prev, cur) in hook_data.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - # Directly access all charm config as a mapping. - conf = db.getrange('config', True) - - # Directly access all relation data as a mapping - rels = db.getrange('rels', True) - - if __name__ == '__main__': - with hook_data(): - hook.execute() - - -A more basic integration is via the hook_scope context manager which simply -manages transaction scope (and records hook name, and timestamp):: - - >>> from unitdata import kv - >>> db = kv() - >>> with db.hook_scope('install'): - ... # do work, in transactional scope. - ... db.set('x', 1) - >>> db.get('x') - 1 - - -Usage ------ - -Values are automatically json de/serialized to preserve basic typing -and complex data struct capabilities (dicts, lists, ints, booleans, etc). - -Individual values can be manipulated via get/set:: - - >>> kv.set('y', True) - >>> kv.get('y') - True - - # We can set complex values (dicts, lists) as a single key. - >>> kv.set('config', {'a': 1, 'b': True'}) - - # Also supports returning dictionaries as a record which - # provides attribute access. 
- >>> config = kv.get('config', record=True) - >>> config.b - True - - -Groups of keys can be manipulated with update/getrange:: - - >>> kv.update({'z': 1, 'y': 2}, prefix="gui.") - >>> kv.getrange('gui.', strip=True) - {'z': 1, 'y': 2} - -When updating values, its very helpful to understand which values -have actually changed and how have they changed. The storage -provides a delta method to provide for this:: - - >>> data = {'debug': True, 'option': 2} - >>> delta = kv.delta(data, 'config.') - >>> delta.debug.previous - None - >>> delta.debug.current - True - >>> delta - {'debug': (None, True), 'option': (None, 2)} - -Note the delta method does not persist the actual change, it needs to -be explicitly saved via 'update' method:: - - >>> kv.update(data, 'config.') - -Values modified in the context of a hook scope retain historical values -associated to the hookname. - - >>> with db.hook_scope('config-changed'): - ... db.set('x', 42) - >>> db.gethistory('x') - [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'), - (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')] - -""" - -import collections -import contextlib -import datetime -import json -import os -import pprint -import sqlite3 -import sys - -__author__ = 'Kapil Thangavelu ' - - -class Storage(object): - """Simple key value database for local unit state within charms. - - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. - - To support dicts, lists, integer, floats, and booleans values - are automatically json encoded/decoded. 
- """ - def __init__(self, path=None): - self.db_path = path - if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') - self.conn = sqlite3.connect('%s' % self.db_path) - self.cursor = self.conn.cursor() - self.revision = None - self._closed = False - self._init() - - def close(self): - if self._closed: - return - self.flush(False) - self.cursor.close() - self.conn.close() - self._closed = True - - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) - result = self.cursor.fetchone() - if not result: - return default - if record: - return Record(json.loads(result[0])) - return json.loads(result[0]) - - def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) - result = self.cursor.fetchall() - - if not result: - return None - if not strip: - key_prefix = '' - return dict([ - (k[len(key_prefix):], json.loads(v)) for k, v in result]) - - def update(self, mapping, prefix=""): - for k, v in mapping.items(): - self.set("%s%s" % (prefix, k), v) - - def unset(self, key): - self.cursor.execute('delete from kv where key=?', [key]) - if self.revision and self.cursor.rowcount: - self.cursor.execute( - 'insert into kv_revisions values (?, ?, ?)', - [key, self.revision, json.dumps('DELETED')]) - - def set(self, key, value): - serialized = json.dumps(value) - - self.cursor.execute( - 'select data from kv where key=?', [key]) - exists = self.cursor.fetchone() - - # Skip mutations to the same value - if exists: - if exists[0] == serialized: - return value - - if not exists: - self.cursor.execute( - 'insert into kv (key, data) values (?, ?)', - (key, serialized)) - else: - self.cursor.execute(''' - update kv - set data = ? 
- where key = ?''', [serialized, key]) - - # Save - if not self.revision: - return value - - self.cursor.execute( - 'select 1 from kv_revisions where key=? and revision=?', - [key, self.revision]) - exists = self.cursor.fetchone() - - if not exists: - self.cursor.execute( - '''insert into kv_revisions ( - revision, key, data) values (?, ?, ?)''', - (self.revision, key, serialized)) - else: - self.cursor.execute( - ''' - update kv_revisions - set data = ? - where key = ? - and revision = ?''', - [serialized, key, self.revision]) - - return value - - def delta(self, mapping, prefix): - """ - return a delta containing values that have changed. - """ - previous = self.getrange(prefix, strip=True) - if not previous: - pk = set() - else: - pk = set(previous.keys()) - ck = set(mapping.keys()) - delta = DeltaSet() - - # added - for k in ck.difference(pk): - delta[k] = Delta(None, mapping[k]) - - # removed - for k in pk.difference(ck): - delta[k] = Delta(previous[k], None) - - # changed - for k in pk.intersection(ck): - c = mapping[k] - p = previous[k] - if c != p: - delta[k] = Delta(p, c) - - return delta - - @contextlib.contextmanager - def hook_scope(self, name=""): - """Scope all future interactions to the current hook execution - revision.""" - assert not self.revision - self.cursor.execute( - 'insert into hooks (hook, date) values (?, ?)', - (name or sys.argv[0], - datetime.datetime.utcnow().isoformat())) - self.revision = self.cursor.lastrowid - try: - yield self.revision - self.revision = None - except: - self.flush(False) - self.revision = None - raise - else: - self.flush() - - def flush(self, save=True): - if save: - self.conn.commit() - elif self._closed: - return - else: - self.conn.rollback() - - def _init(self): - self.cursor.execute(''' - create table if not exists kv ( - key text, - data text, - primary key (key) - )''') - self.cursor.execute(''' - create table if not exists kv_revisions ( - key text, - revision integer, - data text, - primary key (key, 
revision) - )''') - self.cursor.execute(''' - create table if not exists hooks ( - version integer primary key autoincrement, - hook text, - date text - )''') - self.conn.commit() - - def gethistory(self, key, deserialize=False): - self.cursor.execute( - ''' - select kv.revision, kv.key, kv.data, h.hook, h.date - from kv_revisions kv, - hooks h - where kv.key=? - and kv.revision = h.version - ''', [key]) - if deserialize is False: - return self.cursor.fetchall() - return map(_parse_history, self.cursor.fetchall()) - - def debug(self, fh=sys.stderr): - self.cursor.execute('select * from kv') - pprint.pprint(self.cursor.fetchall(), stream=fh) - self.cursor.execute('select * from kv_revisions') - pprint.pprint(self.cursor.fetchall(), stream=fh) - - -def _parse_history(d): - return (d[0], d[1], json.loads(d[2]), d[3], - datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")) - - -class HookData(object): - """Simple integration for existing hook exec frameworks. - - Records all unit information, and stores deltas for processing - by the hook. 
- - Sample:: - - from charmhelper.core import hookenv, unitdata - - changes = unitdata.HookData() - db = unitdata.kv() - hooks = hookenv.Hooks() - - @hooks.hook - def config_changed(): - # View all changes to configuration - for changed, (prev, cur) in changes.conf.items(): - print('config changed', changed, - 'previous value', prev, - 'current value', cur) - - # Get some unit specific bookeeping - if not db.get('pkg_key'): - key = urllib.urlopen('https://example.com/pkg_key').read() - db.set('pkg_key', key) - - if __name__ == '__main__': - with changes(): - hook.execute() - - """ - def __init__(self): - self.kv = kv() - self.conf = None - self.rels = None - - @contextlib.contextmanager - def __call__(self): - from charmhelpers.core import hookenv - hook_name = hookenv.hook_name() - - with self.kv.hook_scope(hook_name): - self._record_charm_version(hookenv.charm_dir()) - delta_config, delta_relation = self._record_hook(hookenv) - yield self.kv, delta_config, delta_relation - - def _record_charm_version(self, charm_dir): - # Record revisions.. charm revisions are meaningless - # to charm authors as they don't control the revision. - # so logic dependnent on revision is not particularly - # useful, however it is useful for debugging analysis. 
- charm_rev = open( - os.path.join(charm_dir, 'revision')).read().strip() - charm_rev = charm_rev or '0' - revs = self.kv.get('charm_revisions', []) - if charm_rev not in revs: - revs.append(charm_rev.strip() or '0') - self.kv.set('charm_revisions', revs) - - def _record_hook(self, hookenv): - data = hookenv.execution_environment() - self.conf = conf_delta = self.kv.delta(data['conf'], 'config') - self.rels = rels_delta = self.kv.delta(data['rels'], 'rels') - self.kv.set('env', dict(data['env'])) - self.kv.set('unit', data['unit']) - self.kv.set('relid', data.get('relid')) - return conf_delta, rels_delta - - -class Record(dict): - - __slots__ = () - - def __getattr__(self, k): - if k in self: - return self[k] - raise AttributeError(k) - - -class DeltaSet(Record): - - __slots__ = () - - -Delta = collections.namedtuple('Delta', ['previous', 'current']) - - -_KV = None - - -def kv(): - global _KV - if _KV is None: - _KV = Storage() - return _KV diff --git a/hooks/charmhelpers-pl/fetch/__init__.py b/hooks/charmhelpers-pl/fetch/__init__.py deleted file mode 100644 index 9a1a251..0000000 --- a/hooks/charmhelpers-pl/fetch/__init__.py +++ /dev/null @@ -1,439 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import importlib -from tempfile import NamedTemporaryFile -import time -from yaml import safe_load -from charmhelpers.core.host import ( - lsb_release -) -import subprocess -from charmhelpers.core.hookenv import ( - config, - log, -) -import os - -import six -if six.PY3: - from urllib.parse import urlparse, urlunparse -else: - from urlparse import urlparse, urlunparse - - -CLOUD_ARCHIVE = """# Ubuntu Cloud Archive -deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main -""" -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" -CLOUD_ARCHIVE_POCKETS = { - # Folsom - 'folsom': 'precise-updates/folsom', - 'precise-folsom': 'precise-updates/folsom', - 'precise-folsom/updates': 'precise-updates/folsom', - 'precise-updates/folsom': 'precise-updates/folsom', - 'folsom/proposed': 'precise-proposed/folsom', - 'precise-folsom/proposed': 'precise-proposed/folsom', - 'precise-proposed/folsom': 'precise-proposed/folsom', - # Grizzly - 'grizzly': 'precise-updates/grizzly', - 'precise-grizzly': 'precise-updates/grizzly', - 'precise-grizzly/updates': 'precise-updates/grizzly', - 'precise-updates/grizzly': 'precise-updates/grizzly', - 'grizzly/proposed': 'precise-proposed/grizzly', - 'precise-grizzly/proposed': 'precise-proposed/grizzly', - 'precise-proposed/grizzly': 'precise-proposed/grizzly', - # Havana - 'havana': 'precise-updates/havana', - 'precise-havana': 'precise-updates/havana', - 'precise-havana/updates': 'precise-updates/havana', - 'precise-updates/havana': 'precise-updates/havana', - 'havana/proposed': 'precise-proposed/havana', - 'precise-havana/proposed': 'precise-proposed/havana', - 'precise-proposed/havana': 'precise-proposed/havana', - # Icehouse - 'icehouse': 'precise-updates/icehouse', - 'precise-icehouse': 'precise-updates/icehouse', - 'precise-icehouse/updates': 'precise-updates/icehouse', - 'precise-updates/icehouse': 'precise-updates/icehouse', - 'icehouse/proposed': 
'precise-proposed/icehouse', - 'precise-icehouse/proposed': 'precise-proposed/icehouse', - 'precise-proposed/icehouse': 'precise-proposed/icehouse', - # Juno - 'juno': 'trusty-updates/juno', - 'trusty-juno': 'trusty-updates/juno', - 'trusty-juno/updates': 'trusty-updates/juno', - 'trusty-updates/juno': 'trusty-updates/juno', - 'juno/proposed': 'trusty-proposed/juno', - 'trusty-juno/proposed': 'trusty-proposed/juno', - 'trusty-proposed/juno': 'trusty-proposed/juno', - # Kilo - 'kilo': 'trusty-updates/kilo', - 'trusty-kilo': 'trusty-updates/kilo', - 'trusty-kilo/updates': 'trusty-updates/kilo', - 'trusty-updates/kilo': 'trusty-updates/kilo', - 'kilo/proposed': 'trusty-proposed/kilo', - 'trusty-kilo/proposed': 'trusty-proposed/kilo', - 'trusty-proposed/kilo': 'trusty-proposed/kilo', -} - -# The order of this list is very important. Handlers should be listed in from -# least- to most-specific URL matching. -FETCH_HANDLERS = ( - 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler', - 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler', - 'charmhelpers.fetch.giturl.GitUrlFetchHandler', -) - -APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. -APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks. -APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. - - -class SourceConfigError(Exception): - pass - - -class UnhandledSource(Exception): - pass - - -class AptLockError(Exception): - pass - - -class BaseFetchHandler(object): - - """Base class for FetchHandler implementations in fetch plugins""" - - def can_handle(self, source): - """Returns True if the source can be handled. Otherwise returns - a string explaining why it cannot""" - return "Wrong source type" - - def install(self, source): - """Try to download and unpack the source. 
Return the path to the - unpacked files or raise UnhandledSource.""" - raise UnhandledSource("Wrong source type {}".format(source)) - - def parse_url(self, url): - return urlparse(url) - - def base_url(self, url): - """Return url without querystring or fragment""" - parts = list(self.parse_url(url)) - parts[4:] = ['' for i in parts[4:]] - return urlunparse(parts) - - -def filter_installed_packages(packages): - """Returns a list of packages that require installation""" - cache = apt_cache() - _pkgs = [] - for package in packages: - try: - p = cache[package] - p.current_ver or _pkgs.append(package) - except KeyError: - log('Package {} has no installation candidate.'.format(package), - level='WARNING') - _pkgs.append(package) - return _pkgs - - -def apt_cache(in_memory=True): - """Build and return an apt cache""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache() - - -def apt_install(packages, options=None, fatal=False): - """Install one or more packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - cmd.append('install') - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Installing {} with options: {}".format(packages, - options)) - _run_apt_command(cmd, fatal) - - -def apt_upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages""" - if options is None: - options = ['--option=Dpkg::Options::=--force-confold'] - - cmd = ['apt-get', '--assume-yes'] - cmd.extend(options) - if dist: - cmd.append('dist-upgrade') - else: - cmd.append('upgrade') - log("Upgrading with options: {}".format(options)) - _run_apt_command(cmd, fatal) - - -def apt_update(fatal=False): - """Update local apt cache""" - cmd = ['apt-get', 'update'] - _run_apt_command(cmd, fatal) - - -def 
apt_purge(packages, fatal=False): - """Purge one or more packages""" - cmd = ['apt-get', '--assume-yes', 'purge'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Purging {}".format(packages)) - _run_apt_command(cmd, fatal) - - -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] - if isinstance(packages, six.string_types): - cmd.append(packages) - else: - cmd.extend(packages) - log("Holding {}".format(packages)) - - if fatal: - subprocess.check_call(cmd) - else: - subprocess.call(cmd) - - -def add_source(source, key=None): - """Add a package source to this system. - - @param source: a URL or sources.list entry, as supported by - add-apt-repository(1). Examples:: - - ppa:charmers/example - deb https://stub:key@private.example.com/ubuntu trusty main - - In addition: - 'proposed:' may be used to enable the standard 'proposed' - pocket for the release. - 'cloud:' may be used to activate official cloud archive pockets, - such as 'cloud:icehouse' - 'distro' may be used as a noop - - @param key: A key to be added to the system's APT keyring and used - to verify the signatures on packages. Ideally, this should be an - ASCII format GPG public key including the block headers. A GPG key - id may also be used, but be aware that only insecure protocols are - available to retrieve the actual public key from a public keyserver - placing your Juju environment at risk. ppa and cloud archive keys - are securely added automtically, so sould not be provided. - """ - if source is None: - log('Source is not present. 
Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - subprocess.check_call(['add-apt-repository', '--yes', source]) - elif source.startswith('cloud:'): - apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass - else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) - - -def configure_sources(update=False, - sources_var='install_sources', - keys_var='install_keys'): - """ - Configure multiple sources from charm configuration. - - The lists are encoded as yaml fragments in the configuration. - The frament needs to be included as a string. Sources and their - corresponding keys are of the types supported by add_source(). - - Example config: - install_sources: | - - "ppa:foo" - - "http://example.com/repo precise main" - install_keys: | - - null - - "a1b2c3d4" - - Note that 'null' (a.k.a. 
None) should not be quoted. - """ - sources = safe_load((config(sources_var) or '').strip()) or [] - keys = safe_load((config(keys_var) or '').strip()) or None - - if isinstance(sources, six.string_types): - sources = [sources] - - if keys is None: - for source in sources: - add_source(source, None) - else: - if isinstance(keys, six.string_types): - keys = [keys] - - if len(sources) != len(keys): - raise SourceConfigError( - 'Install sources and keys lists are different lengths') - for source, key in zip(sources, keys): - add_source(source, key) - if update: - apt_update(fatal=True) - - -def install_remote(source, *args, **kwargs): - """ - Install a file tree from a remote source - - The specified source should be a url of the form: - scheme://[host]/path[#[option=value][&...]] - - Schemes supported are based on this modules submodules. - Options supported are submodule-specific. - Additional arguments are passed through to the submodule. - - For example:: - - dest = install_remote('http://example.com/archive.tgz', - checksum='deadbeef', - hash_type='sha1') - - This will download `archive.tgz`, validate it using SHA1 and, if - the file is ok, extract it and return the directory in which it - was extracted. If the checksum fails, it will raise - :class:`charmhelpers.core.host.ChecksumError`. - """ - # We ONLY check for True here because can_handle may return a string - # explaining why it can't handle a given source. 
- handlers = [h for h in plugins() if h.can_handle(source) is True] - installed_to = None - for handler in handlers: - try: - installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass - if not installed_to: - raise UnhandledSource("No handler found for source {}".format(source)) - return installed_to - - -def install_from_config(config_var_name): - charm_config = config() - source = charm_config[config_var_name] - return install_remote(source) - - -def plugins(fetch_handlers=None): - if not fetch_handlers: - fetch_handlers = FETCH_HANDLERS - plugin_list = [] - for handler_name in fetch_handlers: - package, classname = handler_name.rsplit('.', 1) - try: - handler_class = getattr( - importlib.import_module(package), - classname) - plugin_list.append(handler_class()) - except (ImportError, AttributeError): - # Skip missing plugins so that they can be ommitted from - # installation if desired - log("FetchHandler {} not found, skipping plugin".format( - handler_name)) - return plugin_list - - -def _run_apt_command(cmd, fatal=False): - """ - Run an APT command, checking output and retrying if the fatal flag is set - to True. - - :param: cmd: str: The apt command to run. - :param: fatal: bool: Whether the command's output should be checked and - retried. - """ - env = os.environ.copy() - - if 'DEBIAN_FRONTEND' not in env: - env['DEBIAN_FRONTEND'] = 'noninteractive' - - if fatal: - retry_count = 0 - result = None - - # If the command is considered "fatal", we need to retry if the apt - # lock was not acquired. - - while result is None or result == APT_NO_LOCK: - try: - result = subprocess.check_call(cmd, env=env) - except subprocess.CalledProcessError as e: - retry_count = retry_count + 1 - if retry_count > APT_NO_LOCK_RETRY_COUNT: - raise - result = e.returncode - log("Couldn't acquire DPKG lock. Will retry in {} seconds." 
- "".format(APT_NO_LOCK_RETRY_DELAY)) - time.sleep(APT_NO_LOCK_RETRY_DELAY) - - else: - subprocess.call(cmd, env=env) diff --git a/hooks/charmhelpers-pl/fetch/archiveurl.py b/hooks/charmhelpers-pl/fetch/archiveurl.py deleted file mode 100644 index 8dfce50..0000000 --- a/hooks/charmhelpers-pl/fetch/archiveurl.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import os -import hashlib -import re - -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.payload.archive import ( - get_archive_handler, - extract, -) -from charmhelpers.core.host import mkdir, check_hash - -import six -if six.PY3: - from urllib.request import ( - build_opener, install_opener, urlopen, urlretrieve, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - ) - from urllib.parse import urlparse, urlunparse, parse_qs - from urllib.error import URLError -else: - from urllib import urlretrieve - from urllib2 import ( - build_opener, install_opener, urlopen, - HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler, - URLError - ) - from urlparse import urlparse, urlunparse, parse_qs - - -def splituser(host): - '''urllib.splituser(), but six's support of this seems broken''' - _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: - return match.group(1, 2) - return None, host - - -def splitpasswd(user): - '''urllib.splitpasswd(), but six's support of this is missing''' - _passwdprog = re.compile('^([^:]*):(.*)$', re.S) - match = _passwdprog.match(user) - if match: - return match.group(1, 2) - return user, None - - -class ArchiveUrlFetchHandler(BaseFetchHandler): - """ - Handler to download archive files from arbitrary URLs. - - Can fetch from http, https, ftp, and file URLs. - - Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files. - - Installs the contents of the archive in $CHARM_DIR/fetched/. - """ - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): - return "Wrong source type" - if get_archive_handler(self.base_url(source)): - return True - return False - - def download(self, source, dest): - """ - Download an archive file. - - :param str source: URL pointing to an archive file. - :param str dest: Local path location to download archive file to. 
- """ - # propogate all exceptions - # URLError, OSError, etc - proto, netloc, path, params, query, fragment = urlparse(source) - if proto in ('http', 'https'): - auth, barehost = splituser(netloc) - if auth is not None: - source = urlunparse((proto, barehost, path, params, query, fragment)) - username, password = splitpasswd(auth) - passman = HTTPPasswordMgrWithDefaultRealm() - # Realm is set to None in add_password to force the username and password - # to be used whatever the realm - passman.add_password(None, source, username, password) - authhandler = HTTPBasicAuthHandler(passman) - opener = build_opener(authhandler) - install_opener(opener) - response = urlopen(source) - try: - with open(dest, 'w') as dest_file: - dest_file.write(response.read()) - except Exception as e: - if os.path.isfile(dest): - os.unlink(dest) - raise e - - # Mandatory file validation via Sha1 or MD5 hashing. - def download_and_validate(self, url, hashsum, validate="sha1"): - tempfile, headers = urlretrieve(url) - check_hash(tempfile, hashsum, validate) - return tempfile - - def install(self, source, dest=None, checksum=None, hash_type='sha1'): - """ - Download and install an archive file, with optional checksum validation. - - The checksum can also be given on the `source` URL's fragment. - For example:: - - handler.install('http://example.com/file.tgz#sha1=deadbeef') - - :param str source: URL pointing to an archive file. - :param str dest: Local destination path to install to. If not given, - installs to `$CHARM_DIR/archives/archive_file_name`. - :param str checksum: If given, validate the archive file after download. - :param str hash_type: Algorithm used to generate `checksum`. - Can be any hash alrgorithm supported by :mod:`hashlib`, - such as md5, sha1, sha256, sha512, etc. 
- - """ - url_parts = self.parse_url(source) - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched') - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path)) - try: - self.download(source, dld_file) - except URLError as e: - raise UnhandledSource(e.reason) - except OSError as e: - raise UnhandledSource(e.strerror) - options = parse_qs(url_parts.fragment) - for key, value in options.items(): - if not six.PY3: - algorithms = hashlib.algorithms - else: - algorithms = hashlib.algorithms_available - if key in algorithms: - check_hash(dld_file, value, key) - if checksum: - check_hash(dld_file, checksum, hash_type) - return extract(dld_file, dest) diff --git a/hooks/charmhelpers-pl/fetch/bzrurl.py b/hooks/charmhelpers-pl/fetch/bzrurl.py deleted file mode 100644 index 3531315..0000000 --- a/hooks/charmhelpers-pl/fetch/bzrurl.py +++ /dev/null @@ -1,78 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . 
- -import os -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.core.host import mkdir - -import six -if six.PY3: - raise ImportError('bzrlib does not support Python3') - -try: - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-bzrlib") - from bzrlib.branch import Branch - from bzrlib import bzrdir, workingtree, errors - - -class BzrUrlFetchHandler(BaseFetchHandler): - """Handler for bazaar branches via generic and lp URLs""" - def can_handle(self, source): - url_parts = self.parse_url(source) - if url_parts.scheme not in ('bzr+ssh', 'lp'): - return False - else: - return True - - def branch(self, source, dest): - url_parts = self.parse_url(source) - # If we use lp:branchname scheme we need to load plugins - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - if url_parts.scheme == "lp": - from bzrlib.plugin import load_plugins - load_plugins() - try: - local_branch = bzrdir.BzrDir.create_branch_convenience(dest) - except errors.AlreadyControlDirError: - local_branch = Branch.open(dest) - try: - remote_branch = Branch.open(source) - remote_branch.push(local_branch) - tree = workingtree.WorkingTree.open(dest) - tree.update() - except Exception as e: - raise e - - def install(self, source): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - try: - self.branch(source, dest_dir) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers-pl/fetch/giturl.py b/hooks/charmhelpers-pl/fetch/giturl.py deleted file mode 100644 index 93aae87..0000000 --- a/hooks/charmhelpers-pl/fetch/giturl.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 
2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -import os -from charmhelpers.fetch import ( - BaseFetchHandler, - UnhandledSource -) -from charmhelpers.core.host import mkdir - -import six -if six.PY3: - raise ImportError('GitPython does not support Python 3') - -try: - from git import Repo -except ImportError: - from charmhelpers.fetch import apt_install - apt_install("python-git") - from git import Repo - -from git.exc import GitCommandError # noqa E402 - - -class GitUrlFetchHandler(BaseFetchHandler): - """Handler for git branches via generic and github URLs""" - def can_handle(self, source): - url_parts = self.parse_url(source) - # TODO (mattyw) no support for ssh git@ yet - if url_parts.scheme not in ('http', 'https', 'git'): - return False - else: - return True - - def clone(self, source, dest, branch): - if not self.can_handle(source): - raise UnhandledSource("Cannot handle {}".format(source)) - - repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) - - def install(self, source, branch="master", dest=None): - url_parts = self.parse_url(source) - branch_name = url_parts.path.strip("/").split("/")[-1] - if dest: - dest_dir = os.path.join(dest, branch_name) - else: - dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", - branch_name) - if not os.path.exists(dest_dir): - mkdir(dest_dir, perms=0o755) - try: - self.clone(source, dest_dir, branch) - 
except GitCommandError as e: - raise UnhandledSource(e.message) - except OSError as e: - raise UnhandledSource(e.strerror) - return dest_dir diff --git a/hooks/charmhelpers-pl/payload/__init__.py b/hooks/charmhelpers-pl/payload/__init__.py deleted file mode 100644 index e6f4249..0000000 --- a/hooks/charmhelpers-pl/payload/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. If not, see . - -"Tools for working with files injected into a charm just before deployment." diff --git a/hooks/charmhelpers-pl/payload/execd.py b/hooks/charmhelpers-pl/payload/execd.py deleted file mode 100644 index 4d4d81a..0000000 --- a/hooks/charmhelpers-pl/payload/execd.py +++ /dev/null @@ -1,66 +0,0 @@ -#!/usr/bin/env python - -# Copyright 2014-2015 Canonical Limited. -# -# This file is part of charm-helpers. -# -# charm-helpers is free software: you can redistribute it and/or modify -# it under the terms of the GNU Lesser General Public License version 3 as -# published by the Free Software Foundation. -# -# charm-helpers is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public License -# along with charm-helpers. 
If not, see . - -import os -import sys -import subprocess -from charmhelpers.core import hookenv - - -def default_execd_dir(): - return os.path.join(os.environ['CHARM_DIR'], 'exec.d') - - -def execd_module_paths(execd_dir=None): - """Generate a list of full paths to modules within execd_dir.""" - if not execd_dir: - execd_dir = default_execd_dir() - - if not os.path.exists(execd_dir): - return - - for subpath in os.listdir(execd_dir): - module = os.path.join(execd_dir, subpath) - if os.path.isdir(module): - yield module - - -def execd_submodule_paths(command, execd_dir=None): - """Generate a list of full paths to the specified command within exec_dir. - """ - for module_path in execd_module_paths(execd_dir): - path = os.path.join(module_path, command) - if os.access(path, os.X_OK) and os.path.isfile(path): - yield path - - -def execd_run(command, execd_dir=None, die_on_error=False, stderr=None): - """Run command for each module within execd_dir which defines it.""" - for submodule_path in execd_submodule_paths(command, execd_dir): - try: - subprocess.check_call(submodule_path, shell=True, stderr=stderr) - except subprocess.CalledProcessError as e: - hookenv.log("Error ({}) running {}. Output: {}".format( - e.returncode, e.cmd, e.output)) - if die_on_error: - sys.exit(e.returncode) - - -def execd_preinstall(execd_dir=None): - """Run charm-pre-install for each module within execd_dir.""" - execd_run('charm-pre-install', execd_dir=execd_dir) diff --git a/hooks/charmhelpers-pl/contrib/__init__.py b/hooks/charmhelpers/contrib/amulet/__init__.py similarity index 100% rename from hooks/charmhelpers-pl/contrib/__init__.py rename to hooks/charmhelpers/contrib/amulet/__init__.py diff --git a/hooks/charmhelpers/contrib/amulet/deployment.py b/hooks/charmhelpers/contrib/amulet/deployment.py new file mode 100644 index 0000000..367d6b4 --- /dev/null +++ b/hooks/charmhelpers/contrib/amulet/deployment.py @@ -0,0 +1,93 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import amulet +import os +import six + + +class AmuletDeployment(object): + """Amulet deployment. + + This class provides generic Amulet deployment and test runner + methods. + """ + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services. + + Add services to the deployment where this_service is the local charm + that we're testing and other_services are the other services that + are being used in the local amulet tests. 
+ """ + if this_service['name'] != os.path.basename(os.getcwd()): + s = this_service['name'] + msg = "The charm's root directory name needs to be {}".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + + if 'units' not in this_service: + this_service['units'] = 1 + + self.d.add(this_service['name'], units=this_service['units']) + + for svc in other_services: + if 'location' in svc: + branch_location = svc['location'] + elif self.series: + branch_location = 'cs:{}/{}'.format(self.series, svc['name']), + else: + branch_location = None + + if 'units' not in svc: + svc['units'] = 1 + + self.d.add(svc['name'], charm=branch_location, units=svc['units']) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in six.iteritems(relations): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in six.iteritems(configs): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup(timeout=900) + self.d.sentry.wait(timeout=900) + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except Exception: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/hooks/charmhelpers/contrib/amulet/utils.py b/hooks/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 0000000..3de26af --- /dev/null +++ b/hooks/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,533 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import amulet +import ConfigParser +import distro_info +import io +import logging +import os +import re +import six +import sys +import time +import urlparse + + +class AmuletUtils(object): + """Amulet utilities. + + This class provides common utility functions that are used by Amulet + tests. + """ + + def __init__(self, log_level=logging.ERROR): + self.log = self.get_logger(level=log_level) + self.ubuntu_releases = self.get_ubuntu_releases() + + def get_logger(self, name="amulet-logger", level=logging.DEBUG): + """Get a logger object that will log to stdout.""" + log = logging + logger = log.getLogger(name) + fmt = log.Formatter("%(asctime)s %(funcName)s " + "%(levelname)s: %(message)s") + + handler = log.StreamHandler(stream=sys.stdout) + handler.setLevel(level) + handler.setFormatter(fmt) + + logger.addHandler(handler) + logger.setLevel(level) + + return logger + + def valid_ip(self, ip): + if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): + return True + else: + return False + + def valid_url(self, url): + p = re.compile( + r'^(?:http|ftp)s?://' + r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa + r'localhost|' + r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' + r'(?::\d+)?' + r'(?:/?|[/?]\S+)$', + re.IGNORECASE) + if p.match(url): + return True + else: + return False + + def get_ubuntu_release_from_sentry(self, sentry_unit): + """Get Ubuntu release codename from sentry unit. 
+ + :param sentry_unit: amulet sentry/service unit pointer + :returns: list of strings - release codename, failure message + """ + msg = None + cmd = 'lsb_release -cs' + release, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} lsb_release: {}'.format( + sentry_unit.info['unit_name'], release)) + else: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, release, code)) + if release not in self.ubuntu_releases: + msg = ("Release ({}) not found in Ubuntu releases " + "({})".format(release, self.ubuntu_releases)) + return release, msg + + def validate_services(self, commands): + """Validate that lists of commands succeed on service units. Can be + used to verify system services are running on the corresponding + service units. + + :param commands: dict with sentry keys and arbitrary command list vals + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # /!\ DEPRECATION WARNING (beisner): + # New and existing tests should be rewritten to use + # validate_services_by_name() as it is aware of init systems. + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'validate_services_by_name instead of validate_services ' + 'due to init system differences.') + + for k, v in six.iteritems(commands): + for cmd in v: + output, code = k.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(k.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def validate_services_by_name(self, sentry_services): + """Validate system service status by service name, automatically + detecting init system based on Ubuntu release codename. 
+ + :param sentry_services: dict with sentry keys and svc list values + :returns: None if successful, Failure string message otherwise + """ + self.log.debug('Checking status of system services...') + + # Point at which systemd became a thing + systemd_switch = self.ubuntu_releases.index('vivid') + + for sentry_unit, services_list in six.iteritems(sentry_services): + # Get lsb_release codename from unit + release, ret = self.get_ubuntu_release_from_sentry(sentry_unit) + if ret: + return ret + + for service_name in services_list: + if (self.ubuntu_releases.index(release) >= systemd_switch or + service_name == "rabbitmq-server"): + # init is systemd + cmd = 'sudo service {} status'.format(service_name) + elif self.ubuntu_releases.index(release) < systemd_switch: + # init is upstart + cmd = 'sudo status {}'.format(service_name) + + output, code = sentry_unit.run(cmd) + self.log.debug('{} `{}` returned ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code)) + if code != 0: + return "command `{}` returned {}".format(cmd, str(code)) + return None + + def _get_config(self, unit, filename): + """Get a ConfigParser object for parsing a unit's config file.""" + file_contents = unit.file_contents(filename) + + # NOTE(beisner): by default, ConfigParser does not handle options + # with no value, such as the flags used in the mysql my.cnf file. + # https://bugs.python.org/issue7005 + config = ConfigParser.ConfigParser(allow_no_value=True) + config.readfp(io.StringIO(file_contents)) + return config + + def validate_config_data(self, sentry_unit, config_file, section, + expected): + """Validate config file data. + + Verify that the specified section of the config file contains + the expected option key:value pairs. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('Validating config file data ({} in {} on {})' + '...'.format(section, config_file, + sentry_unit.info['unit_name'])) + config = self._get_config(sentry_unit, config_file) + + if section != 'DEFAULT' and not config.has_section(section): + return "section [{}] does not exist".format(section) + + for k in expected.keys(): + if not config.has_option(section, k): + return "section [{}] is missing option {}".format(section, k) + + actual = config.get(section, k) + v = expected[k] + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if actual != v: + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual): + return "section [{}] {}:{} != expected {}:{}".format( + section, k, actual, k, expected[k]) + return None + + def _validate_dict_data(self, expected, actual): + """Validate dictionary data. + + Compare expected dictionary data vs actual dictionary data. + The values in the 'expected' dictionary can be strings, bools, ints, + longs, or can be a function that evaluates a variable and returns a + bool. 
+ """ + self.log.debug('actual: {}'.format(repr(actual))) + self.log.debug('expected: {}'.format(repr(expected))) + + for k, v in six.iteritems(expected): + if k in actual: + if (isinstance(v, six.string_types) or + isinstance(v, bool) or + isinstance(v, six.integer_types)): + # handle explicit values + if v != actual[k]: + return "{}:{}".format(k, actual[k]) + # handle function pointers, such as not_null or valid_ip + elif not v(actual[k]): + return "{}:{}".format(k, actual[k]) + else: + return "key '{}' does not exist".format(k) + return None + + def validate_relation_data(self, sentry_unit, relation, expected): + """Validate actual relation data based on expected relation data.""" + actual = sentry_unit.relation(relation[0], relation[1]) + return self._validate_dict_data(expected, actual) + + def _validate_list_data(self, expected, actual): + """Compare expected list vs actual list data.""" + for e in expected: + if e not in actual: + return "expected item {} not found in actual list".format(e) + return None + + def not_null(self, string): + if string is not None: + return True + else: + return False + + def _get_file_mtime(self, sentry_unit, filename): + """Get last modification time of file.""" + return sentry_unit.file_stat(filename)['mtime'] + + def _get_dir_mtime(self, sentry_unit, directory): + """Get last modification time of directory.""" + return sentry_unit.directory_stat(directory)['mtime'] + + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): + """Get process' start time. + + Determine start time of the process based on the last modification + time of the /proc/pid directory. If pgrep_full is True, the process + name is matched against the full command line. 
+ """ + if pgrep_full: + cmd = 'pgrep -o -f {}'.format(service) + else: + cmd = 'pgrep -o {}'.format(service) + cmd = cmd + ' | grep -v pgrep || exit 0' + cmd_out = sentry_unit.run(cmd) + self.log.debug('CMDout: ' + str(cmd_out)) + if cmd_out[0]: + self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) + proc_dir = '/proc/{}'.format(cmd_out[0].strip()) + return self._get_dir_mtime(sentry_unit, proc_dir) + + def service_restarted(self, sentry_unit, service, filename, + pgrep_full=False, sleep_time=20): + """Check if service was restarted. + + Compare a service's start time vs a file's last modification time + (such as a config file for that service) to determine if the service + has been restarted. + """ + time.sleep(sleep_time) + if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= + self._get_file_mtime(sentry_unit, filename)): + return True + else: + return False + + def service_restarted_since(self, sentry_unit, mtime, service, + pgrep_full=False, sleep_time=20, + retry_count=2): + """Check if service was been started after a given time. + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Returns: + bool: True if service found and its start time it newer than mtime, + False if service is older than mtime or if service was + not found. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + while retry_count > 0 and not proc_start_time: + self.log.debug('No pid file found for service %s, will retry %i ' + 'more times' % (service, retry_count)) + time.sleep(30) + proc_start_time = self._get_proc_start_time(sentry_unit, service, + pgrep_full) + retry_count = retry_count - 1 + + if not proc_start_time: + self.log.warn('No proc start time found, assuming service did ' + 'not start') + return False + if proc_start_time >= mtime: + self.log.debug('proc start time is newer than provided mtime' + '(%s >= %s)' % (proc_start_time, mtime)) + return True + else: + self.log.warn('proc start time (%s) is older than provided mtime ' + '(%s), service did not restart' % (proc_start_time, + mtime)) + return False + + def config_updated_since(self, sentry_unit, filename, mtime, + sleep_time=20): + """Check if file was modified after a given time. 
+ + Args: + sentry_unit (sentry): The sentry unit to check the file mtime on + filename (string): The file to check mtime of + mtime (float): The epoch time to check against + sleep_time (int): Seconds to sleep before looking for process + + Returns: + bool: True if file was modified more recently than mtime, False if + file was modified before mtime, + """ + self.log.debug('Checking %s updated since %s' % (filename, mtime)) + time.sleep(sleep_time) + file_mtime = self._get_file_mtime(sentry_unit, filename) + if file_mtime >= mtime: + self.log.debug('File mtime is newer than provided mtime ' + '(%s >= %s)' % (file_mtime, mtime)) + return True + else: + self.log.warn('File mtime %s is older than provided mtime %s' + % (file_mtime, mtime)) + return False + + def validate_service_config_changed(self, sentry_unit, mtime, service, + filename, pgrep_full=False, + sleep_time=20, retry_count=2): + """Check service and file were updated after mtime + + Args: + sentry_unit (sentry): The sentry unit to check for the service on + mtime (float): The epoch time to check against + service (string): service name to look for in process table + filename (string): The file to check mtime of + pgrep_full (boolean): Use full command line search mode with pgrep + sleep_time (int): Seconds to sleep before looking for process + retry_count (int): If service is not found, how many times to retry + + Typical Usage: + u = OpenStackAmuletUtils(ERROR) + ... + mtime = u.get_sentry_time(self.cinder_sentry) + self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'}) + if not u.validate_service_config_changed(self.cinder_sentry, + mtime, + 'cinder-api', + '/etc/cinder/cinder.conf') + amulet.raise_status(amulet.FAIL, msg='update failed') + Returns: + bool: True if both service and file where updated/restarted after + mtime, False if service is older than mtime or if service was + not found or if filename was modified before mtime. 
+ """ + self.log.debug('Checking %s restarted since %s' % (service, mtime)) + time.sleep(sleep_time) + service_restart = self.service_restarted_since(sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=0, + retry_count=retry_count) + config_update = self.config_updated_since(sentry_unit, filename, mtime, + sleep_time=0) + return service_restart and config_update + + def get_sentry_time(self, sentry_unit): + """Return current epoch time on a sentry""" + cmd = "date +'%s'" + return float(sentry_unit.run(cmd)[0]) + + def relation_error(self, name, data): + return 'unexpected relation data in {} - {}'.format(name, data) + + def endpoint_error(self, name, data): + return 'unexpected endpoint data in {} - {}'.format(name, data) + + def get_ubuntu_releases(self): + """Return a list of all Ubuntu releases in order of release.""" + _d = distro_info.UbuntuDistroInfo() + _release_list = _d.all + self.log.debug('Ubuntu release list: {}'.format(_release_list)) + return _release_list + + def file_to_url(self, file_rel_path): + """Convert a relative file path to a file URL.""" + _abs_path = os.path.abspath(file_rel_path) + return urlparse.urlparse(_abs_path, scheme='file').geturl() + + def check_commands_on_units(self, commands, sentry_units): + """Check that all commands in a list exit zero on all + sentry units in a list. 
+ + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + self.log.debug('Checking exit codes for {} commands on {} ' + 'sentry units...'.format(len(commands), + len(sentry_units))) + for sentry_unit in sentry_units: + for cmd in commands: + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + return ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + return None + + def get_process_id_list(self, sentry_unit, process_name): + """Get a list of process ID(s) from a single sentry juju unit + for a single process name. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param process_name: Process name + :returns: List of process IDs + """ + cmd = 'pidof {}'.format(process_name) + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output).split() + + def get_unit_process_ids(self, unit_processes): + """Construct a dict containing unit sentries, process names, and + process IDs.""" + pid_dict = {} + for sentry_unit, process_list in unit_processes.iteritems(): + pid_dict[sentry_unit] = {} + for process in process_list: + pids = self.get_process_id_list(sentry_unit, process) + pid_dict[sentry_unit].update({process: pids}) + return pid_dict + + def validate_unit_process_ids(self, expected, actual): + """Validate process id quantities for services on units.""" + self.log.debug('Checking units for running processes...') + self.log.debug('Expected PIDs: {}'.format(expected)) + self.log.debug('Actual PIDs: {}'.format(actual)) + + if len(actual) != len(expected): + return ('Unit count mismatch. 
expected, actual: {}, ' + '{} '.format(len(expected), len(actual))) + + for (e_sentry, e_proc_names) in expected.iteritems(): + e_sentry_name = e_sentry.info['unit_name'] + if e_sentry in actual.keys(): + a_proc_names = actual[e_sentry] + else: + return ('Expected sentry ({}) not found in actual dict data.' + '{}'.format(e_sentry_name, e_sentry)) + + if len(e_proc_names.keys()) != len(a_proc_names.keys()): + return ('Process name count mismatch. expected, actual: {}, ' + '{}'.format(len(expected), len(actual))) + + for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \ + zip(e_proc_names.items(), a_proc_names.items()): + if e_proc_name != a_proc_name: + return ('Process name mismatch. expected, actual: {}, ' + '{}'.format(e_proc_name, a_proc_name)) + + a_pids_length = len(a_pids) + if e_pids_length != a_pids_length: + return ('PID count mismatch. {} ({}) expected, actual: ' + '{}, {} ({})'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids_length, + a_pids)) + else: + self.log.debug('PID check OK: {} {} {}: ' + '{}'.format(e_sentry_name, e_proc_name, + e_pids_length, a_pids)) + return None + + def validate_list_of_identical_dicts(self, list_of_dicts): + """Check that all dicts within a list are identical.""" + hashes = [] + for _dict in list_of_dicts: + hashes.append(hash(frozenset(_dict.items()))) + + self.log.debug('Hashes: {}'.format(hashes)) + if len(set(hashes)) == 1: + self.log.debug('Dicts within list are identical') + else: + return 'Dicts within list are not identical' + + return None diff --git a/hooks/charmhelpers/contrib/ansible/__init__.py b/hooks/charmhelpers/contrib/ansible/__init__.py new file mode 100644 index 0000000..944f406 --- /dev/null +++ b/hooks/charmhelpers/contrib/ansible/__init__.py @@ -0,0 +1,254 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +"""Charm Helpers ansible - declare the state of your machines. + +This helper enables you to declare your machine state, rather than +program it procedurally (and have to test each change to your procedures). +Your install hook can be as simple as:: + + {{{ + import charmhelpers.contrib.ansible + + + def install(): + charmhelpers.contrib.ansible.install_ansible_support() + charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml') + }}} + +and won't need to change (nor will its tests) when you change the machine +state. + +All of your juju config and relation-data are available as template +variables within your playbooks and templates. An install playbook looks +something like:: + + {{{ + --- + - hosts: localhost + user: root + + tasks: + - name: Add private repositories. + template: + src: ../templates/private-repositories.list.jinja2 + dest: /etc/apt/sources.list.d/private.list + + - name: Update the cache. + apt: update_cache=yes + + - name: Install dependencies. + apt: pkg={{ item }} + with_items: + - python-mimeparse + - python-webob + - sunburnt + + - name: Setup groups. + group: name={{ item.name }} gid={{ item.gid }} + with_items: + - { name: 'deploy_user', gid: 1800 } + - { name: 'service_user', gid: 1500 } + + ... + }}} + +Read more online about `playbooks`_ and standard ansible `modules`_. + +.. 
_playbooks: http://www.ansibleworks.com/docs/playbooks.html +.. _modules: http://www.ansibleworks.com/docs/modules.html + +A further feature of the ansible hooks is to provide a light weight "action" +scripting tool. This is a decorator that you apply to a function, and that +function can now receive cli args, and can pass extra args to the playbook. + +e.g. + + +@hooks.action() +def some_action(amount, force="False"): + "Usage: some-action AMOUNT [force=True]" # <-- shown on error + # process the arguments + # do some calls + # return extra-vars to be passed to ansible-playbook + return { + 'amount': int(amount), + 'type': force, + } + +You can now create a symlink to hooks.py that can be invoked like a hook, but +with cli params: + +# link actions/some-action to hooks/hooks.py + +actions/some-action amount=10 force=true + +""" +import os +import stat +import subprocess +import functools + +import charmhelpers.contrib.templating.contexts +import charmhelpers.core.host +import charmhelpers.core.hookenv +import charmhelpers.fetch + + +charm_dir = os.environ.get('CHARM_DIR', '') +ansible_hosts_path = '/etc/ansible/hosts' +# Ansible will automatically include any vars in the following +# file in its inventory when run locally. +ansible_vars_path = '/etc/ansible/host_vars/localhost' + + +def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'): + """Installs the ansible package. + + By default it is installed from the `PPA`_ linked from + the ansible `website`_ or from a ppa specified by a charm config. + + .. _PPA: https://launchpad.net/~rquillo/+archive/ansible + .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu + + If from_ppa is empty, you must ensure that the package is available + from a configured repository. 
+ """ + if from_ppa: + charmhelpers.fetch.add_source(ppa_location) + charmhelpers.fetch.apt_update(fatal=True) + charmhelpers.fetch.apt_install('ansible') + with open(ansible_hosts_path, 'w+') as hosts_file: + hosts_file.write('localhost ansible_connection=local') + + +def apply_playbook(playbook, tags=None, extra_vars=None): + tags = tags or [] + tags = ",".join(tags) + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + ansible_vars_path, namespace_separator='__', + allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR)) + + # we want ansible's log output to be unbuffered + env = os.environ.copy() + env['PYTHONUNBUFFERED'] = "1" + call = [ + 'ansible-playbook', + '-c', + 'local', + playbook, + ] + if tags: + call.extend(['--tags', '{}'.format(tags)]) + if extra_vars: + extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()] + call.extend(['--extra-vars', " ".join(extra)]) + subprocess.check_call(call, env=env) + + +class AnsibleHooks(charmhelpers.core.hookenv.Hooks): + """Run a playbook with the hook-name as the tag. + + This helper builds on the standard hookenv.Hooks helper, + but additionally runs the playbook with the hook-name specified + using --tags (ie. running all the tasks tagged with the hook-name). 
+ + Example:: + + hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml') + + # All the tasks within my_machine_state.yaml tagged with 'install' + # will be run automatically after do_custom_work() + @hooks.hook() + def install(): + do_custom_work() + + # For most of your hooks, you won't need to do anything other + # than run the tagged tasks for the hook: + @hooks.hook('config-changed', 'start', 'stop') + def just_use_playbook(): + pass + + # As a convenience, you can avoid the above noop function by specifying + # the hooks which are handled by ansible-only and they'll be registered + # for you: + # hooks = AnsibleHooks( + # 'playbooks/my_machine_state.yaml', + # default_hooks=['config-changed', 'start', 'stop']) + + if __name__ == "__main__": + # execute a hook based on the name the program is called by + hooks.execute(sys.argv) + + """ + + def __init__(self, playbook_path, default_hooks=None): + """Register any hooks handled by ansible.""" + super(AnsibleHooks, self).__init__() + + self._actions = {} + self.playbook_path = playbook_path + + default_hooks = default_hooks or [] + + def noop(*args, **kwargs): + pass + + for hook in default_hooks: + self.register(hook, noop) + + def register_action(self, name, function): + """Register a hook""" + self._actions[name] = function + + def execute(self, args): + """Execute the hook followed by the playbook using the hook as tag.""" + hook_name = os.path.basename(args[0]) + extra_vars = None + if hook_name in self._actions: + extra_vars = self._actions[hook_name](args[1:]) + else: + super(AnsibleHooks, self).execute(args) + + charmhelpers.contrib.ansible.apply_playbook( + self.playbook_path, tags=[hook_name], extra_vars=extra_vars) + + def action(self, *action_names): + """Decorator, registering them as actions""" + def action_wrapper(decorated): + + @functools.wraps(decorated) + def wrapper(argv): + kwargs = dict(arg.split('=') for arg in argv) + try: + return decorated(**kwargs) + except TypeError as e: 
+ if decorated.__doc__: + e.args += (decorated.__doc__,) + raise + + self.register_action(decorated.__name__, wrapper) + if '_' in decorated.__name__: + self.register_action( + decorated.__name__.replace('_', '-'), wrapper) + + return wrapper + + return action_wrapper diff --git a/hooks/charmhelpers/contrib/benchmark/__init__.py b/hooks/charmhelpers/contrib/benchmark/__init__.py new file mode 100644 index 0000000..1d039ea --- /dev/null +++ b/hooks/charmhelpers/contrib/benchmark/__init__.py @@ -0,0 +1,126 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import subprocess +import time +import os +from distutils.spawn import find_executable + +from charmhelpers.core.hookenv import ( + in_relation_hook, + relation_ids, + relation_set, + relation_get, +) + + +def action_set(key, val): + if find_executable('action-set'): + action_cmd = ['action-set'] + + if isinstance(val, dict): + for k, v in iter(val.items()): + action_set('%s.%s' % (key, k), v) + return True + + action_cmd.append('%s=%s' % (key, val)) + subprocess.check_call(action_cmd) + return True + return False + + +class Benchmark(): + """ + Helper class for the `benchmark` interface. 
+ + :param list actions: Define the actions that are also benchmarks + + From inside the benchmark-relation-changed hook, you would + Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom']) + + Examples: + + siege = Benchmark(['siege']) + siege.start() + [... run siege ...] + # The higher the score, the better the benchmark + siege.set_composite_score(16.70, 'trans/sec', 'desc') + siege.finish() + + + """ + + BENCHMARK_CONF = '/etc/benchmark.conf' # Replaced in testing + + required_keys = [ + 'hostname', + 'port', + 'graphite_port', + 'graphite_endpoint', + 'api_port' + ] + + def __init__(self, benchmarks=None): + if in_relation_hook(): + if benchmarks is not None: + for rid in sorted(relation_ids('benchmark')): + relation_set(relation_id=rid, relation_settings={ + 'benchmarks': ",".join(benchmarks) + }) + + # Check the relation data + config = {} + for key in self.required_keys: + val = relation_get(key) + if val is not None: + config[key] = val + else: + # We don't have all of the required keys + config = {} + break + + if len(config): + with open(self.BENCHMARK_CONF, 'w') as f: + for key, val in iter(config.items()): + f.write("%s=%s\n" % (key, val)) + + @staticmethod + def start(): + action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + """ + If the collectd charm is also installed, tell it to send a snapshot + of the current profile data. + """ + COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data' + if os.path.exists(COLLECT_PROFILE_DATA): + subprocess.check_output([COLLECT_PROFILE_DATA]) + + @staticmethod + def finish(): + action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ')) + + @staticmethod + def set_composite_score(value, units, direction='asc'): + """ + Set the composite score for a benchmark run. This is a single number + representative of the benchmark results. This could be the most + important metric, or an amalgamation of metric scores. 
+ """ + return action_set( + "meta.composite", + {'value': value, 'units': units, 'direction': direction} + ) diff --git a/hooks/charmhelpers/contrib/charmhelpers/__init__.py b/hooks/charmhelpers/contrib/charmhelpers/__init__.py new file mode 100644 index 0000000..edba750 --- /dev/null +++ b/hooks/charmhelpers/contrib/charmhelpers/__init__.py @@ -0,0 +1,208 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Copyright 2012 Canonical Ltd. This software is licensed under the +# GNU Affero General Public License version 3 (see the file LICENSE). 
+ +import warnings +warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa + +import operator +import tempfile +import time +import yaml +import subprocess + +import six +if six.PY3: + from urllib.request import urlopen + from urllib.error import (HTTPError, URLError) +else: + from urllib2 import (urlopen, HTTPError, URLError) + +"""Helper functions for writing Juju charms in Python.""" + +__metaclass__ = type +__all__ = [ + # 'get_config', # core.hookenv.config() + # 'log', # core.hookenv.log() + # 'log_entry', # core.hookenv.log() + # 'log_exit', # core.hookenv.log() + # 'relation_get', # core.hookenv.relation_get() + # 'relation_set', # core.hookenv.relation_set() + # 'relation_ids', # core.hookenv.relation_ids() + # 'relation_list', # core.hookenv.relation_units() + # 'config_get', # core.hookenv.config() + # 'unit_get', # core.hookenv.unit_get() + # 'open_port', # core.hookenv.open_port() + # 'close_port', # core.hookenv.close_port() + # 'service_control', # core.host.service() + 'unit_info', # client-side, NOT IMPLEMENTED + 'wait_for_machine', # client-side, NOT IMPLEMENTED + 'wait_for_page_contents', # client-side, NOT IMPLEMENTED + 'wait_for_relation', # client-side, NOT IMPLEMENTED + 'wait_for_unit', # client-side, NOT IMPLEMENTED +] + + +SLEEP_AMOUNT = 0.1 + + +# We create a juju_status Command here because it makes testing much, +# much easier. 
+def juju_status(): + subprocess.check_call(['juju', 'status']) + +# re-implemented as charmhelpers.fetch.configure_sources() +# def configure_source(update=False): +# source = config_get('source') +# if ((source.startswith('ppa:') or +# source.startswith('cloud:') or +# source.startswith('http:'))): +# run('add-apt-repository', source) +# if source.startswith("http:"): +# run('apt-key', 'import', config_get('key')) +# if update: +# run('apt-get', 'update') + + +# DEPRECATED: client-side only +def make_charm_config_file(charm_config): + charm_config_file = tempfile.NamedTemporaryFile(mode='w+') + charm_config_file.write(yaml.dump(charm_config)) + charm_config_file.flush() + # The NamedTemporaryFile instance is returned instead of just the name + # because we want to take advantage of garbage collection-triggered + # deletion of the temp file when it goes out of scope in the caller. + return charm_config_file + + +# DEPRECATED: client-side only +def unit_info(service_name, item_name, data=None, unit=None): + if data is None: + data = yaml.safe_load(juju_status()) + service = data['services'].get(service_name) + if service is None: + # XXX 2012-02-08 gmb: + # This allows us to cope with the race condition that we + # have between deploying a service and having it come up in + # `juju status`. We could probably do with cleaning it up so + # that it fails a bit more noisily after a while. + return '' + units = service['units'] + if unit is not None: + item = units[unit][item_name] + else: + # It might seem odd to sort the units here, but we do it to + # ensure that when no unit is specified, the first unit for the + # service (or at least the one with the lowest number) is the + # one whose data gets returned. 
+ sorted_unit_names = sorted(units.keys()) + item = units[sorted_unit_names[0]][item_name] + return item + + +# DEPRECATED: client-side only +def get_machine_data(): + return yaml.safe_load(juju_status())['machines'] + + +# DEPRECATED: client-side only +def wait_for_machine(num_machines=1, timeout=300): + """Wait `timeout` seconds for `num_machines` machines to come up. + + This wait_for... function can be called by other wait_for functions + whose timeouts might be too short in situations where only a bare + Juju setup has been bootstrapped. + + :return: A tuple of (num_machines, time_taken). This is used for + testing. + """ + # You may think this is a hack, and you'd be right. The easiest way + # to tell what environment we're working in (LXC vs EC2) is to check + # the dns-name of the first machine. If it's localhost we're in LXC + # and we can just return here. + if get_machine_data()[0]['dns-name'] == 'localhost': + return 1, 0 + start_time = time.time() + while True: + # Drop the first machine, since it's the Zookeeper and that's + # not a machine that we need to wait for. This will only work + # for EC2 environments, which is why we return early above if + # we're in LXC. 
+ machine_data = get_machine_data() + non_zookeeper_machines = [ + machine_data[key] for key in list(machine_data.keys())[1:]] + if len(non_zookeeper_machines) >= num_machines: + all_machines_running = True + for machine in non_zookeeper_machines: + if machine.get('instance-state') != 'running': + all_machines_running = False + break + if all_machines_running: + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + return num_machines, time.time() - start_time + + +# DEPRECATED: client-side only +def wait_for_unit(service_name, timeout=480): + """Wait `timeout` seconds for a given service name to come up.""" + wait_for_machine(num_machines=1) + start_time = time.time() + while True: + state = unit_info(service_name, 'agent-state') + if 'error' in state or state == 'started': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for service to start') + time.sleep(SLEEP_AMOUNT) + if state != 'started': + raise RuntimeError('unit did not start, agent-state: ' + state) + + +# DEPRECATED: client-side only +def wait_for_relation(service_name, relation_name, timeout=120): + """Wait `timeout` seconds for a given relation to come up.""" + start_time = time.time() + while True: + relation = unit_info(service_name, 'relations').get(relation_name) + if relation is not None and relation['state'] == 'up': + break + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting for relation to be up') + time.sleep(SLEEP_AMOUNT) + + +# DEPRECATED: client-side only +def wait_for_page_contents(url, contents, timeout=120, validate=None): + if validate is None: + validate = operator.contains + start_time = time.time() + while True: + try: + stream = urlopen(url) + except (HTTPError, URLError): + pass + else: + page = stream.read() + if validate(page, contents): + return page + if time.time() - start_time >= timeout: + raise RuntimeError('timeout waiting 
for contents of ' + url) + time.sleep(SLEEP_AMOUNT) diff --git a/hooks/charmhelpers-pl/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/charmsupport/__init__.py similarity index 100% rename from hooks/charmhelpers-pl/contrib/hahelpers/__init__.py rename to hooks/charmhelpers/contrib/charmsupport/__init__.py diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py new file mode 100644 index 0000000..95a79c2 --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py @@ -0,0 +1,360 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"""Compatibility with the nrpe-external-master charm""" +# Copyright 2012 Canonical Ltd. +# +# Authors: +# Matthew Wedgwood + +import subprocess +import pwd +import grp +import os +import glob +import shutil +import re +import shlex +import yaml + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_ids, + relation_set, + relations_of_type, +) + +from charmhelpers.core.host import service + +# This module adds compatibility with the nrpe-external-master and plain nrpe +# subordinate charms. To use it in your charm: +# +# 1. Update metadata.yaml +# +# provides: +# (...) +# nrpe-external-master: +# interface: nrpe-external-master +# scope: container +# +# and/or +# +# provides: +# (...) 
+# local-monitors: +# interface: local-monitors +# scope: container + +# +# 2. Add the following to config.yaml +# +# nagios_context: +# default: "juju" +# type: string +# description: | +# Used by the nrpe subordinate charms. +# A string that will be prepended to instance name to set the host name +# in nagios. So for instance the hostname would be something like: +# juju-myservice-0 +# If you're running multiple environments with the same services in them +# this allows you to differentiate between them. +# nagios_servicegroups: +# default: "" +# type: string +# description: | +# A comma-separated list of nagios servicegroups. +# If left empty, the nagios_context will be used as the servicegroup +# +# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master +# +# 4. Update your hooks.py with something like this: +# +# from charmsupport.nrpe import NRPE +# (...) +# def update_nrpe_config(): +# nrpe_compat = NRPE() +# nrpe_compat.add_check( +# shortname = "myservice", +# description = "Check MyService", +# check_cmd = "check_http -w 2 -c 10 http://localhost" +# ) +# nrpe_compat.add_check( +# "myservice_other", +# "Check for widget failures", +# check_cmd = "/srv/myapp/scripts/widget_check" +# ) +# nrpe_compat.write() +# +# def config_changed(): +# (...) +# update_nrpe_config() +# +# def nrpe_external_master_relation_changed(): +# update_nrpe_config() +# +# def local_monitors_relation_changed(): +# update_nrpe_config() +# +# 5. 
ln -s hooks.py nrpe-external-master-relation-changed +# ln -s hooks.py local-monitors-relation-changed + + +class CheckException(Exception): + pass + + +class Check(object): + shortname_re = '[A-Za-z0-9-_]+$' + service_template = (""" +#--------------------------------------------------- +# This file is Juju managed +#--------------------------------------------------- +define service {{ + use active-service + host_name {nagios_hostname} + service_description {nagios_hostname}[{shortname}] """ + """{description} + check_command check_nrpe!{command} + servicegroups {nagios_servicegroup} +}} +""") + + def __init__(self, shortname, description, check_cmd): + super(Check, self).__init__() + # XXX: could be better to calculate this from the service name + if not re.match(self.shortname_re, shortname): + raise CheckException("shortname must match {}".format( + Check.shortname_re)) + self.shortname = shortname + self.command = "check_{}".format(shortname) + # Note: a set of invalid characters is defined by the + # Nagios server config + # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()= + self.description = description + self.check_cmd = self._locate_cmd(check_cmd) + + def _locate_cmd(self, check_cmd): + search_path = ( + '/usr/lib/nagios/plugins', + '/usr/local/lib/nagios/plugins', + ) + parts = shlex.split(check_cmd) + for path in search_path: + if os.path.exists(os.path.join(path, parts[0])): + command = os.path.join(path, parts[0]) + if len(parts) > 1: + command += " " + " ".join(parts[1:]) + return command + log('Check command not found: {}'.format(parts[0])) + return '' + + def write(self, nagios_context, hostname, nagios_servicegroups): + nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format( + self.command) + with open(nrpe_check_file, 'w') as nrpe_check_config: + nrpe_check_config.write("# check {}\n".format(self.shortname)) + nrpe_check_config.write("command[{}]={}\n".format( + self.command, self.check_cmd)) + + if not 
os.path.exists(NRPE.nagios_exportdir): + log('Not writing service config as {} is not accessible'.format( + NRPE.nagios_exportdir)) + else: + self.write_service_config(nagios_context, hostname, + nagios_servicegroups) + + def write_service_config(self, nagios_context, hostname, + nagios_servicegroups): + for f in os.listdir(NRPE.nagios_exportdir): + if re.search('.*{}.cfg'.format(self.command), f): + os.remove(os.path.join(NRPE.nagios_exportdir, f)) + + templ_vars = { + 'nagios_hostname': hostname, + 'nagios_servicegroup': nagios_servicegroups, + 'description': self.description, + 'shortname': self.shortname, + 'command': self.command, + } + nrpe_service_text = Check.service_template.format(**templ_vars) + nrpe_service_file = '{}/service__{}_{}.cfg'.format( + NRPE.nagios_exportdir, hostname, self.command) + with open(nrpe_service_file, 'w') as nrpe_service_config: + nrpe_service_config.write(str(nrpe_service_text)) + + def run(self): + subprocess.call(self.check_cmd) + + +class NRPE(object): + nagios_logdir = '/var/log/nagios' + nagios_exportdir = '/var/lib/nagios/export' + nrpe_confdir = '/etc/nagios/nrpe.d' + + def __init__(self, hostname=None): + super(NRPE, self).__init__() + self.config = config() + self.nagios_context = self.config['nagios_context'] + if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']: + self.nagios_servicegroups = self.config['nagios_servicegroups'] + else: + self.nagios_servicegroups = self.nagios_context + self.unit_name = local_unit().replace('/', '-') + if hostname: + self.hostname = hostname + else: + self.hostname = "{}-{}".format(self.nagios_context, self.unit_name) + self.checks = [] + + def add_check(self, *args, **kwargs): + self.checks.append(Check(*args, **kwargs)) + + def write(self): + try: + nagios_uid = pwd.getpwnam('nagios').pw_uid + nagios_gid = grp.getgrnam('nagios').gr_gid + except: + log("Nagios user not set up, nrpe checks not updated") + return + + if not 
os.path.exists(NRPE.nagios_logdir): + os.mkdir(NRPE.nagios_logdir) + os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid) + + nrpe_monitors = {} + monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}} + for nrpecheck in self.checks: + nrpecheck.write(self.nagios_context, self.hostname, + self.nagios_servicegroups) + nrpe_monitors[nrpecheck.shortname] = { + "command": nrpecheck.command, + } + + service('restart', 'nagios-nrpe-server') + + monitor_ids = relation_ids("local-monitors") + \ + relation_ids("nrpe-external-master") + for rid in monitor_ids: + relation_set(relation_id=rid, monitors=yaml.dump(monitors)) + + +def get_nagios_hostcontext(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_host_context + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_host_context'] + + +def get_nagios_hostname(relation_name='nrpe-external-master'): + """ + Query relation with nrpe subordinate, return the nagios_hostname + + :param str relation_name: Name of relation nrpe sub joined to + """ + for rel in relations_of_type(relation_name): + if 'nagios_hostname' in rel: + return rel['nagios_hostname'] + + +def get_nagios_unit_name(relation_name='nrpe-external-master'): + """ + Return the nagios unit name prepended with host_context if needed + + :param str relation_name: Name of relation nrpe sub joined to + """ + host_context = get_nagios_hostcontext(relation_name) + if host_context: + unit = "%s:%s" % (host_context, local_unit()) + else: + unit = local_unit() + return unit + + +def add_init_service_checks(nrpe, services, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param list services: List of services to check + :param str unit_name: Unit name to use in check description + """ + for svc in services: + upstart_init = '/etc/init/%s.conf' % 
svc + sysv_init = '/etc/init.d/%s' % svc + if os.path.exists(upstart_init): + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_upstart_job %s' % svc + ) + elif os.path.exists(sysv_init): + cronpath = '/etc/cron.d/nagios-service-check-%s' % svc + cron_file = ('*/5 * * * * root ' + '/usr/local/lib/nagios/plugins/check_exit_status.pl ' + '-s /etc/init.d/%s status > ' + '/var/lib/nagios/service-check-%s.txt\n' % (svc, + svc) + ) + f = open(cronpath, 'w') + f.write(cron_file) + f.close() + nrpe.add_check( + shortname=svc, + description='process check {%s}' % unit_name, + check_cmd='check_status_file.py -f ' + '/var/lib/nagios/service-check-%s.txt' % svc, + ) + + +def copy_nrpe_checks(): + """ + Copy the nrpe checks into place + + """ + NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' + nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks', + 'charmhelpers', 'contrib', 'openstack', + 'files') + + if not os.path.exists(NAGIOS_PLUGINS): + os.makedirs(NAGIOS_PLUGINS) + for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")): + if os.path.isfile(fname): + shutil.copy2(fname, + os.path.join(NAGIOS_PLUGINS, os.path.basename(fname))) + + +def add_haproxy_checks(nrpe, unit_name): + """ + Add checks for each service in list + + :param NRPE nrpe: NRPE object to add check to + :param str unit_name: Unit name to use in check description + """ + nrpe.add_check( + shortname='haproxy_servers', + description='Check HAProxy {%s}' % unit_name, + check_cmd='check_haproxy.sh') + nrpe.add_check( + shortname='haproxy_queue', + description='Check HAProxy queue depth {%s}' % unit_name, + check_cmd='check_haproxy_queue_depth.sh') diff --git a/hooks/charmhelpers/contrib/charmsupport/volumes.py b/hooks/charmhelpers/contrib/charmsupport/volumes.py new file mode 100644 index 0000000..320961b --- /dev/null +++ b/hooks/charmhelpers/contrib/charmsupport/volumes.py @@ -0,0 +1,175 @@ +# Copyright 2014-2015 Canonical Limited. 
+# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +''' +Functions for managing volumes in juju units. One volume is supported per unit. +Subordinates may have their own storage, provided it is on its own partition. + +Configuration stanzas:: + + volume-ephemeral: + type: boolean + default: true + description: > + If false, a volume is mounted as sepecified in "volume-map" + If true, ephemeral storage will be used, meaning that log data + will only exist as long as the machine. YOU HAVE BEEN WARNED. + volume-map: + type: string + default: {} + description: > + YAML map of units to device names, e.g: + "{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }" + Service units will raise a configure-error if volume-ephemeral + is 'true' and no volume-map value is set. Use 'juju set' to set a + value and 'juju resolved' to complete configuration. 
+
+Usage::
+
+    from charmsupport.volumes import configure_volume, VolumeConfigurationError
+    from charmsupport.hookenv import log, ERROR
+    def pre_mount_hook():
+        stop_service('myservice')
+    def post_mount_hook():
+        start_service('myservice')
+
+    if __name__ == '__main__':
+        try:
+            configure_volume(before_change=pre_mount_hook,
+                             after_change=post_mount_hook)
+        except VolumeConfigurationError:
+            log('Storage could not be configured', ERROR)
+
+'''
+
+# XXX: Known limitations
+# - fstab is neither consulted nor updated
+
+import os
+from charmhelpers.core import hookenv
+from charmhelpers.core import host
+import yaml
+
+
+MOUNT_BASE = '/srv/juju/volumes'
+
+
+class VolumeConfigurationError(Exception):
+    '''Volume configuration data is missing or invalid'''
+    pass
+
+
+def get_config():
+    '''Gather and sanity-check volume configuration data.
+
+    Returns a dict with 'ephemeral', 'device' and 'mountpoint' keys, or
+    None if the configuration is inconsistent.
+    '''
+    volume_config = {}
+    config = hookenv.config()
+
+    errors = False
+
+    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
+        volume_config['ephemeral'] = True
+    else:
+        volume_config['ephemeral'] = False
+
+    # Pre-bind volume_map so a YAML parse error below does not leave it
+    # unbound (NameError) when it is inspected afterwards.
+    volume_map = None
+    try:
+        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
+    except yaml.YAMLError as e:
+        hookenv.log("Error parsing YAML volume-map: {}".format(e),
+                    hookenv.ERROR)
+        errors = True
+    if volume_map is None:
+        # probably an empty string
+        volume_map = {}
+    elif not isinstance(volume_map, dict):
+        hookenv.log("Volume-map should be a dictionary, not {}".format(
+            type(volume_map)))
+        errors = True
+
+    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
+    if volume_config['device'] and volume_config['ephemeral']:
+        # asked for ephemeral storage but also defined a volume ID
+        hookenv.log('A volume is defined for this unit, but ephemeral '
+                    'storage was requested', hookenv.ERROR)
+        errors = True
+    elif not volume_config['device'] and not volume_config['ephemeral']:
+        # asked for permanent storage but did not define volume ID
+        # (the previous message wrongly said "Ephemeral storage was
+        # requested" in this persistent-storage branch)
+        hookenv.log('Permanent storage was requested, but there is no volume '
+                    'defined for this unit.', hookenv.ERROR)
+        errors = True
+
+    unit_mount_name = hookenv.local_unit().replace('/', '-')
+    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
+
+    if errors:
+        return None
+    return volume_config
+
+
+def mount_volume(config):
+    if os.path.exists(config['mountpoint']):
+        if not os.path.isdir(config['mountpoint']):
+            hookenv.log('Not a directory: {}'.format(config['mountpoint']))
+            raise VolumeConfigurationError()
+    else:
+        host.mkdir(config['mountpoint'])
+    if os.path.ismount(config['mountpoint']):
+        unmount_volume(config)
+    if not host.mount(config['device'], config['mountpoint'], persist=True):
+        raise VolumeConfigurationError()
+
+
+def unmount_volume(config):
+    if os.path.ismount(config['mountpoint']):
+        if not host.umount(config['mountpoint'], persist=True):
+            raise VolumeConfigurationError()
+
+
+def managed_mounts():
+    '''List of all mounted managed volumes'''
+    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
+
+
+def configure_volume(before_change=lambda: None, after_change=lambda: None):
+    '''Set up storage (or don't) according to the charm's volume configuration.
+    Returns the mount point or "ephemeral". before_change and after_change
+    are optional functions to be called if the volume configuration changes.
+ ''' + + config = get_config() + if not config: + hookenv.log('Failed to read volume configuration', hookenv.CRITICAL) + raise VolumeConfigurationError() + + if config['ephemeral']: + if os.path.ismount(config['mountpoint']): + before_change() + unmount_volume(config) + after_change() + return 'ephemeral' + else: + # persistent storage + if os.path.ismount(config['mountpoint']): + mounts = dict(managed_mounts()) + if mounts.get(config['mountpoint']) != config['device']: + before_change() + unmount_volume(config) + mount_volume(config) + after_change() + else: + before_change() + mount_volume(config) + after_change() + return config['mountpoint'] diff --git a/hooks/charmhelpers/contrib/database/__init__.py b/hooks/charmhelpers/contrib/database/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/database/mysql.py b/hooks/charmhelpers/contrib/database/mysql.py new file mode 100644 index 0000000..20f6141 --- /dev/null +++ b/hooks/charmhelpers/contrib/database/mysql.py @@ -0,0 +1,412 @@ +"""Helper for working with a MySQL database""" +import json +import re +import sys +import platform +import os +import glob + +# from string import upper + +from charmhelpers.core.host import ( + mkdir, + pwgen, + write_file +) +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + related_units, + unit_get, + log, + DEBUG, + INFO, + WARNING, +) +from charmhelpers.fetch import ( + apt_install, + apt_update, + filter_installed_packages, +) +from charmhelpers.contrib.peerstorage import ( + peer_store, + peer_retrieve, +) +from charmhelpers.contrib.network.ip import get_host_ip + +try: + import MySQLdb +except ImportError: + apt_update(fatal=True) + apt_install(filter_installed_packages(['python-mysqldb']), fatal=True) + import MySQLdb + + +class MySQLHelper(object): + + def __init__(self, rpasswdf_template, upasswdf_template, host='localhost', + migrate_passwd_to_peer_relation=True, + delete_ondisk_passwd_file=True): 
+        self.host = host
+        # Password file path templates
+        self.root_passwd_file_template = rpasswdf_template
+        self.user_passwd_file_template = upasswdf_template
+
+        self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
+        # If we migrate we have the option to delete local copy of root passwd
+        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
+
+    def connect(self, user='root', password=None):
+        log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
+        self.connection = MySQLdb.connect(user=user, host=self.host,
+                                          passwd=password)
+
+    def database_exists(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("SHOW DATABASES")
+            databases = [i[0] for i in cursor.fetchall()]
+        finally:
+            cursor.close()
+
+        return db_name in databases
+
+    def create_database(self, db_name):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
+                           .format(db_name))
+        finally:
+            cursor.close()
+
+    def grant_exists(self, db_name, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
+                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
+        try:
+            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
+                                                              remote_ip))
+            grants = [i[0] for i in cursor.fetchall()]
+        except MySQLdb.OperationalError:
+            return False
+        finally:
+            cursor.close()
+
+        # TODO: review for different grants
+        return priv_string in grants
+
+    def create_grant(self, db_name, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            # TODO: review for different grants
+            # NOTE(review): SQL here is built by string formatting from
+            # relation data; acceptable only because inputs come from
+            # trusted peer units - do not reuse with untrusted input.
+            cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_name,
+                                                       db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def create_admin_grant(self, db_user, remote_ip, password):
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
+                           "IDENTIFIED BY '{}'".format(db_user,
+                                                       remote_ip,
+                                                       password))
+        finally:
+            cursor.close()
+
+    def cleanup_grant(self, db_user, remote_ip):
+        cursor = self.connection.cursor()
+        try:
+            # BUG FIX: "DROP FROM mysql.user ..." is not valid SQL and
+            # always errored; removing the grant row requires DELETE.
+            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
+                           "AND HOST='{}'".format(db_user,
+                                                  remote_ip))
+        finally:
+            cursor.close()
+
+    def execute(self, sql):
+        """Execute arbitrary SQL against the database."""
+        cursor = self.connection.cursor()
+        try:
+            cursor.execute(sql)
+        finally:
+            cursor.close()
+
+    def migrate_passwords_to_peer_relation(self, excludes=None):
+        """Migrate any passwords storage on disk to cluster peer relation."""
+        dirname = os.path.dirname(self.root_passwd_file_template)
+        path = os.path.join(dirname, '*.passwd')
+        for f in glob.glob(path):
+            if excludes and f in excludes:
+                log("Excluding %s from peer migration" % (f), level=DEBUG)
+                continue
+
+            key = os.path.basename(f)
+            with open(f, 'r') as passwd:
+                _value = passwd.read().strip()
+
+            try:
+                peer_store(key, _value)
+
+                if self.delete_ondisk_passwd_file:
+                    os.unlink(f)
+            except ValueError:
+                # NOTE cluster relation not yet ready - skip for now
+                pass
+
+    def get_mysql_password_on_disk(self, username=None, password=None):
+        """Retrieve, generate or store a mysql password for the provided
+        username on disk."""
+        if username:
+            template = self.user_passwd_file_template
+            passwd_file = template.format(username)
+        else:
+            passwd_file = self.root_passwd_file_template
+
+        _password = None
+        if os.path.exists(passwd_file):
+            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
+            with open(passwd_file, 'r') as passwd:
+                _password = passwd.read().strip()
+        else:
+            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
+            if not os.path.isdir(os.path.dirname(passwd_file)):
+                # NOTE: need to ensure this is not mysql root dir (which needs
+                # to be mysql readable)
+                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
+                      perms=0o770)
+                # Force permissions - for some reason the chmod in makedirs
+                # fails
os.chmod(os.path.dirname(passwd_file), 0o770) + + _password = password or pwgen(length=32) + write_file(passwd_file, _password, owner='root', group='root', + perms=0o660) + + return _password + + def passwd_keys(self, username): + """Generator to return keys used to store passwords in peer store. + + NOTE: we support both legacy and new format to support mysql + charm prior to refactor. This is necessary to avoid LP 1451890. + """ + keys = [] + if username == 'mysql': + log("Bad username '%s'" % (username), level=WARNING) + + if username: + # IMPORTANT: *newer* format must be returned first + keys.append('mysql-%s.passwd' % (username)) + keys.append('%s.passwd' % (username)) + else: + keys.append('mysql.passwd') + + for key in keys: + yield key + + def get_mysql_password(self, username=None, password=None): + """Retrieve, generate or store a mysql password for the provided + username using peer relation cluster.""" + excludes = [] + + # First check peer relation. + try: + for key in self.passwd_keys(username): + _password = peer_retrieve(key) + if _password: + break + + # If root password available don't update peer relation from local + if _password and not username: + excludes.append(self.root_passwd_file_template) + + except ValueError: + # cluster relation is not yet started; use on-disk + _password = None + + # If none available, generate new one + if not _password: + _password = self.get_mysql_password_on_disk(username, password) + + # Put on wire if required + if self.migrate_passwd_to_peer_relation: + self.migrate_passwords_to_peer_relation(excludes=excludes) + + return _password + + def get_mysql_root_password(self, password=None): + """Retrieve or generate mysql root password for service units.""" + return self.get_mysql_password(username=None, password=password) + + def normalize_address(self, hostname): + """Ensure that address returned is an IP address (i.e. 
not fqdn)""" + if config_get('prefer-ipv6'): + # TODO: add support for ipv6 dns + return hostname + + if hostname != unit_get('private-address'): + return get_host_ip(hostname, fallback=hostname) + + # Otherwise assume localhost + return '127.0.0.1' + + def get_allowed_units(self, database, username, relation_id=None): + """Get list of units with access grants for database with username. + + This is typically used to provide shared-db relations with a list of + which units have been granted access to the given database. + """ + self.connect(password=self.get_mysql_root_password()) + allowed_units = set() + for unit in related_units(relation_id): + settings = relation_get(rid=relation_id, unit=unit) + # First check for setting with prefix, then without + for attr in ["%s_hostname" % (database), 'hostname']: + hosts = settings.get(attr, None) + if hosts: + break + + if hosts: + # hostname can be json-encoded list of hostnames + try: + hosts = json.loads(hosts) + except ValueError: + hosts = [hosts] + else: + hosts = [settings['private-address']] + + if hosts: + for host in hosts: + host = self.normalize_address(host) + if self.grant_exists(database, username, host): + log("Grant exists for host '%s' on db '%s'" % + (host, database), level=DEBUG) + if unit not in allowed_units: + allowed_units.add(unit) + else: + log("Grant does NOT exist for host '%s' on db '%s'" % + (host, database), level=DEBUG) + else: + log("No hosts found for grant check", level=INFO) + + return allowed_units + + def configure_db(self, hostname, database, username, admin=False): + """Configure access to database for username from hostname.""" + self.connect(password=self.get_mysql_root_password()) + if not self.database_exists(database): + self.create_database(database) + + remote_ip = self.normalize_address(hostname) + password = self.get_mysql_password(username) + if not self.grant_exists(database, username, remote_ip): + if not admin: + self.create_grant(database, username, remote_ip, 
password) + else: + self.create_admin_grant(username, remote_ip, password) + + return password + + +class PerconaClusterHelper(object): + + # Going for the biggest page size to avoid wasted bytes. + # InnoDB page size is 16MB + + DEFAULT_PAGE_SIZE = 16 * 1024 * 1024 + DEFAULT_INNODB_BUFFER_FACTOR = 0.50 + + def human_to_bytes(self, human): + """Convert human readable configuration options to bytes.""" + num_re = re.compile('^[0-9]+$') + if num_re.match(human): + return human + + factors = { + 'K': 1024, + 'M': 1048576, + 'G': 1073741824, + 'T': 1099511627776 + } + modifier = human[-1] + if modifier in factors: + return int(human[:-1]) * factors[modifier] + + if modifier == '%': + total_ram = self.human_to_bytes(self.get_mem_total()) + if self.is_32bit_system() and total_ram > self.sys_mem_limit(): + total_ram = self.sys_mem_limit() + factor = int(human[:-1]) * 0.01 + pctram = total_ram * factor + return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE)) + + raise ValueError("Can only convert K,M,G, or T") + + def is_32bit_system(self): + """Determine whether system is 32 or 64 bit.""" + try: + return sys.maxsize < 2 ** 32 + except OverflowError: + return False + + def sys_mem_limit(self): + """Determine the default memory limit for the current service unit.""" + if platform.machine() in ['armv7l']: + _mem_limit = self.human_to_bytes('2700M') # experimentally determined + else: + # Limit for x86 based 32bit systems + _mem_limit = self.human_to_bytes('4G') + + return _mem_limit + + def get_mem_total(self): + """Calculate the total memory in the current service unit.""" + with open('/proc/meminfo') as meminfo_file: + for line in meminfo_file: + key, mem = line.split(':', 2) + if key == 'MemTotal': + mtot, modifier = mem.strip().split(' ') + return '%s%s' % (mtot, modifier[0].upper()) + + def parse_config(self): + """Parse charm configuration and calculate values for config files.""" + config = config_get() + mysql_config = {} + if 'max-connections' in config: + 
mysql_config['max_connections'] = config['max-connections'] + + if 'wait-timeout' in config: + mysql_config['wait_timeout'] = config['wait-timeout'] + + if 'innodb-flush-log-at-trx-commit' in config: + mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit'] + + # Set a sane default key_buffer size + mysql_config['key_buffer'] = self.human_to_bytes('32M') + total_memory = self.human_to_bytes(self.get_mem_total()) + + dataset_bytes = config.get('dataset-size', None) + innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None) + + if innodb_buffer_pool_size: + innodb_buffer_pool_size = self.human_to_bytes( + innodb_buffer_pool_size) + elif dataset_bytes: + log("Option 'dataset-size' has been deprecated, please use" + "innodb_buffer_pool_size option instead", level="WARN") + innodb_buffer_pool_size = self.human_to_bytes( + dataset_bytes) + else: + innodb_buffer_pool_size = int( + total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR) + + if innodb_buffer_pool_size > total_memory: + log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format( + innodb_buffer_pool_size, + total_memory), level='WARN') + + mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size + return mysql_config diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py index c555d7a..aa0b515 100644 --- a/hooks/charmhelpers/contrib/hahelpers/cluster.py +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import ( ERROR, WARNING, unit_get, + is_leader as juju_is_leader ) from charmhelpers.core.decorators import ( retry_on_exception, @@ -63,17 +64,30 @@ class CRMResourceNotFound(Exception): pass +class CRMDCNotFound(Exception): + pass + + def is_elected_leader(resource): """ Returns True if the charm executing this is the elected cluster leader. It relies on two mechanisms to determine leadership: - 1. 
If the charm is part of a corosync cluster, call corosync to + 1. If juju is sufficiently new and leadership election is supported, + the is_leader command will be used. + 2. If the charm is part of a corosync cluster, call corosync to determine leadership. - 2. If the charm is not part of a corosync cluster, the leader is + 3. If the charm is not part of a corosync cluster, the leader is determined as being "the alive unit with the lowest unit numer". In other words, the oldest surviving unit. """ + try: + return juju_is_leader() + except NotImplementedError: + log('Juju leadership election feature not enabled' + ', using fallback support', + level=WARNING) + if is_clustered(): if not is_crm_leader(resource): log('Deferring action to CRM leader.', level=INFO) @@ -106,8 +120,9 @@ def is_crm_dc(): status = subprocess.check_output(cmd, stderr=subprocess.STDOUT) if not isinstance(status, six.text_type): status = six.text_type(status, "utf-8") - except subprocess.CalledProcessError: - return False + except subprocess.CalledProcessError as ex: + raise CRMDCNotFound(str(ex)) + current_dc = '' for line in status.split('\n'): if line.startswith('Current DC'): @@ -115,10 +130,14 @@ def is_crm_dc(): current_dc = line.split(':')[1].split()[0] if current_dc == get_unit_hostname(): return True + elif current_dc == 'NONE': + raise CRMDCNotFound('Current DC: NONE') + return False -@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound) +@retry_on_exception(5, base_delay=2, + exc_type=(CRMResourceNotFound, CRMDCNotFound)) def is_crm_leader(resource, retry=False): """ Returns True if the charm calling this is the elected corosync leader, diff --git a/hooks/charmhelpers/contrib/network/ufw.py b/hooks/charmhelpers/contrib/network/ufw.py new file mode 100644 index 0000000..d40110d --- /dev/null +++ b/hooks/charmhelpers/contrib/network/ufw.py @@ -0,0 +1,319 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module contains helpers to add and remove ufw rules. + +Examples: + +- open SSH port for subnet 10.0.3.0/24: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp') + +- open service by name as defined in /etc/services: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('ssh', 'open') + +- close service by port number: + + >>> from charmhelpers.contrib.network import ufw + >>> ufw.enable() + >>> ufw.service('4949', 'close') # munin +""" +import re +import os +import subprocess +from charmhelpers.core import hookenv + +__author__ = "Felipe Reyes " + + +class UFWError(Exception): + pass + + +class UFWIPv6Error(UFWError): + pass + + +def is_enabled(): + """ + Check if `ufw` is enabled + + :returns: True if ufw is enabled + """ + output = subprocess.check_output(['ufw', 'status'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall(r'^Status: active\n', output, re.M) + + return len(m) >= 1 + + +def is_ipv6_ok(soft_fail=False): + """ + Check if IPv6 support is present and ip6tables functional + + :param soft_fail: If set to True and IPv6 support is broken, then reports + that the host doesn't have IPv6 support, otherwise a + UFWIPv6Error exception is raised. 
+ :returns: True if IPv6 is working, False otherwise + """ + + # do we have IPv6 in the machine? + if os.path.isdir('/proc/sys/net/ipv6'): + # is ip6tables kernel module loaded? + lsmod = subprocess.check_output(['lsmod'], universal_newlines=True) + matches = re.findall('^ip6_tables[ ]+', lsmod, re.M) + if len(matches) == 0: + # ip6tables support isn't complete, let's try to load it + try: + subprocess.check_output(['modprobe', 'ip6_tables'], + universal_newlines=True) + # great, we could load the module + return True + except subprocess.CalledProcessError as ex: + hookenv.log("Couldn't load ip6_tables module: %s" % ex.output, + level="WARN") + # we are in a world where ip6tables isn't working + if soft_fail: + # so we inform that the machine doesn't have IPv6 + return False + else: + raise UFWIPv6Error("IPv6 firewall support broken") + else: + # the module is present :) + return True + + else: + # the system doesn't have IPv6 + return False + + +def disable_ipv6(): + """ + Disable ufw IPv6 support in /etc/default/ufw + """ + exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g', + '/etc/default/ufw']) + if exit_code == 0: + hookenv.log('IPv6 support in ufw disabled', level='INFO') + else: + hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR") + raise UFWError("Couldn't disable IPv6 support in ufw") + + +def enable(soft_fail=False): + """ + Enable ufw + + :param soft_fail: If set to True silently disables IPv6 support in ufw, + otherwise a UFWIPv6Error exception is raised when IP6 + support is broken. 
+ :returns: True if ufw is successfully enabled + """ + if is_enabled(): + return True + + if not is_ipv6_ok(soft_fail): + disable_ipv6() + + output = subprocess.check_output(['ufw', 'enable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall('^Firewall is active and enabled on system startup\n', + output, re.M) + hookenv.log(output, level='DEBUG') + + if len(m) == 0: + hookenv.log("ufw couldn't be enabled", level='WARN') + return False + else: + hookenv.log("ufw enabled", level='INFO') + return True + + +def disable(): + """ + Disable ufw + + :returns: True if ufw is successfully disabled + """ + if not is_enabled(): + return True + + output = subprocess.check_output(['ufw', 'disable'], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + + m = re.findall(r'^Firewall stopped and disabled on system startup\n', + output, re.M) + hookenv.log(output, level='DEBUG') + + if len(m) == 0: + hookenv.log("ufw couldn't be disabled", level='WARN') + return False + else: + hookenv.log("ufw disabled", level='INFO') + return True + + +def default_policy(policy='deny', direction='incoming'): + """ + Changes the default policy for traffic `direction` + + :param policy: allow, deny or reject + :param direction: traffic direction, possible values: incoming, outgoing, + routed + """ + if policy not in ['allow', 'deny', 'reject']: + raise UFWError(('Unknown policy %s, valid values: ' + 'allow, deny, reject') % policy) + + if direction not in ['incoming', 'outgoing', 'routed']: + raise UFWError(('Unknown direction %s, valid values: ' + 'incoming, outgoing, routed') % direction) + + output = subprocess.check_output(['ufw', 'default', policy, direction], + universal_newlines=True, + env={'LANG': 'en_US', + 'PATH': os.environ['PATH']}) + hookenv.log(output, level='DEBUG') + + m = re.findall("^Default %s policy changed to '%s'\n" % (direction, + policy), + output, re.M) + if len(m) == 0: + hookenv.log("ufw 
couldn't change the default policy to %s for %s" + % (policy, direction), level='WARN') + return False + else: + hookenv.log("ufw default policy for %s changed to %s" + % (direction, policy), level='INFO') + return True + + +def modify_access(src, dst='any', port=None, proto=None, action='allow', + index=None): + """ + Grant access to an address or subnet + + :param src: address (e.g. 192.168.1.234) or subnet + (e.g. 192.168.1.0/24). + :param dst: destiny of the connection, if the machine has multiple IPs and + connections to only one of those have to accepted this is the + field has to be set. + :param port: destiny port + :param proto: protocol (tcp or udp) + :param action: `allow` or `delete` + :param index: if different from None the rule is inserted at the given + `index`. + """ + if not is_enabled(): + hookenv.log('ufw is disabled, skipping modify_access()', level='WARN') + return + + if action == 'delete': + cmd = ['ufw', 'delete', 'allow'] + elif index is not None: + cmd = ['ufw', 'insert', str(index), action] + else: + cmd = ['ufw', action] + + if src is not None: + cmd += ['from', src] + + if dst is not None: + cmd += ['to', dst] + + if port is not None: + cmd += ['port', str(port)] + + if proto is not None: + cmd += ['proto', proto] + + hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG') + p = subprocess.Popen(cmd, stdout=subprocess.PIPE) + (stdout, stderr) = p.communicate() + + hookenv.log(stdout, level='INFO') + + if p.returncode != 0: + hookenv.log(stderr, level='ERROR') + hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd), + p.returncode), + level='ERROR') + + +def grant_access(src, dst='any', port=None, proto=None, index=None): + """ + Grant access to an address or subnet + + :param src: address (e.g. 192.168.1.234) or subnet + (e.g. 192.168.1.0/24). + :param dst: destiny of the connection, if the machine has multiple IPs and + connections to only one of those have to accepted this is the + field has to be set. 
+ :param port: destiny port + :param proto: protocol (tcp or udp) + :param index: if different from None the rule is inserted at the given + `index`. + """ + return modify_access(src, dst=dst, port=port, proto=proto, action='allow', + index=index) + + +def revoke_access(src, dst='any', port=None, proto=None): + """ + Revoke access to an address or subnet + + :param src: address (e.g. 192.168.1.234) or subnet + (e.g. 192.168.1.0/24). + :param dst: destiny of the connection, if the machine has multiple IPs and + connections to only one of those have to accepted this is the + field has to be set. + :param port: destiny port + :param proto: protocol (tcp or udp) + """ + return modify_access(src, dst=dst, port=port, proto=proto, action='delete') + + +def service(name, action): + """ + Open/close access to a service + + :param name: could be a service name defined in `/etc/services` or a port + number. + :param action: `open` or `close` + """ + if action == 'open': + subprocess.check_output(['ufw', 'allow', str(name)], + universal_newlines=True) + elif action == 'close': + subprocess.check_output(['ufw', 'delete', 'allow', str(name)], + universal_newlines=True) + else: + raise UFWError(("'{}' not supported, use 'allow' " + "or 'delete'").format(action)) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index 461a702..b01e6cb 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -79,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment): services.append(this_service) use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Openstack subordinate charms do not expose an origin option as that - # is controlled by the principle - ignore = ['neutron-openvswitch'] + # Most OpenStack subordinate charms do not expose an origin option + # as that is controlled by the principle. 
+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] if self.openstack: for svc in services: @@ -110,7 +110,8 @@ class OpenStackAmuletDeployment(AmuletDeployment): (self.precise_essex, self.precise_folsom, self.precise_grizzly, self.precise_havana, self.precise_icehouse, self.trusty_icehouse, self.trusty_juno, self.utopic_juno, - self.trusty_kilo, self.vivid_kilo) = range(10) + self.trusty_kilo, self.vivid_kilo, self.trusty_liberty, + self.wily_liberty) = range(12) releases = { ('precise', None): self.precise_essex, @@ -121,8 +122,10 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', None): self.trusty_icehouse, ('trusty', 'cloud:trusty-juno'): self.trusty_juno, ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo, + ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty, ('utopic', None): self.utopic_juno, - ('vivid', None): self.vivid_kilo} + ('vivid', None): self.vivid_kilo, + ('wily', None): self.wily_liberty} return releases[(self.series, self.openstack)] def _get_openstack_release_string(self): @@ -138,9 +141,43 @@ class OpenStackAmuletDeployment(AmuletDeployment): ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) if self.openstack: os_origin = self.openstack.split(':')[1] return os_origin.split('%s-' % self.series)[1].split('/')[0] else: return releases[self.series] + + def get_ceph_expected_pools(self, radosgw=False): + """Return a list of expected ceph pools in a ceph + cinder + glance + test scenario, based on OpenStack release and whether ceph radosgw + is flagged as present or not.""" + + if self._get_openstack_release() >= self.trusty_kilo: + # Kilo or later + pools = [ + 'rbd', + 'cinder', + 'glance' + ] + else: + # Juno or earlier + pools = [ + 'data', + 'metadata', + 'rbd', + 'cinder', + 'glance' + ] + + if radosgw: + pools.extend([ + '.rgw.root', + '.rgw.control', + '.rgw', + '.rgw.gc', + '.users.uid' + ]) + + return pools diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py 
b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 9c3d918..03f7927 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -14,16 +14,20 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . +import amulet +import json import logging import os +import six import time import urllib +import cinderclient.v1.client as cinder_client import glanceclient.v1.client as glance_client +import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client - -import six +import swiftclient from charmhelpers.contrib.amulet.utils import ( AmuletUtils @@ -37,7 +41,7 @@ class OpenStackAmuletUtils(AmuletUtils): """OpenStack amulet utilities. This class inherits from AmuletUtils and has additional support - that is specifically for use by OpenStack charms. + that is specifically for use by OpenStack charm tests. """ def __init__(self, log_level=ERROR): @@ -51,6 +55,8 @@ class OpenStackAmuletUtils(AmuletUtils): Validate actual endpoint data vs expected endpoint data. The ports are used to find the matching endpoint. """ + self.log.debug('Validating endpoint data...') + self.log.debug('actual: {}'.format(repr(endpoints))) found = False for ep in endpoints: self.log.debug('endpoint: {}'.format(repr(ep))) @@ -77,6 +83,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual service catalog endpoints vs a list of expected service catalog endpoints. """ + self.log.debug('Validating service catalog endpoint data...') self.log.debug('actual: {}'.format(repr(actual))) for k, v in six.iteritems(expected): if k in actual: @@ -93,6 +100,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual tenant data vs list of expected tenant data. 
""" + self.log.debug('Validating tenant data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -114,6 +122,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual role data vs a list of expected role data. """ + self.log.debug('Validating role data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -134,6 +143,7 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual user data vs a list of expected user data. """ + self.log.debug('Validating user data...') self.log.debug('actual: {}'.format(repr(actual))) for e in expected: found = False @@ -155,17 +165,30 @@ class OpenStackAmuletUtils(AmuletUtils): Validate a list of actual flavors vs a list of expected flavors. """ + self.log.debug('Validating flavor data...') self.log.debug('actual: {}'.format(repr(actual))) act = [a.name for a in actual] return self._validate_list_data(expected, act) def tenant_exists(self, keystone, tenant): """Return True if tenant exists.""" + self.log.debug('Checking if tenant exists ({})...'.format(tenant)) return tenant in [t.name for t in keystone.tenants.list()] + def authenticate_cinder_admin(self, keystone_sentry, username, + password, tenant): + """Authenticates admin user with cinder.""" + # NOTE(beisner): cinder python client doesn't accept tokens. 
+ service_ip = \ + keystone_sentry.relation('shared-db', + 'mysql:shared-db')['private-address'] + ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8')) + return cinder_client.Client(username, password, tenant, ept) + def authenticate_keystone_admin(self, keystone_sentry, user, password, tenant): """Authenticates admin user with the keystone admin endpoint.""" + self.log.debug('Authenticating keystone admin...') unit = keystone_sentry service_ip = unit.relation('shared-db', 'mysql:shared-db')['private-address'] @@ -175,6 +198,7 @@ class OpenStackAmuletUtils(AmuletUtils): def authenticate_keystone_user(self, keystone, user, password, tenant): """Authenticates a regular user with the keystone public endpoint.""" + self.log.debug('Authenticating keystone user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return keystone_client.Client(username=user, password=password, @@ -182,19 +206,49 @@ class OpenStackAmuletUtils(AmuletUtils): def authenticate_glance_admin(self, keystone): """Authenticates admin user with glance.""" + self.log.debug('Authenticating glance admin...') ep = keystone.service_catalog.url_for(service_type='image', endpoint_type='adminURL') return glance_client.Client(ep, token=keystone.auth_token) + def authenticate_heat_admin(self, keystone): + """Authenticates the admin user with heat.""" + self.log.debug('Authenticating heat admin...') + ep = keystone.service_catalog.url_for(service_type='orchestration', + endpoint_type='publicURL') + return heat_client.Client(endpoint=ep, token=keystone.auth_token) + def authenticate_nova_user(self, keystone, user, password, tenant): """Authenticates a regular user with nova-api.""" + self.log.debug('Authenticating nova user ({})...'.format(user)) ep = keystone.service_catalog.url_for(service_type='identity', endpoint_type='publicURL') return nova_client.Client(username=user, api_key=password, project_id=tenant, auth_url=ep) + def 
authenticate_swift_user(self, keystone, user, password, tenant): + """Authenticates a regular user with swift api.""" + self.log.debug('Authenticating swift user ({})...'.format(user)) + ep = keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + return swiftclient.Connection(authurl=ep, + user=user, + key=password, + tenant_name=tenant, + auth_version='2.0') + def create_cirros_image(self, glance, image_name): - """Download the latest cirros image and upload it to glance.""" + """Download the latest cirros image and upload it to glance, + validate and return a resource pointer. + + :param glance: pointer to authenticated glance connection + :param image_name: display name for new image + :returns: glance image pointer + """ + self.log.debug('Creating glance cirros image ' + '({})...'.format(image_name)) + + # Download cirros image http_proxy = os.getenv('AMULET_HTTP_PROXY') self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy)) if http_proxy: @@ -203,57 +257,67 @@ class OpenStackAmuletUtils(AmuletUtils): else: opener = urllib.FancyURLopener() - f = opener.open("http://download.cirros-cloud.net/version/released") + f = opener.open('http://download.cirros-cloud.net/version/released') version = f.read().strip() - cirros_img = "cirros-{}-x86_64-disk.img".format(version) + cirros_img = 'cirros-{}-x86_64-disk.img'.format(version) local_path = os.path.join('tests', cirros_img) if not os.path.exists(local_path): - cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net", + cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net', version, cirros_img) opener.retrieve(cirros_url, local_path) f.close() + # Create glance image with open(local_path) as f: image = glance.images.create(name=image_name, is_public=True, disk_format='qcow2', container_format='bare', data=f) - count = 1 - status = image.status - while status != 'active' and count < 10: - time.sleep(3) - image = glance.images.get(image.id) - status = 
image.status - self.log.debug('image status: {}'.format(status)) - count += 1 - if status != 'active': - self.log.error('image creation timed out') - return None + # Wait for image to reach active status + img_id = image.id + ret = self.resource_reaches_status(glance.images, img_id, + expected_stat='active', + msg='Image status wait') + if not ret: + msg = 'Glance image failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new image + self.log.debug('Validating image attributes...') + val_img_name = glance.images.get(img_id).name + val_img_stat = glance.images.get(img_id).status + val_img_pub = glance.images.get(img_id).is_public + val_img_cfmt = glance.images.get(img_id).container_format + val_img_dfmt = glance.images.get(img_id).disk_format + msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} ' + 'container fmt:{} disk fmt:{}'.format( + val_img_name, val_img_pub, img_id, + val_img_stat, val_img_cfmt, val_img_dfmt)) + + if val_img_name == image_name and val_img_stat == 'active' \ + and val_img_pub is True and val_img_cfmt == 'bare' \ + and val_img_dfmt == 'qcow2': + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) return image def delete_image(self, glance, image): """Delete the specified image.""" - num_before = len(list(glance.images.list())) - glance.images.delete(image) - count = 1 - num_after = len(list(glance.images.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(glance.images.list())) - self.log.debug('number of images: {}'.format(num_after)) - count += 1 - - if num_after != (num_before - 1): - self.log.error('image deletion timed out') - return False - - return True + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_image.') + self.log.debug('Deleting glance image ({})...'.format(image)) + return 
self.delete_resource(glance.images, image, msg='glance image') def create_instance(self, nova, image_name, instance_name, flavor): """Create the specified instance.""" + self.log.debug('Creating instance ' + '({}|{}|{})'.format(instance_name, image_name, flavor)) image = nova.images.find(name=image_name) flavor = nova.flavors.find(name=flavor) instance = nova.servers.create(name=instance_name, image=image, @@ -276,19 +340,265 @@ class OpenStackAmuletUtils(AmuletUtils): def delete_instance(self, nova, instance): """Delete the specified instance.""" - num_before = len(list(nova.servers.list())) - nova.servers.delete(instance) - count = 1 - num_after = len(list(nova.servers.list())) - while num_after != (num_before - 1) and count < 10: - time.sleep(3) - num_after = len(list(nova.servers.list())) - self.log.debug('number of instances: {}'.format(num_after)) - count += 1 + # /!\ DEPRECATION WARNING + self.log.warn('/!\\ DEPRECATION WARNING: use ' + 'delete_resource instead of delete_instance.') + self.log.debug('Deleting instance ({})...'.format(instance)) + return self.delete_resource(nova.servers, instance, + msg='nova instance') - if num_after != (num_before - 1): - self.log.error('instance deletion timed out') + def create_or_get_keypair(self, nova, keypair_name="testkey"): + """Create a new keypair, or return pointer if it already exists.""" + try: + _keypair = nova.keypairs.get(keypair_name) + self.log.debug('Keypair ({}) already exists, ' + 'using it.'.format(keypair_name)) + return _keypair + except: + self.log.debug('Keypair ({}) does not exist, ' + 'creating it.'.format(keypair_name)) + + _keypair = nova.keypairs.create(name=keypair_name) + return _keypair + + def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1, + img_id=None, src_vol_id=None, snap_id=None): + """Create cinder volume, optionally from a glance image, OR + optionally as a clone of an existing volume, OR optionally + from a snapshot. 
Wait for the new volume status to reach + the expected status, validate and return a resource pointer. + + :param vol_name: cinder volume display name + :param vol_size: size in gigabytes + :param img_id: optional glance image id + :param src_vol_id: optional source volume id to clone + :param snap_id: optional snapshot id to use + :returns: cinder volume pointer + """ + # Handle parameter input and avoid impossible combinations + if img_id and not src_vol_id and not snap_id: + # Create volume from image + self.log.debug('Creating cinder volume from glance image...') + bootable = 'true' + elif src_vol_id and not img_id and not snap_id: + # Clone an existing volume + self.log.debug('Cloning cinder volume...') + bootable = cinder.volumes.get(src_vol_id).bootable + elif snap_id and not src_vol_id and not img_id: + # Create volume from snapshot + self.log.debug('Creating cinder volume from snapshot...') + snap = cinder.volume_snapshots.find(id=snap_id) + vol_size = snap.size + snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id + bootable = cinder.volumes.get(snap_vol_id).bootable + elif not img_id and not src_vol_id and not snap_id: + # Create volume + self.log.debug('Creating cinder volume...') + bootable = 'false' + else: + # Impossible combination of parameters + msg = ('Invalid method use - name:{} size:{} img_id:{} ' + 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size, + img_id, src_vol_id, + snap_id)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Create new volume + try: + vol_new = cinder.volumes.create(display_name=vol_name, + imageRef=img_id, + size=vol_size, + source_volid=src_vol_id, + snapshot_id=snap_id) + vol_id = vol_new.id + except Exception as e: + msg = 'Failed to create volume: {}'.format(e) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Wait for volume to reach available status + ret = self.resource_reaches_status(cinder.volumes, vol_id, + expected_stat="available", + msg="Volume status wait") + if not ret: + msg = 'Cinder volume 
failed to reach expected state.' + amulet.raise_status(amulet.FAIL, msg=msg) + + # Re-validate new volume + self.log.debug('Validating volume attributes...') + val_vol_name = cinder.volumes.get(vol_id).display_name + val_vol_boot = cinder.volumes.get(vol_id).bootable + val_vol_stat = cinder.volumes.get(vol_id).status + val_vol_size = cinder.volumes.get(vol_id).size + msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:' + '{} size:{}'.format(val_vol_name, vol_id, + val_vol_stat, val_vol_boot, + val_vol_size)) + + if val_vol_boot == bootable and val_vol_stat == 'available' \ + and val_vol_name == vol_name and val_vol_size == vol_size: + self.log.debug(msg_attr) + else: + msg = ('Volume validation failed, {}'.format(msg_attr)) + amulet.raise_status(amulet.FAIL, msg=msg) + + return vol_new + + def delete_resource(self, resource, resource_id, + msg="resource", max_wait=120): + """Delete one openstack resource, such as one instance, keypair, + image, volume, stack, etc., and confirm deletion within max wait time. 
+ + :param resource: pointer to os resource type, ex:glance_client.images + :param resource_id: unique name or id for the openstack resource + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, otherwise False + """ + self.log.debug('Deleting OpenStack resource ' + '{} ({})'.format(resource_id, msg)) + num_before = len(list(resource.list())) + resource.delete(resource_id) + + tries = 0 + num_after = len(list(resource.list())) + while num_after != (num_before - 1) and tries < (max_wait / 4): + self.log.debug('{} delete check: ' + '{} [{}:{}] {}'.format(msg, tries, + num_before, + num_after, + resource_id)) + time.sleep(4) + num_after = len(list(resource.list())) + tries += 1 + + self.log.debug('{}: expected, actual count = {}, ' + '{}'.format(msg, num_before - 1, num_after)) + + if num_after == (num_before - 1): + return True + else: + self.log.error('{} delete timed out'.format(msg)) return False - return True + def resource_reaches_status(self, resource, resource_id, + expected_stat='available', + msg='resource', max_wait=120): + """Wait for an openstack resources status to reach an + expected status within a specified time. Useful to confirm that + nova instances, cinder vols, snapshots, glance images, heat stacks + and other resources eventually reach the expected status. 
+ + :param resource: pointer to os resource type, ex: heat_client.stacks + :param resource_id: unique id for the openstack resource + :param expected_stat: status to expect resource to reach + :param msg: text to identify purpose in logging + :param max_wait: maximum wait time in seconds + :returns: True if successful, False if status is not reached + """ + + tries = 0 + resource_stat = resource.get(resource_id).status + while resource_stat != expected_stat and tries < (max_wait / 4): + self.log.debug('{} status check: ' + '{} [{}:{}] {}'.format(msg, tries, + resource_stat, + expected_stat, + resource_id)) + time.sleep(4) + resource_stat = resource.get(resource_id).status + tries += 1 + + self.log.debug('{}: expected, actual status = {}, ' + '{}'.format(msg, resource_stat, expected_stat)) + + if resource_stat == expected_stat: + return True + else: + self.log.debug('{} never reached expected status: ' + '{}'.format(resource_id, expected_stat)) + return False + + def get_ceph_osd_id_cmd(self, index): + """Produce a shell command that will return a ceph-osd id.""" + return ("`initctl list | grep 'ceph-osd ' | " + "awk 'NR=={} {{ print $2 }}' | " + "grep -o '[0-9]*'`".format(index + 1)) + + def get_ceph_pools(self, sentry_unit): + """Return a dict of ceph pools from a single ceph unit, with + pool name as keys, pool id as vals.""" + pools = {} + cmd = 'sudo ceph osd lspools' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + + # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance, + for pool in str(output).split(','): + pool_id_name = pool.split(' ') + if len(pool_id_name) == 2: + pool_id = pool_id_name[0] + pool_name = pool_id_name[1] + pools[pool_name] = int(pool_id) + + self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'], + pools)) + return pools + + def get_ceph_df(self, sentry_unit): + 
"""Return dict of ceph df json output, including ceph pool state. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :returns: Dict of ceph df output + """ + cmd = 'sudo ceph df --format=json' + output, code = sentry_unit.run(cmd) + if code != 0: + msg = ('{} `{}` returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return json.loads(output) + + def get_ceph_pool_sample(self, sentry_unit, pool_id=0): + """Take a sample of attributes of a ceph pool, returning ceph + pool name, object count and disk space used for the specified + pool ID number. + + :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param pool_id: Ceph pool ID + :returns: List of pool name, object count, kb disk space used + """ + df = self.get_ceph_df(sentry_unit) + pool_name = df['pools'][pool_id]['name'] + obj_count = df['pools'][pool_id]['stats']['objects'] + kb_used = df['pools'][pool_id]['stats']['kb_used'] + self.log.debug('Ceph {} pool (ID {}): {} objects, ' + '{} kb used'.format(pool_name, pool_id, + obj_count, kb_used)) + return pool_name, obj_count, kb_used + + def validate_ceph_pool_samples(self, samples, sample_type="resource pool"): + """Validate ceph pool samples taken over time, such as pool + object counts or pool kb used, before adding, after adding, and + after deleting items which affect those pool attributes. The + 2nd element is expected to be greater than the 1st; 3rd is expected + to be less than the 2nd. 
+ + :param samples: List containing 3 data samples + :param sample_type: String for logging and usage context + :returns: None if successful, Failure message otherwise + """ + original, created, deleted = range(3) + if samples[created] <= samples[original] or \ + samples[deleted] >= samples[created]: + return ('Ceph {} samples ({}) ' + 'unexpected.'.format(sample_type, samples)) + else: + self.log.debug('Ceph {} samples (OK): ' + '{}'.format(sample_type, samples)) + return None diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 76ffb27..4f15104 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -122,21 +122,24 @@ def config_flags_parser(config_flags): of specifying multiple key value pairs within the same string. For example, a string in the format of 'key1=value1, key2=value2' will return a dict of: - {'key1': 'value1', - 'key2': 'value2'}. + + {'key1': 'value1', + 'key2': 'value2'}. 2. A string in the above format, but supporting a comma-delimited list of values for the same key. For example, a string in the format of 'key1=value1, key2=value3,value4,value5' will return a dict of: - {'key1', 'value1', - 'key2', 'value2,value3,value4'} + + {'key1', 'value1', + 'key2', 'value2,value3,value4'} 3. A string containing a colon character (:) prior to an equal character (=) will be treated as yaml and parsed as such. This can be used to specify more complex key value pairs. For example, a string in the format of 'key1: subkey1=value1, subkey2=value2' will return a dict of: - {'key1', 'subkey1=value1, subkey2=value2'} + + {'key1', 'subkey1=value1, subkey2=value2'} The provided config_flags string may be a list of comma-separated values which themselves may be comma-separated list of values. 
@@ -240,7 +243,7 @@ class SharedDBContext(OSContextGenerator): if self.relation_prefix: password_setting = self.relation_prefix + '_password' - for rid in relation_ids('shared-db'): + for rid in relation_ids(self.interfaces[0]): for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') @@ -880,13 +883,13 @@ class NeutronContext(OSContextGenerator): self.network_manager) config = neutron_plugin_attribute(self.plugin, 'config', self.network_manager) - ovs_ctxt = {'core_plugin': driver, + pg_ctxt = {'core_plugin': driver, 'neutron_plugin': 'plumgrid', 'neutron_security_groups': self.neutron_security_groups, 'local_ip': unit_private_ip(), 'config': config} - return ovs_ctxt + return pg_ctxt def neutron_ctxt(self): if https(): @@ -904,8 +907,6 @@ class NeutronContext(OSContextGenerator): return ctxt def __call__(self): - self._ensure_packages() - if self.network_manager not in ['quantum', 'neutron']: return {} diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh deleted file mode 100755 index eb8527f..0000000 --- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. -# Author: Tom Haddon - -CRITICAL=0 -NOTACTIVE='' -LOGFILE=/var/log/nagios/check_haproxy.log -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') - -for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'}); -do - output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK') - if [ $? 
!= 0 ]; then - date >> $LOGFILE - echo $output >> $LOGFILE - /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1 - CRITICAL=1 - NOTACTIVE="${NOTACTIVE} $appserver" - fi -done - -if [ $CRITICAL = 1 ]; then - echo "CRITICAL:${NOTACTIVE}" - exit 2 -fi - -echo "OK: All haproxy instances looking good" -exit 0 diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh deleted file mode 100755 index 3ebb532..0000000 --- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -#-------------------------------------------- -# This file is managed by Juju -#-------------------------------------------- -# -# Copyright 2009,2012 Canonical Ltd. -# Author: Tom Haddon - -# These should be config options at some stage -CURRQthrsh=0 -MAXQthrsh=100 - -AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}') - -HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v) - -for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}') -do - CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3) - MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4) - - if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then - echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ" - exit 2 - fi -done - -echo "OK: All haproxy queue depths looking good" -exit 0 - diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py index 29bbddc..3dca6dc 100644 --- a/hooks/charmhelpers/contrib/openstack/ip.py +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -17,6 +17,7 @@ from charmhelpers.core.hookenv import ( config, unit_get, + service_name, ) from charmhelpers.contrib.network.ip import ( get_address_in_network, @@ 
-26,8 +27,6 @@ from charmhelpers.contrib.network.ip import ( ) from charmhelpers.contrib.hahelpers.cluster import is_clustered -from functools import partial - PUBLIC = 'public' INTERNAL = 'int' ADMIN = 'admin' @@ -35,15 +34,18 @@ ADMIN = 'admin' ADDRESS_MAP = { PUBLIC: { 'config': 'os-public-network', - 'fallback': 'public-address' + 'fallback': 'public-address', + 'override': 'os-public-hostname', }, INTERNAL: { 'config': 'os-internal-network', - 'fallback': 'private-address' + 'fallback': 'private-address', + 'override': 'os-internal-hostname', }, ADMIN: { 'config': 'os-admin-network', - 'fallback': 'private-address' + 'fallback': 'private-address', + 'override': 'os-admin-hostname', } } @@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC): :param endpoint_type: str endpoint type to resolve. :param returns: str base URL for services on the current service unit. """ - scheme = 'http' - if 'https' in configs.complete_contexts(): - scheme = 'https' + scheme = _get_scheme(configs) + address = resolve_address(endpoint_type) if is_ipv6(address): address = "[{}]".format(address) + return '%s://%s' % (scheme, address) +def _get_scheme(configs): + """Returns the scheme to use for the url (either http or https) + depending upon whether https is in the configs value. + + :param configs: OSTemplateRenderer config templating object to inspect + for a complete https context. + :returns: either 'http' or 'https' depending on whether https is + configured within the configs context. + """ + scheme = 'http' + if configs and 'https' in configs.complete_contexts(): + scheme = 'https' + return scheme + + +def _get_address_override(endpoint_type=PUBLIC): + """Returns any address overrides that the user has defined based on the + endpoint type. + + Note: this function allows for the service name to be inserted into the + address if the user specifies {service_name}.somehost.org. + + :param endpoint_type: the type of endpoint to retrieve the override + value for. 
+ :returns: any endpoint address or hostname that the user has overridden + or None if an override is not present. + """ + override_key = ADDRESS_MAP[endpoint_type]['override'] + addr_override = config(override_key) + if not addr_override: + return None + else: + return addr_override.format(service_name=service_name()) + + def resolve_address(endpoint_type=PUBLIC): """Return unit address depending on net config. @@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC): :param endpoint_type: Network endpoing type """ - resolved_address = None + resolved_address = _get_address_override(endpoint_type) + if resolved_address: + return resolved_address + vips = config('vip') if vips: vips = vips.split() @@ -109,38 +149,3 @@ def resolve_address(endpoint_type=PUBLIC): "clustered=%s)" % (net_type, clustered)) return resolved_address - - -def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC, - override=None): - """Returns the correct endpoint URL to advertise to Keystone. - - This method provides the correct endpoint URL which should be advertised to - the keystone charm for endpoint creation. This method allows for the url to - be overridden to force a keystone endpoint to have specific URL for any of - the defined scopes (admin, internal, public). - - :param configs: OSTemplateRenderer config templating object to inspect - for a complete https context. - :param url_template: str format string for creating the url template. Only - two values will be passed - the scheme+hostname - returned by the canonical_url and the port. - :param endpoint_type: str endpoint type to resolve. - :param override: str the name of the config option which overrides the - endpoint URL defined by the charm itself. None will - disable any overrides (default). - """ - if override: - # Return any user-defined overrides for the keystone endpoint URL. 
- user_value = config(override) - if user_value: - return user_value.strip() - - return url_template % (canonical_url(configs, endpoint_type), port) - - -public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC) - -internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL) - -admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN) diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index fa036e4..91ce1e2 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -172,14 +172,16 @@ def neutron_plugins(): 'services': ['calico-felix', 'bird', 'neutron-dhcp-agent', - 'nova-api-metadata'], + 'nova-api-metadata', + 'etcd'], 'packages': [[headers_package()] + determine_dkms_package(), ['calico-compute', 'bird', 'neutron-dhcp-agent', - 'nova-api-metadata']], - 'server_packages': ['neutron-server', 'calico-control'], - 'server_services': ['neutron-server'] + 'nova-api-metadata', + 'etcd']], + 'server_packages': ['neutron-server', 'calico-control', 'etcd'], + 'server_services': ['neutron-server', 'etcd'] }, 'vsp': { 'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini', @@ -203,8 +205,7 @@ def neutron_plugins(): ssl_dir=NEUTRON_CONF_DIR)], 'services': [], 'packages': [['plumgrid-lxc'], - ['iovisor-dkms'], - ['plumgrid-puppet']], + ['iovisor-dkms']], 'server_packages': ['neutron-server', 'neutron-plugin-plumgrid'], 'server_services': ['neutron-server'] @@ -271,11 +272,14 @@ def network_manager(): def parse_mappings(mappings): parsed = {} if mappings: - mappings = mappings.split(' ') + mappings = mappings.split() for m in mappings: p = m.partition(':') - if p[1] == ':': - parsed[p[0].strip()] = p[2].strip() + key = p[0].strip() + if p[1]: + parsed[key] = p[2].strip() + else: + parsed[key] = '' return parsed @@ -298,13 +302,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'): Returns dict of the form {bridge:port}. 
""" _mappings = parse_mappings(mappings) - if not _mappings: + if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. - _mappings = {default_bridge: mappings.split(' ')[0]} + _mappings = {default_bridge: mappings.split()[0]} bridges = _mappings.keys() ports = _mappings.values() @@ -324,6 +328,8 @@ def parse_vlan_range_mappings(mappings): Mappings must be a space-delimited list of provider:start:end mappings. + The start:end range is optional and may be omitted. + Returns dict of the form {provider: (start, end)}. """ _mappings = parse_mappings(mappings) diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf deleted file mode 100644 index 81a9719..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf +++ /dev/null @@ -1,15 +0,0 @@ -############################################################################### -# [ WARNING ] -# cinder configuration file maintained by Juju -# local changes may be overwritten. 
-############################################################################### -[global] -{% if auth -%} - auth_supported = {{ auth }} - keyring = /etc/ceph/$cluster.$name.keyring - mon host = {{ mon_hosts }} -{% endif -%} - log to syslog = {{ use_syslog }} - err to syslog = {{ use_syslog }} - clog to syslog = {{ use_syslog }} - diff --git a/hooks/charmhelpers/contrib/openstack/templates/git.upstart b/hooks/charmhelpers/contrib/openstack/templates/git.upstart deleted file mode 100644 index 4bed404..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/git.upstart +++ /dev/null @@ -1,17 +0,0 @@ -description "{{ service_description }}" -author "Juju {{ service_name }} Charm " - -start on runlevel [2345] -stop on runlevel [!2345] - -respawn - -exec start-stop-daemon --start --chuid {{ user_name }} \ - --chdir {{ start_dir }} --name {{ process_name }} \ - --exec {{ executable_name }} -- \ - {% for config_file in config_files -%} - --config-file={{ config_file }} \ - {% endfor -%} - {% if log_file -%} - --log-file={{ log_file }} - {% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg deleted file mode 100644 index ad875f1..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg +++ /dev/null @@ -1,58 +0,0 @@ -global - log {{ local_host }} local0 - log {{ local_host }} local1 notice - maxconn 20000 - user haproxy - group haproxy - spread-checks 0 - -defaults - log global - mode tcp - option tcplog - option dontlognull - retries 3 - timeout queue 1000 - timeout connect 1000 -{% if haproxy_client_timeout -%} - timeout client {{ haproxy_client_timeout }} -{% else -%} - timeout client 30000 -{% endif -%} - -{% if haproxy_server_timeout -%} - timeout server {{ haproxy_server_timeout }} -{% else -%} - timeout server 30000 -{% endif -%} - -listen stats {{ stat_port }} - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / 
- stats auth admin:password - -{% if frontends -%} -{% for service, ports in service_ports.items() -%} -frontend tcp-in_{{ service }} - bind *:{{ ports[0] }} - {% if ipv6 -%} - bind :::{{ ports[0] }} - {% endif -%} - {% for frontend in frontends -%} - acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }} - use_backend {{ service }}_{{ frontend }} if net_{{ frontend }} - {% endfor -%} - default_backend {{ service }}_{{ default_backend }} - -{% for frontend in frontends -%} -backend {{ service }}_{{ frontend }} - balance leastconn - {% for unit, address in frontends[frontend]['backends'].items() -%} - server {{ unit }} {{ address }}:{{ ports[1] }} check - {% endfor %} -{% endfor -%} -{% endfor -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend deleted file mode 100644 index ce28fa3..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend +++ /dev/null @@ -1,24 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} -Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf deleted file mode 100644 index ce28fa3..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend.conf +++ /dev/null @@ -1,24 +0,0 @@ -{% if endpoints -%} -{% for ext_port in ext_ports -%} 
-Listen {{ ext_port }} -{% endfor -%} -{% for address, endpoint, ext, int in endpoints -%} - - ServerName {{ endpoint }} - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }} - SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }} - ProxyPass / http://localhost:{{ int }}/ - ProxyPassReverse / http://localhost:{{ int }}/ - ProxyPreserveHost on - -{% endfor -%} - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken b/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken deleted file mode 100644 index 2a37edd..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-keystone-authtoken +++ /dev/null @@ -1,9 +0,0 @@ -{% if auth_host -%} -[keystone_authtoken] -identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }} -auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }} -admin_tenant_name = {{ admin_tenant_name }} -admin_user = {{ admin_user }} -admin_password = {{ admin_password }} -signing_dir = {{ signing_dir }} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo b/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo deleted file mode 100644 index b444c9c..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-rabbitmq-oslo +++ /dev/null @@ -1,22 +0,0 @@ -{% if rabbitmq_host or rabbitmq_hosts -%} -[oslo_messaging_rabbit] -rabbit_userid = {{ rabbitmq_user }} -rabbit_virtual_host = {{ rabbitmq_virtual_host }} -rabbit_password = {{ rabbitmq_password }} -{% if rabbitmq_hosts -%} -rabbit_hosts = {{ rabbitmq_hosts }} -{% if rabbitmq_ha_queues -%} -rabbit_ha_queues = True -rabbit_durable_queues = False -{% endif -%} -{% else -%} -rabbit_host = {{ rabbitmq_host }} -{% endif -%} -{% if rabbit_ssl_port -%} 
-rabbit_use_ssl = True -rabbit_port = {{ rabbit_ssl_port }} -{% if rabbit_ssl_ca -%} -kombu_ssl_ca_certs = {{ rabbit_ssl_ca }} -{% endif -%} -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq b/hooks/charmhelpers/contrib/openstack/templates/section-zeromq deleted file mode 100644 index 95f1a76..0000000 --- a/hooks/charmhelpers/contrib/openstack/templates/section-zeromq +++ /dev/null @@ -1,14 +0,0 @@ -{% if zmq_host -%} -# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }}) -rpc_backend = zmq -rpc_zmq_host = {{ zmq_host }} -{% if zmq_redis_address -%} -rpc_zmq_matchmaker = redis -matchmaker_heartbeat_freq = 15 -matchmaker_heartbeat_ttl = 30 -[matchmaker_redis] -host = {{ zmq_redis_address }} -{% else -%} -rpc_zmq_matchmaker = ring -{% endif -%} -{% endif -%} diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 24cb272..021d8cf 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -29,8 +29,8 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. 
- FileSystemLoader = ChoiceLoader = Environment = exceptions = None + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions class OSConfigException(Exception): diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index f90a028..4dd000c 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -53,9 +53,13 @@ from charmhelpers.contrib.network.ip import ( get_ipv6_addr ) +from charmhelpers.contrib.python.packages import ( + pip_create_virtualenv, + pip_install, +) + from charmhelpers.core.host import lsb_release, mounts, umount from charmhelpers.fetch import apt_install, apt_cache, install_remote -from charmhelpers.contrib.python.packages import pip_install from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device @@ -75,6 +79,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('trusty', 'icehouse'), ('utopic', 'juno'), ('vivid', 'kilo'), + ('wily', 'liberty'), ]) @@ -87,6 +92,7 @@ OPENSTACK_CODENAMES = OrderedDict([ ('2014.1', 'icehouse'), ('2014.2', 'juno'), ('2015.1', 'kilo'), + ('2015.2', 'liberty'), ]) # The ugly duckling @@ -109,6 +115,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.0', 'juno'), ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), + ('2.3.0', 'liberty'), ]) DEFAULT_LOOPBACK_SIZE = '5G' @@ -317,6 +324,9 @@ def configure_installation_source(rel): 'kilo': 'trusty-updates/kilo', 'kilo/updates': 'trusty-updates/kilo', 'kilo/proposed': 'trusty-proposed/kilo', + 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', } try: @@ -497,11 +507,22 @@ def git_install_requested(): requirements_dir = None -def git_clone_and_install(projects_yaml, core_project): +def _git_yaml_load(projects_yaml): + """ + Load the specified yaml into a 
dictionary. + """ + if not projects_yaml: + return None + + return yaml.load(projects_yaml) + + +def git_clone_and_install(projects_yaml, core_project, depth=1): """ Clone/install all specified OpenStack repositories. The expected format of projects_yaml is: + repositories: - {name: keystone, repository: 'git://git.openstack.org/openstack/keystone.git', @@ -509,24 +530,25 @@ def git_clone_and_install(projects_yaml, core_project): - {name: requirements, repository: 'git://git.openstack.org/openstack/requirements.git', branch: 'stable/icehouse'} - directory: /mnt/openstack-git - http_proxy: http://squid.internal:3128 - https_proxy: https://squid.internal:3128 - The directory, http_proxy, and https_proxy keys are optional. + directory: /mnt/openstack-git + http_proxy: squid-proxy-url + https_proxy: squid-proxy-url + + The directory, http_proxy, and https_proxy keys are optional. + """ global requirements_dir parent_dir = '/mnt/openstack-git' + http_proxy = None - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) + projects = _git_yaml_load(projects_yaml) _git_validate_projects_yaml(projects, core_project) old_environ = dict(os.environ) if 'http_proxy' in projects.keys(): + http_proxy = projects['http_proxy'] os.environ['http_proxy'] = projects['http_proxy'] if 'https_proxy' in projects.keys(): os.environ['https_proxy'] = projects['https_proxy'] @@ -534,15 +556,25 @@ def git_clone_and_install(projects_yaml, core_project): if 'directory' in projects.keys(): parent_dir = projects['directory'] + pip_create_virtualenv(os.path.join(parent_dir, 'venv')) + + # Upgrade setuptools and pip from default virtualenv versions. The default + # versions in trusty break master OpenStack branch deployments. 
+ for p in ['pip', 'setuptools']: + pip_install(p, upgrade=True, proxy=http_proxy, + venv=os.path.join(parent_dir, 'venv')) + for p in projects['repositories']: repo = p['repository'] branch = p['branch'] if p['name'] == 'requirements': - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, update_requirements=False) requirements_dir = repo_dir else: - repo_dir = _git_clone_and_install_single(repo, branch, parent_dir, + repo_dir = _git_clone_and_install_single(repo, branch, depth, + parent_dir, http_proxy, update_requirements=True) os.environ = old_environ @@ -574,7 +606,8 @@ def _git_ensure_key_exists(key, keys): error_out('openstack-origin-git key \'{}\' is missing'.format(key)) -def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements): +def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy, + update_requirements): """ Clone and install a single git repository. 
""" @@ -587,23 +620,29 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements) if not os.path.exists(dest_dir): juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch)) - repo_dir = install_remote(repo, dest=parent_dir, branch=branch) + repo_dir = install_remote(repo, dest=parent_dir, branch=branch, + depth=depth) else: repo_dir = dest_dir + venv = os.path.join(parent_dir, 'venv') + if update_requirements: if not requirements_dir: error_out('requirements repo must be cloned before ' 'updating from global requirements.') - _git_update_requirements(repo_dir, requirements_dir) + _git_update_requirements(venv, repo_dir, requirements_dir) juju_log('Installing git repo from dir: {}'.format(repo_dir)) - pip_install(repo_dir) + if http_proxy: + pip_install(repo_dir, proxy=http_proxy, venv=venv) + else: + pip_install(repo_dir, venv=venv) return repo_dir -def _git_update_requirements(package_dir, reqs_dir): +def _git_update_requirements(venv, package_dir, reqs_dir): """ Update from global requirements. @@ -612,25 +651,38 @@ def _git_update_requirements(package_dir, reqs_dir): """ orig_dir = os.getcwd() os.chdir(reqs_dir) - cmd = ['python', 'update.py', package_dir] + python = os.path.join(venv, 'bin/python') + cmd = [python, 'update.py', package_dir] try: subprocess.check_call(cmd) except subprocess.CalledProcessError: package = os.path.basename(package_dir) - error_out("Error updating {} from global-requirements.txt".format(package)) + error_out("Error updating {} from " + "global-requirements.txt".format(package)) os.chdir(orig_dir) +def git_pip_venv_dir(projects_yaml): + """ + Return the pip virtualenv path. + """ + parent_dir = '/mnt/openstack-git' + + projects = _git_yaml_load(projects_yaml) + + if 'directory' in projects.keys(): + parent_dir = projects['directory'] + + return os.path.join(parent_dir, 'venv') + + def git_src_dir(projects_yaml, project): """ Return the directory where the specified project's source is located. 
""" parent_dir = '/mnt/openstack-git' - if not projects_yaml: - return - - projects = yaml.load(projects_yaml) + projects = _git_yaml_load(projects_yaml) if 'directory' in projects.keys(): parent_dir = projects['directory'] @@ -640,3 +692,15 @@ def git_src_dir(projects_yaml, project): return os.path.join(parent_dir, os.path.basename(p['repository'])) return None + + +def git_yaml_value(projects_yaml, key): + """ + Return the value in projects_yaml for the specified key. + """ + projects = _git_yaml_load(projects_yaml) + + if key in projects.keys(): + return projects[key] + + return None diff --git a/hooks/charmhelpers/contrib/peerstorage/__init__.py b/hooks/charmhelpers/contrib/peerstorage/__init__.py new file mode 100644 index 0000000..09f2b12 --- /dev/null +++ b/hooks/charmhelpers/contrib/peerstorage/__init__.py @@ -0,0 +1,268 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import json +import six + +from charmhelpers.core.hookenv import relation_id as current_relation_id +from charmhelpers.core.hookenv import ( + is_relation_made, + relation_ids, + relation_get as _relation_get, + local_unit, + relation_set as _relation_set, + leader_get as _leader_get, + leader_set, + is_leader, +) + + +""" +This helper provides functions to support use of a peer relation +for basic key/value storage, with the added benefit that all storage +can be replicated across peer units. + +Requirement to use: + +To use this, the "peer_echo()" method has to be called from the peer +relation's relation-changed hook: + +@hooks.hook("cluster-relation-changed") # Adapt this to your peer relation name +def cluster_relation_changed(): + peer_echo() + +Once this is done, you can use peer storage from anywhere: + +@hooks.hook("some-hook") +def some_hook(): + # You can store and retrieve key/values this way: + if is_relation_made("cluster"): # from charmhelpers.core.hookenv + # There are peers available so we can work with peer storage + peer_store("mykey", "myvalue") + value = peer_retrieve("mykey") + print value + else: + print "No peers joined the relation, cannot share key/values :(" +""" + + +def leader_get(attribute=None): + """Wrapper to ensure that settings are migrated from the peer relation. + + This is to support upgrading an environment that does not support + Juju leadership election to one that does. + + If a setting is not extant in the leader-get but is on the relation-get + peer rel, it is migrated and marked as such so that it is not re-migrated.
+ """ + migration_key = '__leader_get_migrated_settings__' + if not is_leader(): + return _leader_get(attribute=attribute) + + settings_migrated = False + leader_settings = _leader_get(attribute=attribute) + previously_migrated = _leader_get(attribute=migration_key) + + if previously_migrated: + migrated = set(json.loads(previously_migrated)) + else: + migrated = set([]) + + try: + if migration_key in leader_settings: + del leader_settings[migration_key] + except TypeError: + pass + + if attribute: + if attribute in migrated: + return leader_settings + + # If attribute not present in leader db, check if this unit has set + # the attribute in the peer relation + if not leader_settings: + peer_setting = relation_get(attribute=attribute, unit=local_unit()) + if peer_setting: + leader_set(settings={attribute: peer_setting}) + leader_settings = peer_setting + + if leader_settings: + settings_migrated = True + migrated.add(attribute) + else: + r_settings = relation_get(unit=local_unit()) + if r_settings: + for key in set(r_settings.keys()).difference(migrated): + # Leader setting wins + if not leader_settings.get(key): + leader_settings[key] = r_settings[key] + + settings_migrated = True + migrated.add(key) + + if settings_migrated: + leader_set(**leader_settings) + + if migrated and settings_migrated: + migrated = json.dumps(list(migrated)) + leader_set(settings={migration_key: migrated}) + + return leader_settings + + +def relation_set(relation_id=None, relation_settings=None, **kwargs): + """Attempt to use leader-set if supported in the current version of Juju, + otherwise falls back on relation-set. + + Note that we only attempt to use leader-set if the provided relation_id is + a peer relation id or no relation id is provided (in which case we assume + we are within the peer relation context). 
+ """ + try: + if relation_id in relation_ids('cluster'): + return leader_set(settings=relation_settings, **kwargs) + else: + raise NotImplementedError + except NotImplementedError: + return _relation_set(relation_id=relation_id, + relation_settings=relation_settings, **kwargs) + + +def relation_get(attribute=None, unit=None, rid=None): + """Attempt to use leader-get if supported in the current version of Juju, + otherwise falls back on relation-get. + + Note that we only attempt to use leader-get if the provided rid is a peer + relation id or no relation id is provided (in which case we assume we are + within the peer relation context). + """ + try: + if rid in relation_ids('cluster'): + return leader_get(attribute) + else: + raise NotImplementedError + except NotImplementedError: + return _relation_get(attribute=attribute, rid=rid, unit=unit) + + +def peer_retrieve(key, relation_name='cluster'): + """Retrieve a named key from peer relation `relation_name`.""" + cluster_rels = relation_ids(relation_name) + if len(cluster_rels) > 0: + cluster_rid = cluster_rels[0] + return relation_get(attribute=key, rid=cluster_rid, + unit=local_unit()) + else: + raise ValueError('Unable to detect' + 'peer relation {}'.format(relation_name)) + + +def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_', + inc_list=None, exc_list=None): + """ Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """ + inc_list = inc_list if inc_list else [] + exc_list = exc_list if exc_list else [] + peerdb_settings = peer_retrieve('-', relation_name=relation_name) + matched = {} + if peerdb_settings is None: + return matched + for k, v in peerdb_settings.items(): + full_prefix = prefix + delimiter + if k.startswith(full_prefix): + new_key = k.replace(full_prefix, '') + if new_key in exc_list: + continue + if new_key in inc_list or len(inc_list) == 0: + matched[new_key] = v + return matched + + +def peer_store(key, value, relation_name='cluster'): + """Store the 
key/value pair on the named peer relation `relation_name`.""" + cluster_rels = relation_ids(relation_name) + if len(cluster_rels) > 0: + cluster_rid = cluster_rels[0] + relation_set(relation_id=cluster_rid, + relation_settings={key: value}) + else: + raise ValueError('Unable to detect ' + 'peer relation {}'.format(relation_name)) + + +def peer_echo(includes=None, force=False): + """Echo filtered attributes back onto the same relation for storage. + + This is a requirement to use the peerstorage module - it needs to be called + from the peer relation's changed hook. + + If Juju leader support exists this will be a noop unless force is True. + """ + try: + is_leader() + except NotImplementedError: + pass + else: + if not force: + return # NOOP if leader-election is supported + + # Use original non-leader calls + relation_get = _relation_get + relation_set = _relation_set + + rdata = relation_get() + echo_data = {} + if includes is None: + echo_data = rdata.copy() + for ex in ['private-address', 'public-address']: + if ex in echo_data: + echo_data.pop(ex) + else: + for attribute, value in six.iteritems(rdata): + for include in includes: + if include in attribute: + echo_data[attribute] = value + if len(echo_data) > 0: + relation_set(relation_settings=echo_data) + + +def peer_store_and_set(relation_id=None, peer_relation_name='cluster', + peer_store_fatal=False, relation_settings=None, + delimiter='_', **kwargs): + """Store passed-in arguments both in argument relation and in peer storage. + + It functions like doing relation_set() and peer_store() at the same time, + with the same data. + + @param relation_id: the id of the relation to store the data on. Defaults + to the current relation. 
+ @param peer_store_fatal: Set to True, the function will raise an exception + should the peer storage not be available.""" + + relation_settings = relation_settings if relation_settings else {} + relation_set(relation_id=relation_id, + relation_settings=relation_settings, + **kwargs) + if is_relation_made(peer_relation_name): + for key, value in six.iteritems(dict(list(kwargs.items()) + + list(relation_settings.items()))): + key_prefix = relation_id or current_relation_id() + peer_store(key_prefix + delimiter + key, + value, + relation_name=peer_relation_name) + else: + if peer_store_fatal: + raise ValueError('Unable to detect ' + 'peer relation {}'.format(peer_relation_name)) diff --git a/hooks/charmhelpers/contrib/python/debug.py b/hooks/charmhelpers/contrib/python/debug.py new file mode 100644 index 0000000..871cd6f --- /dev/null +++ b/hooks/charmhelpers/contrib/python/debug.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.contrib.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski " + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py index 8659516..10b32e3 100644 --- a/hooks/charmhelpers/contrib/python/packages.py +++ b/hooks/charmhelpers/contrib/python/packages.py @@ -17,8 +17,11 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . 
+import os +import subprocess + from charmhelpers.fetch import apt_install, apt_update -from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import charm_dir, log try: from pip import main as pip_execute @@ -33,6 +36,8 @@ __author__ = "Jorge Niedbalski " def parse_options(given, available): """Given a set of options, check if available""" for key, value in sorted(given.items()): + if not value: + continue if key in available: yield "--{0}={1}".format(key, value) @@ -51,11 +56,15 @@ def pip_install_requirements(requirements, **options): pip_execute(command) -def pip_install(package, fatal=False, upgrade=False, **options): +def pip_install(package, fatal=False, upgrade=False, venv=None, **options): """Install a python package""" - command = ["install"] + if venv: + venv_python = os.path.join(venv, 'bin/pip') + command = [venv_python, "install"] + else: + command = ["install"] - available_options = ('proxy', 'src', 'log', "index-url", ) + available_options = ('proxy', 'src', 'log', 'index-url', ) for option in parse_options(options, available_options): command.append(option) @@ -69,7 +78,10 @@ def pip_install(package, fatal=False, upgrade=False, **options): log("Installing {} package with options: {}".format(package, command)) - pip_execute(command) + if venv: + subprocess.check_call(command) + else: + pip_execute(command) def pip_uninstall(package, **options): @@ -94,3 +106,16 @@ def pip_list(): """Returns the list of current python installed packages """ return pip_execute(["list"]) + + +def pip_create_virtualenv(path=None): + """Create an isolated Python environment.""" + apt_install('python-virtualenv') + + if path: + venv_path = path + else: + venv_path = os.path.join(charm_dir(), 'venv') + + if not os.path.exists(venv_path): + subprocess.check_call(['virtualenv', venv_path]) diff --git a/hooks/charmhelpers/contrib/python/rpdb.py b/hooks/charmhelpers/contrib/python/rpdb.py new file mode 100644 index 0000000..d503f88 --- /dev/null +++ 
b/hooks/charmhelpers/contrib/python/rpdb.py @@ -0,0 +1,58 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin " +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/hooks/charmhelpers-pl/contrib/openstack/files/__init__.py 
b/hooks/charmhelpers/contrib/python/version.py similarity index 58% rename from hooks/charmhelpers-pl/contrib/openstack/files/__init__.py rename to hooks/charmhelpers/contrib/python/version.py index 7587679..c39fcbf 100644 --- a/hooks/charmhelpers-pl/contrib/openstack/files/__init__.py +++ b/hooks/charmhelpers/contrib/python/version.py @@ -1,3 +1,6 @@ +#!/usr/bin/env python +# coding: utf-8 + # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. @@ -14,5 +17,18 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -# dummy __init__.py to fool syncer into thinking this is a syncable python -# module +import sys + +__author__ = "Jorge Niedbalski " + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) diff --git a/hooks/charmhelpers/contrib/saltstack/__init__.py b/hooks/charmhelpers/contrib/saltstack/__init__.py new file mode 100644 index 0000000..6f1109d --- /dev/null +++ b/hooks/charmhelpers/contrib/saltstack/__init__.py @@ -0,0 +1,118 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +"""Charm Helpers saltstack - declare the state of your machines. + +This helper enables you to declare your machine state, rather than +program it procedurally (and have to test each change to your procedures). +Your install hook can be as simple as:: + + {{{ + from charmhelpers.contrib.saltstack import ( + install_salt_support, + update_machine_state, + ) + + + def install(): + install_salt_support() + update_machine_state('machine_states/dependencies.yaml') + update_machine_state('machine_states/installed.yaml') + }}} + +and won't need to change (nor will its tests) when you change the machine +state. + +It's using a python package called salt-minion which allows various formats for +specifying resources, such as:: + + {{{ + /srv/{{ basedir }}: + file.directory: + - group: ubunet + - user: ubunet + - require: + - user: ubunet + - recurse: + - user + - group + + ubunet: + group.present: + - gid: 1500 + user.present: + - uid: 1500 + - gid: 1500 + - createhome: False + - require: + - group: ubunet + }}} + +The docs for all the different state definitions are at: + http://docs.saltstack.com/ref/states/all/ + + +TODO: + * Add test helpers which will ensure that machine state definitions + are functionally (but not necessarily logically) correct (ie. getting + salt to parse all state defs. + * Add a link to a public bootstrap charm example / blogpost. + * Find a way to obviate the need to use the grains['charm_dir'] syntax + in templates. +""" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers +import subprocess + +import charmhelpers.contrib.templating.contexts +import charmhelpers.core.host +import charmhelpers.core.hookenv + + +salt_grains_path = '/etc/salt/grains' + + +def install_salt_support(from_ppa=True): + """Installs the salt-minion helper for machine state. + + By default the salt-minion package is installed from + the saltstack PPA. If from_ppa is False you must ensure + that the salt-minion package is available in the apt cache. 
+ """ + if from_ppa: + subprocess.check_call([ + '/usr/bin/add-apt-repository', + '--yes', + 'ppa:saltstack/salt', + ]) + subprocess.check_call(['/usr/bin/apt-get', 'update']) + # We install salt-common as salt-minion would run the salt-minion + # daemon. + charmhelpers.fetch.apt_install('salt-common') + + +def update_machine_state(state_path): + """Update the machine state using the provided state declaration.""" + charmhelpers.contrib.templating.contexts.juju_state_to_yaml( + salt_grains_path) + subprocess.check_call([ + 'salt-call', + '--local', + 'state.template', + state_path, + ]) diff --git a/hooks/charmhelpers/contrib/ssl/__init__.py b/hooks/charmhelpers/contrib/ssl/__init__.py new file mode 100644 index 0000000..f7428d6 --- /dev/null +++ b/hooks/charmhelpers/contrib/ssl/__init__.py @@ -0,0 +1,94 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import subprocess +from charmhelpers.core import hookenv + + +def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None): + """Generate selfsigned SSL keypair + + You must provide one of the 3 optional arguments: + config, subject or cn + If more than one is provided the leftmost will be used + + Arguments: + keyfile -- (required) full path to the keyfile to be created + certfile -- (required) full path to the certfile to be created + keysize -- (optional) SSL key length + config -- (optional) openssl configuration file + subject -- (optional) dictionary with SSL subject variables + cn -- (optional) certificate common name + + Required keys in subject dict: + cn -- Common name (eq. FQDN) + + Optional keys in subject dict + country -- Country Name (2 letter code) + state -- State or Province Name (full name) + locality -- Locality Name (eg, city) + organization -- Organization Name (eg, company) + organizational_unit -- Organizational Unit Name (eg, section) + email -- Email Address + """ + + cmd = [] + if config: + cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", + "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", + "-keyout", keyfile, + "-out", certfile, "-config", config] + elif subject: + ssl_subject = "" + if "country" in subject: + ssl_subject = ssl_subject + "/C={}".format(subject["country"]) + if "state" in subject: + ssl_subject = ssl_subject + "/ST={}".format(subject["state"]) + if "locality" in subject: + ssl_subject = ssl_subject + "/L={}".format(subject["locality"]) + if "organization" in subject: + ssl_subject = ssl_subject + "/O={}".format(subject["organization"]) + if "organizational_unit" in subject: + ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"]) + if "cn" in subject: + ssl_subject = ssl_subject + "/CN={}".format(subject["cn"]) + else: + hookenv.log("When using \"subject\" argument you must " + "provide \"cn\" field at very least") + return False + if "email" in 
subject: + ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"]) + + cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", + "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", + "-keyout", keyfile, + "-out", certfile, "-subj", ssl_subject] + elif cn: + cmd = ["/usr/bin/openssl", "req", "-new", "-newkey", + "rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509", + "-keyout", keyfile, + "-out", certfile, "-subj", "/CN={}".format(cn)] + + if not cmd: + hookenv.log("No config, subject or cn provided," + "unable to generate self signed SSL certificates") + return False + try: + subprocess.check_call(cmd) + return True + except Exception as e: + print("Execution of openssl command failed:\n{}".format(e)) + return False diff --git a/hooks/charmhelpers/contrib/ssl/service.py b/hooks/charmhelpers/contrib/ssl/service.py new file mode 100644 index 0000000..8892edf --- /dev/null +++ b/hooks/charmhelpers/contrib/ssl/service.py @@ -0,0 +1,279 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import os +from os.path import join as path_join +from os.path import exists +import subprocess + +from charmhelpers.core.hookenv import log, DEBUG + +STD_CERT = "standard" + +# Mysql server is fairly picky about cert creation +# and types, spec its creation separately for now. 
+MYSQL_CERT = "mysql" + + +class ServiceCA(object): + + default_expiry = str(365 * 2) + default_ca_expiry = str(365 * 6) + + def __init__(self, name, ca_dir, cert_type=STD_CERT): + self.name = name + self.ca_dir = ca_dir + self.cert_type = cert_type + + ############### + # Hook Helper API + @staticmethod + def get_ca(type=STD_CERT): + service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] + ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca') + ca = ServiceCA(service_name, ca_path, type) + ca.init() + return ca + + @classmethod + def get_service_cert(cls, type=STD_CERT): + service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0] + ca = cls.get_ca() + crt, key = ca.get_or_create_cert(service_name) + return crt, key, ca.get_ca_bundle() + + ############### + + def init(self): + log("initializing service ca", level=DEBUG) + if not exists(self.ca_dir): + self._init_ca_dir(self.ca_dir) + self._init_ca() + + @property + def ca_key(self): + return path_join(self.ca_dir, 'private', 'cacert.key') + + @property + def ca_cert(self): + return path_join(self.ca_dir, 'cacert.pem') + + @property + def ca_conf(self): + return path_join(self.ca_dir, 'ca.cnf') + + @property + def signing_conf(self): + return path_join(self.ca_dir, 'signing.cnf') + + def _init_ca_dir(self, ca_dir): + os.mkdir(ca_dir) + for i in ['certs', 'crl', 'newcerts', 'private']: + sd = path_join(ca_dir, i) + if not exists(sd): + os.mkdir(sd) + + if not exists(path_join(ca_dir, 'serial')): + with open(path_join(ca_dir, 'serial'), 'w') as fh: + fh.write('02\n') + + if not exists(path_join(ca_dir, 'index.txt')): + with open(path_join(ca_dir, 'index.txt'), 'w') as fh: + fh.write('') + + def _init_ca(self): + """Generate the root ca's cert and key. 
+ """ + if not exists(path_join(self.ca_dir, 'ca.cnf')): + with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh: + fh.write( + CA_CONF_TEMPLATE % (self.get_conf_variables())) + + if not exists(path_join(self.ca_dir, 'signing.cnf')): + with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh: + fh.write( + SIGNING_CONF_TEMPLATE % (self.get_conf_variables())) + + if exists(self.ca_cert) or exists(self.ca_key): + raise RuntimeError("Initialized called when CA already exists") + cmd = ['openssl', 'req', '-config', self.ca_conf, + '-x509', '-nodes', '-newkey', 'rsa', + '-days', self.default_ca_expiry, + '-keyout', self.ca_key, '-out', self.ca_cert, + '-outform', 'PEM'] + output = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + log("CA Init:\n %s" % output, level=DEBUG) + + def get_conf_variables(self): + return dict( + org_name="juju", + org_unit_name="%s service" % self.name, + common_name=self.name, + ca_dir=self.ca_dir) + + def get_or_create_cert(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + return self.create_certificate(common_name) + + def create_certificate(self, common_name): + if common_name in self: + return self.get_certificate(common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name) + self._create_certificate(common_name, key_p, csr_p, crt_p) + return self.get_certificate(common_name) + + def get_certificate(self, common_name): + if common_name not in self: + raise ValueError("No certificate for %s" % common_name) + key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name) + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name) + with open(crt_p) as fh: + crt = fh.read() + with open(key_p) as fh: + key = fh.read() + return crt, key + + def __contains__(self, common_name): + crt_p = path_join(self.ca_dir, "certs", "%s.crt" % 
common_name) + return exists(crt_p) + + def _create_certificate(self, common_name, key_p, csr_p, crt_p): + template_vars = self.get_conf_variables() + template_vars['common_name'] = common_name + subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % ( + template_vars) + + log("CA Create Cert %s" % common_name, level=DEBUG) + cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048', + '-nodes', '-days', self.default_expiry, + '-keyout', key_p, '-out', csr_p, '-subj', subj] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p] + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + log("CA Sign Cert %s" % common_name, level=DEBUG) + if self.cert_type == MYSQL_CERT: + cmd = ['openssl', 'x509', '-req', + '-in', csr_p, '-days', self.default_expiry, + '-CA', self.ca_cert, '-CAkey', self.ca_key, + '-set_serial', '01', '-out', crt_p] + else: + cmd = ['openssl', 'ca', '-config', self.signing_conf, + '-extensions', 'req_extensions', + '-days', self.default_expiry, '-notext', + '-in', csr_p, '-out', crt_p, '-subj', subj, '-batch'] + log("running %s" % " ".join(cmd), level=DEBUG) + subprocess.check_call(cmd, stderr=subprocess.PIPE) + + def get_ca_bundle(self): + with open(self.ca_cert) as fh: + return fh.read() + + +CA_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = ca_distinguished_name + +x509_extensions = ca_extensions + +[ ca_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s Certificate Authority + + +[ policy_match ] +countryName = optional +stateOrProvinceName = 
optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ ca_extensions ] +basicConstraints = critical,CA:true +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = cRLSign, keyCertSign +""" + + +SIGNING_CONF_TEMPLATE = """ +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = %(ca_dir)s +policy = policy_match +database = $dir/index.txt +serial = $dir/serial +certs = $dir/certs +crl_dir = $dir/crl +new_certs_dir = $dir/newcerts +certificate = $dir/cacert.pem +private_key = $dir/private/cacert.key +RANDFILE = $dir/private/.rand +default_md = default + +[ req ] +default_bits = 1024 +default_md = sha1 + +prompt = no +distinguished_name = req_distinguished_name + +x509_extensions = req_extensions + +[ req_distinguished_name ] +organizationName = %(org_name)s +organizationalUnitName = %(org_unit_name)s machine resources +commonName = %(common_name)s + +[ policy_match ] +countryName = optional +stateOrProvinceName = optional +organizationName = match +organizationalUnitName = optional +commonName = supplied + +[ req_extensions ] +basicConstraints = CA:false +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always, issuer +keyUsage = digitalSignature, keyEncipherment, keyAgreement +extendedKeyUsage = serverAuth, clientAuth +""" diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 31ea7f9..00dbffb 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -60,12 +60,12 @@ KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' CEPH_CONF = """[global] - auth supported = {auth} - keyring = {keyring} - mon host = {mon_hosts} - log to syslog = {use_syslog} - err to syslog = {use_syslog} - clog to syslog = {use_syslog} +auth supported = {auth} +keyring = {keyring} +mon host = {mon_hosts} +log to syslog = {use_syslog} +err to syslog = 
{use_syslog} +clog to syslog = {use_syslog} """ diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b7..e2769e4 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/hooks/charmhelpers-pl/contrib/network/__init__.py b/hooks/charmhelpers/contrib/templating/__init__.py similarity index 100% rename from hooks/charmhelpers-pl/contrib/network/__init__.py rename to hooks/charmhelpers/contrib/templating/__init__.py diff --git a/hooks/charmhelpers/contrib/templating/contexts.py b/hooks/charmhelpers/contrib/templating/contexts.py new file mode 100644 index 0000000..deea644 --- /dev/null +++ b/hooks/charmhelpers/contrib/templating/contexts.py @@ -0,0 +1,139 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Copyright 2013 Canonical Ltd. 
+# +# Authors: +# Charm Helpers Developers +"""A helper to create a yaml cache of config with namespaced relation data.""" +import os +import yaml + +import six + +import charmhelpers.core.hookenv + + +charm_dir = os.environ.get('CHARM_DIR', '') + + +def dict_keys_without_hyphens(a_dict): + """Return the a new dict with underscores instead of hyphens in keys.""" + return dict( + (key.replace('-', '_'), val) for key, val in a_dict.items()) + + +def update_relations(context, namespace_separator=':'): + """Update the context with the relation data.""" + # Add any relation data prefixed with the relation type. + relation_type = charmhelpers.core.hookenv.relation_type() + relations = [] + context['current_relation'] = {} + if relation_type is not None: + relation_data = charmhelpers.core.hookenv.relation_get() + context['current_relation'] = relation_data + # Deprecated: the following use of relation data as keys + # directly in the context will be removed. + relation_data = dict( + ("{relation_type}{namespace_separator}{key}".format( + relation_type=relation_type, + key=key, + namespace_separator=namespace_separator), val) + for key, val in relation_data.items()) + relation_data = dict_keys_without_hyphens(relation_data) + context.update(relation_data) + relations = charmhelpers.core.hookenv.relations_of_type(relation_type) + relations = [dict_keys_without_hyphens(rel) for rel in relations] + + context['relations_full'] = charmhelpers.core.hookenv.relations() + + # the hookenv.relations() data structure is effectively unusable in + # templates and other contexts when trying to access relation data other + # than the current relation. So provide a more useful structure that works + # with any hook. 
+ local_unit = charmhelpers.core.hookenv.local_unit() + relations = {} + for rname, rids in context['relations_full'].items(): + relations[rname] = [] + for rid, rdata in rids.items(): + data = rdata.copy() + if local_unit in rdata: + data.pop(local_unit) + for unit_name, rel_data in data.items(): + new_data = {'__relid__': rid, '__unit__': unit_name} + new_data.update(rel_data) + relations[rname].append(new_data) + context['relations'] = relations + + +def juju_state_to_yaml(yaml_path, namespace_separator=':', + allow_hyphens_in_keys=True, mode=None): + """Update the juju config and state in a yaml file. + + This includes any current relation-get data, and the charm + directory. + + This function was created for the ansible and saltstack + support, as those libraries can use a yaml file to supply + context to templates, but it may be useful generally to + create and update an on-disk cache of all the config, including + previous relation data. + + By default, hyphens are allowed in keys as this is supported + by yaml, but for tools like ansible, hyphens are not valid [1]. + + [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name + """ + config = charmhelpers.core.hookenv.config() + + # Add the charm_dir which we will need to refer to charm + # file resources etc. + config['charm_dir'] = charm_dir + config['local_unit'] = charmhelpers.core.hookenv.local_unit() + config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip() + config['unit_public_address'] = charmhelpers.core.hookenv.unit_get( + 'public-address' + ) + + # Don't use non-standard tags for unicode which will not + # work when salt uses yaml.load_safe. 
+ yaml.add_representer(six.text_type, + lambda dumper, value: dumper.represent_scalar( + six.u('tag:yaml.org,2002:str'), value)) + + yaml_dir = os.path.dirname(yaml_path) + if not os.path.exists(yaml_dir): + os.makedirs(yaml_dir) + + if os.path.exists(yaml_path): + with open(yaml_path, "r") as existing_vars_file: + existing_vars = yaml.load(existing_vars_file.read()) + else: + with open(yaml_path, "w+"): + pass + existing_vars = {} + + if mode is not None: + os.chmod(yaml_path, mode) + + if not allow_hyphens_in_keys: + config = dict_keys_without_hyphens(config) + existing_vars.update(config) + + update_relations(existing_vars, namespace_separator) + + with open(yaml_path, "w+") as fp: + fp.write(yaml.dump(existing_vars, default_flow_style=False)) diff --git a/hooks/charmhelpers-pl/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/templating/jinja.py similarity index 54% rename from hooks/charmhelpers-pl/contrib/openstack/alternatives.py rename to hooks/charmhelpers/contrib/templating/jinja.py index ef77caf..6823bf0 100644 --- a/hooks/charmhelpers-pl/contrib/openstack/alternatives.py +++ b/hooks/charmhelpers/contrib/templating/jinja.py @@ -14,20 +14,26 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -''' Helper for managing alternatives for file conflict resolution ''' - -import subprocess -import shutil -import os +""" +Templating using the python-jinja2 package. 
+""" +import six +from charmhelpers.fetch import apt_install +try: + import jinja2 +except ImportError: + if six.PY3: + apt_install(["python3-jinja2"]) + else: + apt_install(["python-jinja2"]) + import jinja2 -def install_alternative(name, target, source, priority=50): - ''' Install alternative configuration ''' - if (os.path.exists(target) and not os.path.islink(target)): - # Move existing file/directory away before installing - shutil.move(target, '{}.bak'.format(target)) - cmd = [ - 'update-alternatives', '--force', '--install', - target, name, source, str(priority) - ] - subprocess.check_call(cmd) +DEFAULT_TEMPLATES_DIR = 'templates' + + +def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR): + templates = jinja2.Environment( + loader=jinja2.FileSystemLoader(template_dir)) + template = templates.get_template(template_name) + return template.render(context) diff --git a/hooks/charmhelpers-pl/core/services/__init__.py b/hooks/charmhelpers/contrib/templating/pyformat.py similarity index 65% rename from hooks/charmhelpers-pl/core/services/__init__.py rename to hooks/charmhelpers/contrib/templating/pyformat.py index 0928158..76c846e 100644 --- a/hooks/charmhelpers-pl/core/services/__init__.py +++ b/hooks/charmhelpers/contrib/templating/pyformat.py @@ -14,5 +14,16 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -from .base import * # NOQA -from .helpers import * # NOQA +''' +Templating using standard Python str.format() method. 
+''' + +from charmhelpers.core import hookenv + + +def render(template, extra={}, **kwargs): + """Return the template rendered using Python's str.format().""" + context = hookenv.execution_environment() + context.update(extra) + context.update(kwargs) + return template.format(**context) diff --git a/hooks/charmhelpers/contrib/unison/__init__.py b/hooks/charmhelpers/contrib/unison/__init__.py new file mode 100644 index 0000000..543e84a --- /dev/null +++ b/hooks/charmhelpers/contrib/unison/__init__.py @@ -0,0 +1,313 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +# Easy file synchronization among peer units using ssh + unison. +# +# For the -joined, -changed, and -departed peer relations, add a call to +# ssh_authorized_peers() describing the peer relation and the desired +# user + group. After all peer relations have settled, all hosts should +# be able to connect to on another via key auth'd ssh as the specified user. +# +# Other hooks are then free to synchronize files and directories using +# sync_to_peers(). +# +# For a peer relation named 'cluster', for example: +# +# cluster-relation-joined: +# ... +# ssh_authorized_peers(peer_interface='cluster', +# user='juju_ssh', group='juju_ssh', +# ensure_local_user=True) +# ... +# +# cluster-relation-changed: +# ... 
+# ssh_authorized_peers(peer_interface='cluster', +# user='juju_ssh', group='juju_ssh', +# ensure_local_user=True) +# ... +# +# cluster-relation-departed: +# ... +# ssh_authorized_peers(peer_interface='cluster', +# user='juju_ssh', group='juju_ssh', +# ensure_local_user=True) +# ... +# +# Hooks are now free to sync files as easily as: +# +# files = ['/etc/fstab', '/etc/apt.conf.d/'] +# sync_to_peers(peer_interface='cluster', +# user='juju_ssh, paths=[files]) +# +# It is assumed the charm itself has setup permissions on each unit +# such that 'juju_ssh' has read + write permissions. Also assumed +# that the calling charm takes care of leader delegation. +# +# Additionally files can be synchronized only to an specific unit: +# sync_to_peer(slave_address, user='juju_ssh', +# paths=[files], verbose=False) + +import os +import pwd + +from copy import copy +from subprocess import check_call, check_output + +from charmhelpers.core.host import ( + adduser, + add_user_to_group, + pwgen, +) + +from charmhelpers.core.hookenv import ( + log, + hook_name, + relation_ids, + related_units, + relation_set, + relation_get, + unit_private_ip, + INFO, + ERROR, +) + +BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false', + '-fastcheck=true', '-group=false', '-owner=false', + '-prefer=newer', '-times=true'] + + +def get_homedir(user): + try: + user = pwd.getpwnam(user) + return user.pw_dir + except KeyError: + log('Could not get homedir for user %s: user exists?' % (user), ERROR) + raise Exception + + +def create_private_key(user, priv_key_path, key_type='rsa'): + types_bits = { + 'rsa': '2048', + 'ecdsa': '521', + } + if key_type not in types_bits: + log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR) + key_type = 'rsa' + if not os.path.isfile(priv_key_path): + log('Generating new SSH key for user %s.' 
% user) + cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type, + '-b', types_bits[key_type], '-f', priv_key_path] + check_call(cmd) + else: + log('SSH key already exists at %s.' % priv_key_path) + check_call(['chown', user, priv_key_path]) + check_call(['chmod', '0600', priv_key_path]) + + +def create_public_key(user, priv_key_path, pub_key_path): + if not os.path.isfile(pub_key_path): + log('Generating missing ssh public key @ %s.' % pub_key_path) + cmd = ['ssh-keygen', '-y', '-f', priv_key_path] + p = check_output(cmd).strip() + with open(pub_key_path, 'wb') as out: + out.write(p) + check_call(['chown', user, pub_key_path]) + + +def get_keypair(user): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + priv_key = os.path.join(ssh_dir, 'id_rsa') + pub_key = '%s.pub' % priv_key + + if not os.path.isdir(ssh_dir): + os.mkdir(ssh_dir) + check_call(['chown', '-R', user, ssh_dir]) + + create_private_key(user, priv_key) + create_public_key(user, priv_key, pub_key) + + with open(priv_key, 'r') as p: + _priv = p.read().strip() + + with open(pub_key, 'r') as p: + _pub = p.read().strip() + + return (_priv, _pub) + + +def write_authorized_keys(user, keys): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + auth_keys = os.path.join(ssh_dir, 'authorized_keys') + log('Syncing authorized_keys @ %s.' % auth_keys) + with open(auth_keys, 'w') as out: + for k in keys: + out.write('%s\n' % k) + + +def write_known_hosts(user, hosts): + home_dir = get_homedir(user) + ssh_dir = os.path.join(home_dir, '.ssh') + known_hosts = os.path.join(ssh_dir, 'known_hosts') + khosts = [] + for host in hosts: + cmd = ['ssh-keyscan', host] + remote_key = check_output(cmd, universal_newlines=True).strip() + khosts.append(remote_key) + log('Syncing known_hosts @ %s.' 
% known_hosts) + with open(known_hosts, 'w') as out: + for host in khosts: + out.write('%s\n' % host) + + +def ensure_user(user, group=None): + adduser(user, pwgen()) + if group: + add_user_to_group(user, group) + + +def ssh_authorized_peers(peer_interface, user, group=None, + ensure_local_user=False): + """ + Main setup function, should be called from both peer -changed and -joined + hooks with the same parameters. + """ + if ensure_local_user: + ensure_user(user, group) + priv_key, pub_key = get_keypair(user) + hook = hook_name() + if hook == '%s-relation-joined' % peer_interface: + relation_set(ssh_pub_key=pub_key) + elif hook == '%s-relation-changed' % peer_interface or \ + hook == '%s-relation-departed' % peer_interface: + hosts = [] + keys = [] + + for r_id in relation_ids(peer_interface): + for unit in related_units(r_id): + ssh_pub_key = relation_get('ssh_pub_key', + rid=r_id, + unit=unit) + priv_addr = relation_get('private-address', + rid=r_id, + unit=unit) + if ssh_pub_key: + keys.append(ssh_pub_key) + hosts.append(priv_addr) + else: + log('ssh_authorized_peers(): ssh_pub_key ' + 'missing for unit %s, skipping.' 
% unit) + write_authorized_keys(user, keys) + write_known_hosts(user, hosts) + authed_hosts = ':'.join(hosts) + relation_set(ssh_authorized_hosts=authed_hosts) + + +def _run_as_user(user, gid=None): + try: + user = pwd.getpwnam(user) + except KeyError: + log('Invalid user: %s' % user) + raise Exception + uid = user.pw_uid + gid = gid or user.pw_gid + os.environ['HOME'] = user.pw_dir + + def _inner(): + os.setgid(gid) + os.setuid(uid) + return _inner + + +def run_as_user(user, cmd, gid=None): + return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/') + + +def collect_authed_hosts(peer_interface): + '''Iterate through the units on peer interface to find all that + have the calling host in its authorized hosts list''' + hosts = [] + for r_id in (relation_ids(peer_interface) or []): + for unit in related_units(r_id): + private_addr = relation_get('private-address', + rid=r_id, unit=unit) + authed_hosts = relation_get('ssh_authorized_hosts', + rid=r_id, unit=unit) + + if not authed_hosts: + log('Peer %s has not authorized *any* hosts yet, skipping.' % + (unit), level=INFO) + continue + + if unit_private_ip() in authed_hosts.split(':'): + hosts.append(private_addr) + else: + log('Peer %s has not authorized *this* host yet, skipping.' % + (unit), level=INFO) + return hosts + + +def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync path to an specific peer host + + Propagates exception if operation fails and fatal=True. + """ + cmd = cmd or copy(BASE_CMD) + if not verbose: + cmd.append('-silent') + + # removing trailing slash from directory paths, unison + # doesn't like these. 
+ if path.endswith('/'): + path = path[:(len(path) - 1)] + + cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)] + + try: + log('Syncing local path %s to %s@%s:%s' % (path, user, host, path)) + run_as_user(user, cmd, gid) + except: + log('Error syncing remote files') + if fatal: + raise + + +def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None, + fatal=False): + """Sync paths to an specific peer host + + Propagates exception if any operation fails and fatal=True. + """ + if paths: + for p in paths: + sync_path_to_host(p, host, user, verbose, cmd, gid, fatal) + + +def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None, + gid=None, fatal=False): + """Sync all hosts to an specific path + + The type of group is integer, it allows user has permissions to + operate a directory have a different group id with the user id. + + Propagates exception if any operation fails and fatal=True. + """ + if paths: + for host in collect_authed_hosts(peer_interface): + sync_to_peer(host, user, paths, verbose, cmd, gid, fatal) diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py new file mode 100644 index 0000000..0f12d32 --- /dev/null +++ b/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 411d0f2..15b09d1 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -21,7 +21,10 @@ # Charm Helpers Developers from __future__ import print_function +import copy +from distutils.version import LooseVersion from functools import wraps +import glob import os import json import yaml @@ -242,29 +245,7 @@ class Config(dict): self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) if os.path.exists(self.path): self.load_previous() - - def __getitem__(self, key): - """For regular dict lookups, check the current juju config first, - then the previous (saved) copy. This ensures that user-saved values - will be returned by a dict lookup. 
- - """ - try: - return dict.__getitem__(self, key) - except KeyError: - return (self._prev_dict or {})[key] - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def keys(self): - prev_keys = [] - if self._prev_dict is not None: - prev_keys = self._prev_dict.keys() - return list(set(prev_keys + list(dict.keys(self)))) + atexit(self._implicit_save) def load_previous(self, path=None): """Load previous copy of config from disk. @@ -283,6 +264,9 @@ class Config(dict): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) + for k, v in copy.deepcopy(self._prev_dict).items(): + if k not in self: + self[k] = v def changed(self, key): """Return True if the current value for this key is different from @@ -314,13 +298,13 @@ class Config(dict): instance. """ - if self._prev_dict: - for k, v in six.iteritems(self._prev_dict): - if k not in self: - self[k] = v with open(self.path, 'w') as f: json.dump(self, f) + def _implicit_save(self): + if self.implicit_save: + self.save() + @cached def config(scope=None): @@ -364,11 +348,16 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): relation_settings = relation_settings if relation_settings else {} relation_cmd_line = ['relation-set'] accepts_file = "--file" in subprocess.check_output( - relation_cmd_line + ["--help"]) + relation_cmd_line + ["--help"], universal_newlines=True) if relation_id is not None: relation_cmd_line.extend(('-r', relation_id)) settings = relation_settings.copy() settings.update(kwargs) + for key, value in settings.items(): + # Force value to be a string: it always should, but some call + # sites pass in things like dicts or numbers. + if value is not None: + settings[key] = "{}".format(value) if accepts_file: # --file was introduced in Juju 1.23.2. 
Use it by default if # available, since otherwise we'll break if the relation data is @@ -390,6 +379,17 @@ def relation_set(relation_id=None, relation_settings=None, **kwargs): flush(local_unit()) +def relation_clear(r_id=None): + ''' Clears any relation data already set on relation r_id ''' + settings = relation_get(rid=r_id, + unit=local_unit()) + for setting in settings: + if setting not in ['public-address', 'private-address']: + settings[setting] = None + relation_set(relation_id=r_id, + **settings) + + @cached def relation_ids(reltype=None): """A list of relation_ids""" @@ -571,10 +571,14 @@ class Hooks(object): hooks.execute(sys.argv) """ - def __init__(self, config_save=True): + def __init__(self, config_save=None): super(Hooks, self).__init__() self._hooks = {} - self._config_save = config_save + + # For unknown reasons, we allow the Hooks constructor to override + # config().implicit_save. + if config_save is not None: + config().implicit_save = config_save def register(self, name, function): """Register a hook""" @@ -582,13 +586,16 @@ class Hooks(object): def execute(self, args): """Execute a registered hook based on args[0]""" + _run_atstart() hook_name = os.path.basename(args[0]) if hook_name in self._hooks: - self._hooks[hook_name]() - if self._config_save: - cfg = config() - if cfg.implicit_save: - cfg.save() + try: + self._hooks[hook_name]() + except SystemExit as x: + if x.code is None or x.code == 0: + _run_atexit() + raise + _run_atexit() else: raise UnregisteredHookError(hook_name) @@ -681,3 +688,115 @@ def status_get(): return 'unknown' else: raise + + +def translate_exc(from_exc, to_exc): + def inner_translate_exc1(f): + def inner_translate_exc2(*args, **kwargs): + try: + return f(*args, **kwargs) + except from_exc: + raise to_exc + + return inner_translate_exc2 + + return inner_translate_exc1 + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def is_leader(): + """Does the current unit hold the juju leadership + + Uses juju to 
determine whether the current unit is the leader of its peers + """ + cmd = ['is-leader', '--format=json'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_get(attribute=None): + """Juju leader get value(s)""" + cmd = ['leader-get', '--format=json'] + [attribute or '-'] + return json.loads(subprocess.check_output(cmd).decode('UTF-8')) + + +@translate_exc(from_exc=OSError, to_exc=NotImplementedError) +def leader_set(settings=None, **kwargs): + """Juju leader set value(s)""" + # Don't log secrets. + # log("Juju leader-set '%s'" % (settings), level=DEBUG) + cmd = ['leader-set'] + settings = settings or {} + settings.update(kwargs) + for k, v in settings.items(): + if v is None: + cmd.append('{}='.format(k)) + else: + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@cached +def juju_version(): + """Full version string (eg. '1.23.3.1-trusty-amd64')""" + # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1 + jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0] + return subprocess.check_output([jujud, 'version'], + universal_newlines=True).strip() + + +@cached +def has_juju_version(minimum_version): + """Return True if the Juju version is at least the provided version""" + return LooseVersion(juju_version()) >= LooseVersion(minimum_version) + + +_atexit = [] +_atstart = [] + + +def atstart(callback, *args, **kwargs): + '''Schedule a callback to run before the main hook. + + Callbacks are run in the order they were added. + + This is useful for modules and classes to perform initialization + and inject behavior. In particular: + + - Run common code before all of your hooks, such as logging + the hook name or interesting relation data. + - Defer object or module initialization that requires a hook + context until we know there actually is a hook context, + making testing easier. 
+ - Rather than requiring charm authors to include boilerplate to + invoke your helper's behavior, have it run automatically if + your object is instantiated or module imported. + + This is not at all useful after your hook framework as been launched. + ''' + global _atstart + _atstart.append((callback, args, kwargs)) + + +def atexit(callback, *args, **kwargs): + '''Schedule a callback to run on successful hook completion. + + Callbacks are run in the reverse order that they were added.''' + _atexit.append((callback, args, kwargs)) + + +def _run_atstart(): + '''Hook frameworks must invoke this before running the main hook body.''' + global _atstart + for callback, args, kwargs in _atstart: + callback(*args, **kwargs) + del _atstart[:] + + +def _run_atexit(): + '''Hook frameworks must invoke this after the main hook body has + successfully completed. Do not invoke it if the hook fails.''' + global _atexit + for callback, args, kwargs in reversed(_atexit): + callback(*args, **kwargs) + del _atexit[:] diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 0d2ab4b..8ae8ef8 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -24,6 +24,7 @@ import os import re import pwd +import glob import grp import random import string @@ -62,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False): return service_result +def service_pause(service_name, init_dir=None): + """Pause a system service. + + Stop it, and prevent it from starting again at boot.""" + if init_dir is None: + init_dir = "/etc/init" + stopped = service_stop(service_name) + # XXX: Support systemd too + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + return stopped + + +def service_resume(service_name, init_dir=None): + """Resume a system service. + + Reenable starting again at boot. 
Start the service""" + # XXX: Support systemd too + if init_dir is None: + init_dir = "/etc/init" + override_path = os.path.join( + init_dir, '{}.conf.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + started = service_start(service_name) + return started + + def service(action, service_name): """Control a system service""" cmd = ['service', service_name, action] @@ -139,11 +170,7 @@ def add_group(group_name, system_group=False): def add_user_to_group(username, group): """Add a user to a group""" - cmd = [ - 'gpasswd', '-a', - username, - group - ] + cmd = ['gpasswd', '-a', username, group] log("Adding user {} to group {}".format(username, group)) subprocess.check_call(cmd) @@ -269,6 +296,21 @@ def file_hash(path, hash_type='md5'): return None +def path_hash(path): + """ + Generate a hash checksum of all files matching 'path'. Standard wildcards + like '*' and '?' are supported, see documentation for the 'glob' module for + more information. + + :return: dict: A { filename: hash } dictionary for all matched files. + Empty if none found. + """ + return { + filename: file_hash(filename) + for filename in glob.iglob(path) + } + + def check_hash(path, checksum, hash_type='md5'): """ Validate a file using a cryptographic checksum. @@ -296,23 +338,25 @@ def restart_on_change(restart_map, stopstart=False): @restart_on_change({ '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ] + '/etc/apache/sites-enabled/*': [ 'apache2' ] }) - def ceph_client_changed(): + def config_changed(): pass # your code here In this example, the cinder-api and cinder-volume services would be restarted if /etc/ceph/ceph.conf is changed by the - ceph_client_changed function. + ceph_client_changed function. The apache2 service would be + restarted if any file matching the pattern got changed, created + or removed. Standard wildcards are supported, see documentation + for the 'glob' module for more information. 
""" def wrap(f): def wrapped_f(*args, **kwargs): - checksums = {} - for path in restart_map: - checksums[path] = file_hash(path) + checksums = {path: path_hash(path) for path in restart_map} f(*args, **kwargs) restarts = [] for path in restart_map: - if checksums[path] != file_hash(path): + if path_hash(path) != checksums[path]: restarts += restart_map[path] services_list = list(OrderedDict.fromkeys(restarts)) if not stopstart: diff --git a/hooks/charmhelpers/core/services/base.py b/hooks/charmhelpers/core/services/base.py index 2ff5338..a42660c 100644 --- a/hooks/charmhelpers/core/services/base.py +++ b/hooks/charmhelpers/core/services/base.py @@ -15,8 +15,8 @@ # along with charm-helpers. If not, see . import os -import re import json +from inspect import getargspec from collections import Iterable, OrderedDict from charmhelpers.core import host @@ -128,15 +128,18 @@ class ServiceManager(object): """ Handle the current hook by doing The Right Thing with the registered services. """ - hook_name = hookenv.hook_name() - if hook_name == 'stop': - self.stop_services() - else: - self.provide_data() - self.reconfigure_services() - cfg = hookenv.config() - if cfg.implicit_save: - cfg.save() + hookenv._run_atstart() + try: + hook_name = hookenv.hook_name() + if hook_name == 'stop': + self.stop_services() + else: + self.reconfigure_services() + self.provide_data() + except SystemExit as x: + if x.code is None or x.code == 0: + hookenv._run_atexit() + hookenv._run_atexit() def provide_data(self): """ @@ -145,15 +148,36 @@ class ServiceManager(object): A provider must have a `name` attribute, which indicates which relation to set data on, and a `provide_data()` method, which returns a dict of data to set. + + The `provide_data()` method can optionally accept two parameters: + + * ``remote_service`` The name of the remote service that the data will + be provided to. The `provide_data()` method will be called once + for each connected service (not unit). 
This allows the method to + tailor its data to the given service. + * ``service_ready`` Whether or not the service definition had all of + its requirements met, and thus the ``data_ready`` callbacks run. + + Note that the ``provided_data`` methods are now called **after** the + ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks + a chance to generate any data necessary for the providing to the remote + services. """ - hook_name = hookenv.hook_name() - for service in self.services.values(): + for service_name, service in self.services.items(): + service_ready = self.is_ready(service_name) for provider in service.get('provided_data', []): - if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name): - data = provider.provide_data() - _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data - if _ready: - hookenv.relation_set(None, data) + for relid in hookenv.relation_ids(provider.name): + units = hookenv.related_units(relid) + if not units: + continue + remote_service = units[0].split('/')[0] + argspec = getargspec(provider.provide_data) + if len(argspec.args) > 1: + data = provider.provide_data(remote_service, service_ready) + else: + data = provider.provide_data() + if data: + hookenv.relation_set(relid, data) def reconfigure_services(self, *service_names): """ diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 3eb5fb4..8005c41 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback): action. 
:param str source: The template source file, relative to - `$CHARM_DIR/templates` - + `$CHARM_DIR/templates` :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file + """ def __init__(self, source, target, owner='root', group='root', perms=0o444): diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 9a1a251..0a3bb96 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False): _run_apt_command(cmd, fatal) -def apt_hold(packages, fatal=False): - """Hold one or more packages""" - cmd = ['apt-mark', 'hold'] +def apt_mark(packages, mark, fatal=False): + """Flag one or more packages using apt-mark""" + cmd = ['apt-mark', mark] if isinstance(packages, six.string_types): cmd.append(packages) else: @@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False): log("Holding {}".format(packages)) if fatal: - subprocess.check_call(cmd) + subprocess.check_call(cmd, universal_newlines=True) else: - subprocess.call(cmd) + subprocess.call(cmd, universal_newlines=True) + + +def apt_hold(packages, fatal=False): + return apt_mark(packages, 'hold', fatal=fatal) + + +def apt_unhold(packages, fatal=False): + return apt_mark(packages, 'unhold', fatal=fatal) def add_source(source, key=None): @@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs): for handler in handlers: try: installed_to = handler.install(source, *args, **kwargs) - except UnhandledSource: - pass + except UnhandledSource as e: + log('Install source attempt unsuccessful: {}'.format(e), + level='WARNING') if not installed_to: raise UnhandledSource("No handler found for source {}".format(source)) return installed_to diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py index 8dfce50..efd7f9f 
100644 --- a/hooks/charmhelpers/fetch/archiveurl.py +++ b/hooks/charmhelpers/fetch/archiveurl.py @@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): def can_handle(self, source): url_parts = self.parse_url(source) if url_parts.scheme not in ('http', 'https', 'ftp', 'file'): + # XXX: Why is this returning a boolean and a string? It's + # doomed to fail since "bool(can_handle('foo://'))" will be True. return "Wrong source type" if get_archive_handler(self.base_url(source)): return True @@ -155,7 +157,11 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): else: algorithms = hashlib.algorithms_available if key in algorithms: - check_hash(dld_file, value, key) + if len(value) != 1: + raise TypeError( + "Expected 1 hash value, not %d" % len(value)) + expected = value[0] + check_hash(dld_file, expected, key) if checksum: check_hash(dld_file, checksum, hash_type) return extract(dld_file, dest) diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py index 93aae87..f023b26 100644 --- a/hooks/charmhelpers/fetch/giturl.py +++ b/hooks/charmhelpers/fetch/giturl.py @@ -45,14 +45,16 @@ class GitUrlFetchHandler(BaseFetchHandler): else: return True - def clone(self, source, dest, branch): + def clone(self, source, dest, branch, depth=None): if not self.can_handle(source): raise UnhandledSource("Cannot handle {}".format(source)) - repo = Repo.clone_from(source, dest) - repo.git.checkout(branch) + if depth: + Repo.clone_from(source, dest, branch=branch, depth=depth) + else: + Repo.clone_from(source, dest, branch=branch) - def install(self, source, branch="master", dest=None): + def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) branch_name = url_parts.path.strip("/").split("/")[-1] if dest: @@ -63,9 +65,9 @@ class GitUrlFetchHandler(BaseFetchHandler): if not os.path.exists(dest_dir): mkdir(dest_dir, perms=0o755) try: - self.clone(source, dest_dir, branch) + self.clone(source, dest_dir, branch, 
depth) except GitCommandError as e: - raise UnhandledSource(e.message) + raise UnhandledSource(e) except OSError as e: raise UnhandledSource(e.strerror) return dest_dir diff --git a/hooks/charmhelpers/payload/archive.py b/hooks/charmhelpers/payload/archive.py new file mode 100644 index 0000000..5aaf13e --- /dev/null +++ b/hooks/charmhelpers/payload/archive.py @@ -0,0 +1,73 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
+ +import os +import tarfile +import zipfile +from charmhelpers.core import ( + host, + hookenv, +) + + +class ArchiveError(Exception): + pass + + +def get_archive_handler(archive_name): + if os.path.isfile(archive_name): + if tarfile.is_tarfile(archive_name): + return extract_tarfile + elif zipfile.is_zipfile(archive_name): + return extract_zipfile + else: + # look at the file name + for ext in ('.tar', '.tar.gz', '.tgz', 'tar.bz2', '.tbz2', '.tbz'): + if archive_name.endswith(ext): + return extract_tarfile + for ext in ('.zip', '.jar'): + if archive_name.endswith(ext): + return extract_zipfile + + +def archive_dest_default(archive_name): + archive_file = os.path.basename(archive_name) + return os.path.join(hookenv.charm_dir(), "archives", archive_file) + + +def extract(archive_name, destpath=None): + handler = get_archive_handler(archive_name) + if handler: + if not destpath: + destpath = archive_dest_default(archive_name) + if not os.path.isdir(destpath): + host.mkdir(destpath) + handler(archive_name, destpath) + return destpath + else: + raise ArchiveError("No handler for archive") + + +def extract_tarfile(archive_name, destpath): + "Unpack a tar archive, optionally compressed" + archive = tarfile.open(archive_name) + archive.extractall(destpath) + + +def extract_zipfile(archive_name, destpath): + "Unpack a zip file" + archive = zipfile.ZipFile(archive_name) + archive.extractall(destpath) diff --git a/hooks/neutron-plugin-api-relation-changed b/hooks/neutron-plugin-api-relation-changed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/neutron-plugin-api-relation-changed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/neutron-plugin-api-relation-departed b/hooks/neutron-plugin-api-relation-departed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/neutron-plugin-api-relation-departed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git 
a/hooks/neutron-plugin-api-relation-joined b/hooks/neutron-plugin-api-relation-joined deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/neutron-plugin-api-relation-joined +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/pg_dir_context.py b/hooks/pg_dir_context.py index 09a99f4..9be0b9f 100644 --- a/hooks/pg_dir_context.py +++ b/hooks/pg_dir_context.py @@ -1,3 +1,7 @@ +# Copyright (c) 2015, PLUMgrid Inc, http://plumgrid.com + +# This file contains the class that generates context for PLUMgrid template files. + from charmhelpers.core.hookenv import ( config, unit_get, @@ -9,53 +13,36 @@ from charmhelpers.contrib.network.ip import get_address_in_network import re from socket import gethostname as get_unit_hostname -''' -def _neutron_api_settings(): - neutron_settings = { - 'neutron_security_groups': False, - 'l2_population': True, - 'overlay_network_type': 'gre', - } - for rid in relation_ids('neutron-plugin-api'): - for unit in related_units(rid): - rdata = relation_get(rid=rid, unit=unit) - if 'l2-population' not in rdata: - continue - neutron_settings = { - 'l2_population': rdata['l2-population'], - 'neutron_security_groups': rdata['neutron-security-groups'], - 'overlay_network_type': rdata['overlay-network-type'], - } - # Override with configuration if set to true - if config('disable-security-groups'): - neutron_settings['neutron_security_groups'] = False - return neutron_settings - return neutron_settings -''' - class PGDirContext(context.NeutronContext): - interfaces = [] @property def plugin(self): + ''' + Over-riding function in NeutronContext Class to return 'plumgrid' + as the neutron plugin. + ''' return 'plumgrid' @property def network_manager(self): + ''' + Over-riding function in NeutronContext Class to return 'neutron' + as the network manager. + ''' return 'neutron' def _save_flag_file(self): + ''' + Over-riding function in NeutronContext Class. + Function only needed for OVS. 
+ ''' pass - #@property - #def neutron_security_groups(self): - # neutron_api_settings = _neutron_api_settings() - # return neutron_api_settings['neutron_security_groups'] - def pg_ctxt(self): - #Generated Config for all Plumgrid templates inside - #the templates folder + ''' + Generated Config for all PLUMgrid templates inside the templates folder. + ''' pg_ctxt = super(PGDirContext, self).pg_ctxt() if not pg_ctxt: return {} @@ -64,9 +51,6 @@ class PGDirContext(context.NeutronContext): pg_ctxt['local_ip'] = \ get_address_in_network(network=None, fallback=get_host_ip(unit_get('private-address'))) - #neutron_api_settings = _neutron_api_settings() - - #TODO: Get this value from the neutron-api charm pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip'] pg_ctxt['pg_hostname'] = "pg-director" pg_ctxt['interface'] = "juju-br0" diff --git a/hooks/pg_dir_hooks.py b/hooks/pg_dir_hooks.py index cb1a495..655db3e 100755 --- a/hooks/pg_dir_hooks.py +++ b/hooks/pg_dir_hooks.py @@ -1,13 +1,22 @@ #!/usr/bin/python +# Copyright (c) 2015, PLUMgrid Inc, http://plumgrid.com + +# The hooks of this charm have been symlinked to functions +# in this file. + import sys from charmhelpers.core.hookenv import ( Hooks, UnregisteredHookError, - config, log, - relation_set, +) + +from charmhelpers.fetch import ( + apt_install, + apt_purge, + configure_sources, ) from pg_dir_utils import ( @@ -15,6 +24,11 @@ from pg_dir_utils import ( ensure_files, restart_pg, stop_pg, + determine_packages, + load_iovisor, + remove_iovisor, + ensure_mtu, + add_lcm_key, ) hooks = Hooks() @@ -23,28 +37,61 @@ CONFIGS = register_configs() @hooks.hook() def install(): + ''' + Install hook is run when the charm is first deployed on a node. 
+ ''' + configure_sources(update=True) + pkgs = determine_packages() + for pkg in pkgs: + apt_install(pkg, options=['--force-yes'], fatal=True) + load_iovisor() + ensure_mtu() ensure_files() + add_lcm_key() @hooks.hook('plumgrid-plugin-relation-joined') def plumgrid_dir(): + ''' + This hook is run when relation between neutron-api-plumgrid + and plumgrid-director is made. + ''' + ensure_mtu() ensure_files() + add_lcm_key() CONFIGS.write_all() restart_pg() -@hooks.hook('plumgrid-relation-joined') -def plumgrid_joined(relation_id=None): - #We can pass information to the edge and gateway from there. - relation_data = { - 'pg_virtual_ip': config('plumgrid-virtual-ip'), - } - relation_set(relation_id=relation_id, **relation_data) +@hooks.hook('config-changed') +def config_changed(): + ''' + This hook is run when a config parameter is changed. + It also runs on node reboot. + ''' + stop_pg() + configure_sources(update=True) + pkgs = determine_packages() + for pkg in pkgs: + apt_install(pkg, options=['--force-yes'], fatal=True) + load_iovisor() + ensure_mtu() + ensure_files() + add_lcm_key() + CONFIGS.write_all() + restart_pg() @hooks.hook('stop') def stop(): + ''' + This hook is run when the charm is destroyed. + ''' stop_pg() + remove_iovisor() + pkgs = determine_packages() + for pkg in pkgs: + apt_purge(pkg, fatal=False) def main(): diff --git a/hooks/pg_dir_utils.py b/hooks/pg_dir_utils.py index 0958690..f2cbf3d 100644 --- a/hooks/pg_dir_utils.py +++ b/hooks/pg_dir_utils.py @@ -1,38 +1,43 @@ +# Copyright (c) 2015, PLUMgrid Inc, http://plumgrid.com + +# This file contains functions used by the hooks to deploy PLUMgrid Director. 
+ from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute from copy import deepcopy -from charmhelpers.core.hookenv import log +from charmhelpers.core.hookenv import ( + log, + config, +) from charmhelpers.contrib.openstack import templating +from charmhelpers.core.host import set_nic_mtu from collections import OrderedDict +from charmhelpers.contrib.storage.linux.ceph import modprobe from charmhelpers.contrib.openstack.utils import ( os_release, ) +from charmhelpers.core.host import ( + service_start, + service_stop, +) import pg_dir_context import subprocess import time +import os -#Dont need these right now -NOVA_CONF_DIR = "/etc/nova" -NEUTRON_CONF_DIR = "/etc/neutron" -NEUTRON_CONF = '%s/neutron.conf' % NEUTRON_CONF_DIR -NEUTRON_DEFAULT = '/etc/default/neutron-server' +LXC_CONF = '/etc/libvirt/lxc.conf' +TEMPLATES = 'templates/' +PG_LXC_DATA_PATH = '/var/lib/libvirt/filesystems/plumgrid-data' -#Puppet Files -P_PGKA_CONF = '/opt/pg/etc/puppet/modules/sal/templates/keepalived.conf.erb' -P_PG_CONF = '/opt/pg/etc/puppet/modules/plumgrid/templates/plumgrid.conf.erb' -P_PGDEF_CONF = '/opt/pg/etc/puppet/modules/sal/templates/default.conf.erb' - -#Plumgrid Files -PGKA_CONF = '/var/lib/libvirt/filesystems/plumgrid/etc/keepalived/keepalived.conf' -PG_CONF = '/var/lib/libvirt/filesystems/plumgrid/opt/pg/etc/plumgrid.conf' -PGDEF_CONF = '/var/lib/libvirt/filesystems/plumgrid/opt/pg/sal/nginx/conf.d/default.conf' -PGHN_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hostname' -PGHS_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/etc/hosts' -PGIFCS_CONF = '/var/lib/libvirt/filesystems/plumgrid-data/conf/pg/ifcs.conf' -IFCTL_CONF = '/var/run/plumgrid/lxc/ifc_list_gateway' -IFCTL_P_CONF = '/var/lib/libvirt/filesystems/plumgrid/var/run/plumgrid/lxc/ifc_list_gateway' +PG_CONF = '%s/conf/pg/plumgrid.conf' % PG_LXC_DATA_PATH +PG_KA_CONF = '%s/conf/etc/keepalived.conf' % PG_LXC_DATA_PATH +PG_DEF_CONF = '%s/conf/pg/nginx.conf' % 
PG_LXC_DATA_PATH +PG_HN_CONF = '%s/conf/etc/hostname' % PG_LXC_DATA_PATH +PG_HS_CONF = '%s/conf/etc/hosts' % PG_LXC_DATA_PATH +PG_IFCS_CONF = '%s/conf/pg/ifcs.conf' % PG_LXC_DATA_PATH +AUTH_KEY_PATH = '%s/root/.ssh/authorized_keys' % PG_LXC_DATA_PATH BASE_RESOURCE_MAP = OrderedDict([ - (PGKA_CONF, { + (PG_KA_CONF, { 'services': ['plumgrid'], 'contexts': [pg_dir_context.PGDirContext()], }), @@ -40,32 +45,39 @@ BASE_RESOURCE_MAP = OrderedDict([ 'services': ['plumgrid'], 'contexts': [pg_dir_context.PGDirContext()], }), - (PGDEF_CONF, { + (PG_DEF_CONF, { 'services': ['plumgrid'], 'contexts': [pg_dir_context.PGDirContext()], }), - (PGHN_CONF, { + (PG_HN_CONF, { 'services': ['plumgrid'], 'contexts': [pg_dir_context.PGDirContext()], }), - (PGHS_CONF, { + (PG_HS_CONF, { 'services': ['plumgrid'], 'contexts': [pg_dir_context.PGDirContext()], }), - (PGIFCS_CONF, { + (PG_IFCS_CONF, { 'services': [], 'contexts': [pg_dir_context.PGDirContext()], }), ]) -TEMPLATES = 'templates/' def determine_packages(): + ''' + Returns list of packages required by PLUMgrid director as specified + in the neutron_plugins dictionary in charmhelpers. + ''' return neutron_plugin_attribute('plumgrid', 'packages', 'neutron') def register_configs(release=None): - release = release or os_release('neutron-common', base='icehouse') + ''' + Returns an object of the Openstack Templating Class which contains + the context required for all templates of this charm. + ''' + release = release or os_release('neutron-common', base='kilo') configs = templating.OSConfigRenderer(templates_dir=TEMPLATES, openstack_release=release) for cfg, rscs in resource_map().iteritems(): @@ -87,39 +99,106 @@ def restart_map(): Constructs a restart map based on charm config settings and relation state.
''' - return {k: v['services'] for k, v in resource_map().iteritems()} + return {cfg: rscs['services'] for cfg, rscs in resource_map().iteritems()} def ensure_files(): - _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PGKA_CONF, PGKA_CONF]) - _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PG_CONF, PG_CONF]) - _exec_cmd(cmd=['cp', '--remove-destination', '-f', P_PGDEF_CONF, PGDEF_CONF]) - _exec_cmd(cmd=['touch', PGIFCS_CONF]) - _exec_cmd(cmd=['mkdir', '/etc/nova']) - _exec_cmd(cmd=['touch', 'neutron_plugin.conf']) + ''' + Ensures PLUMgrid specific files exist before templates are written. + ''' + #_exec_cmd(cmd=['mkdir', '/etc/nova']) + #_exec_cmd(cmd=['touch', 'neutron_plugin.conf']) + pass def restart_pg(): - _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'destroy', 'plumgrid'], error_msg='ERROR Destroying PLUMgrid') - _exec_cmd(cmd=['rm', IFCTL_CONF, IFCTL_P_CONF], error_msg='ERROR Removing ifc_ctl_gateway file') + ''' + Stops and Starts PLUMgrid service after flushing iptables. + ''' + service_stop('plumgrid') + time.sleep(2) _exec_cmd(cmd=['iptables', '-F']) - _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'start', 'plumgrid'], error_msg='ERROR Starting PLUMgrid') - time.sleep(5) - _exec_cmd(cmd=['service', 'plumgrid', 'start'], error_msg='ERROR starting PLUMgrid service') + service_start('plumgrid') time.sleep(5) def stop_pg(): - _exec_cmd(cmd=['virsh', '-c', 'lxc:', 'destroy', 'plumgrid'], error_msg='ERROR Destroying PLUMgrid') + ''' + Stops PLUMgrid service. + ''' + service_stop('plumgrid') time.sleep(2) - _exec_cmd(cmd=['rm', IFCTL_CONF, IFCTL_P_CONF], error_msg='ERROR Removing ifc_ctl_gateway file') -def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs'): +def load_iovisor(): + ''' + Loads iovisor kernel module. + ''' + modprobe('iovisor') + + +def remove_iovisor(): + ''' + Removes iovisor kernel module. 
+ ''' + _exec_cmd(cmd=['rmmod', 'iovisor'], error_msg='Error Removing IOVisor Kernel Module') + + +def ensure_mtu(): + ''' + Ensures required MTU of the underlying networking of the node. + ''' + log("Changing MTU of juju-br0 and all attached interfaces") + interface_mtu = config('network-device-mtu') + cmd = subprocess.check_output(["brctl", "show", "juju-br0"]) + words = cmd.split() + for word in words: + if 'eth' in word: + set_nic_mtu(word, interface_mtu) + set_nic_mtu('juju-br0', interface_mtu) + + +def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs', fatal=False): + ''' + Function to execute any bash command on the node. + ''' if cmd is None: - log("NO command") + log("No command specified") else: - try: + if fatal: subprocess.check_call(cmd) - except subprocess.CalledProcessError, e: - log(error_msg) + else: + try: + subprocess.check_call(cmd) + except subprocess.CalledProcessError: + log(error_msg) + + +def add_lcm_key(): + ''' + Adds public key of PLUMgrid-lcm to authorized keys of PLUMgrid Director.
+ ''' + key = config('lcm-ssh-key') + if key == 'null': + log('lcm key not specified') + return 0 + file_write_type = 'w+' + if os.path.isfile(AUTH_KEY_PATH): + file_write_type = 'a' + try: + fr = open(AUTH_KEY_PATH, 'r') + except IOError: + log('plumgrid-lxc not installed yet') + return 0 + for line in fr: + if key in line: + log('key already added') + return 0 + try: + fa = open(AUTH_KEY_PATH, file_write_type) + except IOError: + log('Error opening file to append') + return 0 + fa.write(key) + fa.write('\n') + fa.close() diff --git a/hooks/plumgrid-plugin-relation-broken b/hooks/plumgrid-plugin-relation-broken deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-plugin-relation-broken +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-plugin-relation-changed b/hooks/plumgrid-plugin-relation-changed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-plugin-relation-changed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-plugin-relation-departed b/hooks/plumgrid-plugin-relation-departed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-plugin-relation-departed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-broken b/hooks/plumgrid-relation-broken deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-relation-broken +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-changed b/hooks/plumgrid-relation-changed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-relation-changed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/hooks/plumgrid-relation-departed b/hooks/plumgrid-relation-departed deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/plumgrid-relation-departed +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No 
newline at end of file diff --git a/hooks/neutron-plugin-api-relation-broken b/hooks/start similarity index 100% rename from hooks/neutron-plugin-api-relation-broken rename to hooks/start diff --git a/hooks/upgrade-charm b/hooks/upgrade-charm deleted file mode 120000 index 6530c5a..0000000 --- a/hooks/upgrade-charm +++ /dev/null @@ -1 +0,0 @@ -pg_dir_hooks.py \ No newline at end of file diff --git a/metadata.yaml b/metadata.yaml index b3550ba..820d422 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -12,9 +12,6 @@ tags: requires: plumgrid-plugin: interface: plumgrid-plugin - neutron-plugin-api: - interface: neutron-plugin-api provides: plumgrid: interface: plumgrid -