Addressed reviews by Charmers
This commit is contained in:
parent
f034c26f6c
commit
649195946f
26
README.ex
26
README.ex
@ -7,19 +7,17 @@ Once deployed this charm performs the configurations required for a PLUMgrid Dir
|
|||||||
Step by step instructions on using the charm:
|
Step by step instructions on using the charm:
|
||||||
|
|
||||||
juju deploy neutron-api
|
juju deploy neutron-api
|
||||||
juju deploy neutron-plumgrid-plugin neutron-api
|
juju deploy neutron-api-plumgrid
|
||||||
juju deploy neutron-iovisor
|
juju deploy plumgrid-director
|
||||||
juju deploy plumgrid-director --to <Machince No of neutron-iovisor>
|
|
||||||
|
|
||||||
juju add-relation neutron-api neutron-plumgrid-plugin
|
juju add-relation neutron-api neutron-api-plumgrid
|
||||||
juju add-relation neutron-plumgrid-plugin neutron-iovisor
|
juju add-relation neutron-api-plumgrid plumgrid-director
|
||||||
juju add-relation neutron-iovisor plumgrid-director
|
|
||||||
|
|
||||||
For plumgrid-director to work make the configuration in the neutron-api, neutron-plumgrid-plugin and neutron-iovisor charms as specified in the configuration section below.
|
For plumgrid-director to work make the configuration in the neutron-api and neutron-api-plumgrid charms as specified in the configuration section below.
|
||||||
|
|
||||||
# Known Limitations and Issues
|
# Known Limitations and Issues
|
||||||
|
|
||||||
This is an early access version of the PLUMgrid Director charm and it is not meant for production deployments. The charm only works with JUNO for now. This charm needs to be deployed on a node where a unit of neutron-iovisor charm exists. Also plumgrid-edge and plumgrid-gateway charms should not be deployed on the same node.
|
This is an early access version of the PLUMgrid Director charm and it is not meant for production deployments. The charm only supports Kilo Openstack Release.
|
||||||
|
|
||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
@ -27,10 +25,9 @@ Example Config
|
|||||||
|
|
||||||
plumgrid-director:
|
plumgrid-director:
|
||||||
plumgrid-virtual-ip: "192.168.100.250"
|
plumgrid-virtual-ip: "192.168.100.250"
|
||||||
neutron-iovisor:
|
|
||||||
install_sources: 'ppa:plumgrid-team/stable'
|
install_sources: 'ppa:plumgrid-team/stable'
|
||||||
install_keys: 'null'
|
install_keys: 'null'
|
||||||
neutron-plumgrid-plugin:
|
neutron-api-plumgrid:
|
||||||
install_sources: 'ppa:plumgrid-team/stable'
|
install_sources: 'ppa:plumgrid-team/stable'
|
||||||
install_keys: 'null'
|
install_keys: 'null'
|
||||||
enable-metadata: False
|
enable-metadata: False
|
||||||
@ -38,9 +35,12 @@ Example Config
|
|||||||
neutron-plugin: "plumgrid"
|
neutron-plugin: "plumgrid"
|
||||||
plumgrid-virtual-ip: "192.168.100.250"
|
plumgrid-virtual-ip: "192.168.100.250"
|
||||||
|
|
||||||
The plumgrid-virtual-ip is the IP address of the PLUMgrid Director's Management interface and that the same IP is used to access PLUMgrid Console.
|
Provide the virtual IP you want PLUMgrid GUI to be accessible.
|
||||||
Ensure that the same ip is specified in the neutron-api charm configuration.
|
Make sure that it is the same IP specified in the neutron-api charm configuration for PLUMgrid.
|
||||||
Using the example config provided above PLUMgrid Console can be accessed at https://192.168.100.250
|
The virtual IP passed on in the neutron-api charm has to be same as the one passed in the plumgrid-director charm.
|
||||||
|
Provide the source repo path for PLUMgrid Debs in 'install_sources' and the corresponding keys in 'install_keys'.
|
||||||
|
|
||||||
|
You can access the PG Console at https://192.168.100.250
|
||||||
|
|
||||||
# Contact Information
|
# Contact Information
|
||||||
|
|
||||||
|
@ -1,253 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Authors:
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import optparse
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
import shutil
|
|
||||||
import sys
|
|
||||||
import tempfile
|
|
||||||
import yaml
|
|
||||||
from fnmatch import fnmatch
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
|
|
||||||
|
|
||||||
|
|
||||||
def parse_config(conf_file):
|
|
||||||
if not os.path.isfile(conf_file):
|
|
||||||
logging.error('Invalid config file: %s.' % conf_file)
|
|
||||||
return False
|
|
||||||
return yaml.load(open(conf_file).read())
|
|
||||||
|
|
||||||
|
|
||||||
def clone_helpers(work_dir, branch):
|
|
||||||
dest = os.path.join(work_dir, 'charm-helpers')
|
|
||||||
logging.info('Checking out %s to %s.' % (branch, dest))
|
|
||||||
cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
return dest
|
|
||||||
|
|
||||||
|
|
||||||
def _module_path(module):
|
|
||||||
return os.path.join(*module.split('.'))
|
|
||||||
|
|
||||||
|
|
||||||
def _src_path(src, module):
|
|
||||||
return os.path.join(src, 'charmhelpers', _module_path(module))
|
|
||||||
|
|
||||||
|
|
||||||
def _dest_path(dest, module):
|
|
||||||
return os.path.join(dest, _module_path(module))
|
|
||||||
|
|
||||||
|
|
||||||
def _is_pyfile(path):
|
|
||||||
return os.path.isfile(path + '.py')
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_init(path):
|
|
||||||
'''
|
|
||||||
ensure directories leading up to path are importable, omitting
|
|
||||||
parent directory, eg path='/hooks/helpers/foo'/:
|
|
||||||
hooks/
|
|
||||||
hooks/helpers/__init__.py
|
|
||||||
hooks/helpers/foo/__init__.py
|
|
||||||
'''
|
|
||||||
for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
|
|
||||||
_i = os.path.join(d, '__init__.py')
|
|
||||||
if not os.path.exists(_i):
|
|
||||||
logging.info('Adding missing __init__.py: %s' % _i)
|
|
||||||
open(_i, 'wb').close()
|
|
||||||
|
|
||||||
|
|
||||||
def sync_pyfile(src, dest):
|
|
||||||
src = src + '.py'
|
|
||||||
src_dir = os.path.dirname(src)
|
|
||||||
logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
|
|
||||||
if not os.path.exists(dest):
|
|
||||||
os.makedirs(dest)
|
|
||||||
shutil.copy(src, dest)
|
|
||||||
if os.path.isfile(os.path.join(src_dir, '__init__.py')):
|
|
||||||
shutil.copy(os.path.join(src_dir, '__init__.py'),
|
|
||||||
dest)
|
|
||||||
ensure_init(dest)
|
|
||||||
|
|
||||||
|
|
||||||
def get_filter(opts=None):
|
|
||||||
opts = opts or []
|
|
||||||
if 'inc=*' in opts:
|
|
||||||
# do not filter any files, include everything
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _filter(dir, ls):
|
|
||||||
incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
|
|
||||||
_filter = []
|
|
||||||
for f in ls:
|
|
||||||
_f = os.path.join(dir, f)
|
|
||||||
|
|
||||||
if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
|
|
||||||
if True not in [fnmatch(_f, inc) for inc in incs]:
|
|
||||||
logging.debug('Not syncing %s, does not match include '
|
|
||||||
'filters (%s)' % (_f, incs))
|
|
||||||
_filter.append(f)
|
|
||||||
else:
|
|
||||||
logging.debug('Including file, which matches include '
|
|
||||||
'filters (%s): %s' % (incs, _f))
|
|
||||||
elif (os.path.isfile(_f) and not _f.endswith('.py')):
|
|
||||||
logging.debug('Not syncing file: %s' % f)
|
|
||||||
_filter.append(f)
|
|
||||||
elif (os.path.isdir(_f) and not
|
|
||||||
os.path.isfile(os.path.join(_f, '__init__.py'))):
|
|
||||||
logging.debug('Not syncing directory: %s' % f)
|
|
||||||
_filter.append(f)
|
|
||||||
return _filter
|
|
||||||
return _filter
|
|
||||||
|
|
||||||
|
|
||||||
def sync_directory(src, dest, opts=None):
|
|
||||||
if os.path.exists(dest):
|
|
||||||
logging.debug('Removing existing directory: %s' % dest)
|
|
||||||
shutil.rmtree(dest)
|
|
||||||
logging.info('Syncing directory: %s -> %s.' % (src, dest))
|
|
||||||
|
|
||||||
shutil.copytree(src, dest, ignore=get_filter(opts))
|
|
||||||
ensure_init(dest)
|
|
||||||
|
|
||||||
|
|
||||||
def sync(src, dest, module, opts=None):
|
|
||||||
|
|
||||||
# Sync charmhelpers/__init__.py for bootstrap code.
|
|
||||||
sync_pyfile(_src_path(src, '__init__'), dest)
|
|
||||||
|
|
||||||
# Sync other __init__.py files in the path leading to module.
|
|
||||||
m = []
|
|
||||||
steps = module.split('.')[:-1]
|
|
||||||
while steps:
|
|
||||||
m.append(steps.pop(0))
|
|
||||||
init = '.'.join(m + ['__init__'])
|
|
||||||
sync_pyfile(_src_path(src, init),
|
|
||||||
os.path.dirname(_dest_path(dest, init)))
|
|
||||||
|
|
||||||
# Sync the module, or maybe a .py file.
|
|
||||||
if os.path.isdir(_src_path(src, module)):
|
|
||||||
sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
|
|
||||||
elif _is_pyfile(_src_path(src, module)):
|
|
||||||
sync_pyfile(_src_path(src, module),
|
|
||||||
os.path.dirname(_dest_path(dest, module)))
|
|
||||||
else:
|
|
||||||
logging.warn('Could not sync: %s. Neither a pyfile or directory, '
|
|
||||||
'does it even exist?' % module)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_sync_options(options):
|
|
||||||
if not options:
|
|
||||||
return []
|
|
||||||
return options.split(',')
|
|
||||||
|
|
||||||
|
|
||||||
def extract_options(inc, global_options=None):
|
|
||||||
global_options = global_options or []
|
|
||||||
if global_options and isinstance(global_options, six.string_types):
|
|
||||||
global_options = [global_options]
|
|
||||||
if '|' not in inc:
|
|
||||||
return (inc, global_options)
|
|
||||||
inc, opts = inc.split('|')
|
|
||||||
return (inc, parse_sync_options(opts) + global_options)
|
|
||||||
|
|
||||||
|
|
||||||
def sync_helpers(include, src, dest, options=None):
|
|
||||||
if not os.path.isdir(dest):
|
|
||||||
os.makedirs(dest)
|
|
||||||
|
|
||||||
global_options = parse_sync_options(options)
|
|
||||||
|
|
||||||
for inc in include:
|
|
||||||
if isinstance(inc, str):
|
|
||||||
inc, opts = extract_options(inc, global_options)
|
|
||||||
sync(src, dest, inc, opts)
|
|
||||||
elif isinstance(inc, dict):
|
|
||||||
# could also do nested dicts here.
|
|
||||||
for k, v in six.iteritems(inc):
|
|
||||||
if isinstance(v, list):
|
|
||||||
for m in v:
|
|
||||||
inc, opts = extract_options(m, global_options)
|
|
||||||
sync(src, dest, '%s.%s' % (k, inc), opts)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
parser = optparse.OptionParser()
|
|
||||||
parser.add_option('-c', '--config', action='store', dest='config',
|
|
||||||
default=None, help='helper config file')
|
|
||||||
parser.add_option('-D', '--debug', action='store_true', dest='debug',
|
|
||||||
default=False, help='debug')
|
|
||||||
parser.add_option('-b', '--branch', action='store', dest='branch',
|
|
||||||
help='charm-helpers bzr branch (overrides config)')
|
|
||||||
parser.add_option('-d', '--destination', action='store', dest='dest_dir',
|
|
||||||
help='sync destination dir (overrides config)')
|
|
||||||
(opts, args) = parser.parse_args()
|
|
||||||
|
|
||||||
if opts.debug:
|
|
||||||
logging.basicConfig(level=logging.DEBUG)
|
|
||||||
else:
|
|
||||||
logging.basicConfig(level=logging.INFO)
|
|
||||||
|
|
||||||
if opts.config:
|
|
||||||
logging.info('Loading charm helper config from %s.' % opts.config)
|
|
||||||
config = parse_config(opts.config)
|
|
||||||
if not config:
|
|
||||||
logging.error('Could not parse config from %s.' % opts.config)
|
|
||||||
sys.exit(1)
|
|
||||||
else:
|
|
||||||
config = {}
|
|
||||||
|
|
||||||
if 'branch' not in config:
|
|
||||||
config['branch'] = CHARM_HELPERS_BRANCH
|
|
||||||
if opts.branch:
|
|
||||||
config['branch'] = opts.branch
|
|
||||||
if opts.dest_dir:
|
|
||||||
config['destination'] = opts.dest_dir
|
|
||||||
|
|
||||||
if 'destination' not in config:
|
|
||||||
logging.error('No destination dir. specified as option or config.')
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
if 'include' not in config:
|
|
||||||
if not args:
|
|
||||||
logging.error('No modules to sync specified as option or config.')
|
|
||||||
sys.exit(1)
|
|
||||||
config['include'] = []
|
|
||||||
[config['include'].append(a) for a in args]
|
|
||||||
|
|
||||||
sync_options = None
|
|
||||||
if 'options' in config:
|
|
||||||
sync_options = config['options']
|
|
||||||
tmpd = tempfile.mkdtemp()
|
|
||||||
try:
|
|
||||||
checkout = clone_helpers(tmpd, config['branch'])
|
|
||||||
sync_helpers(config['include'], checkout, config['destination'],
|
|
||||||
options=sync_options)
|
|
||||||
except Exception as e:
|
|
||||||
logging.error("Could not sync: %s" % e)
|
|
||||||
raise e
|
|
||||||
finally:
|
|
||||||
logging.debug('Cleaning up %s' % tmpd)
|
|
||||||
shutil.rmtree(tmpd)
|
|
@ -3,10 +3,5 @@ destination: hooks/charmhelpers
|
|||||||
include:
|
include:
|
||||||
- core
|
- core
|
||||||
- fetch
|
- fetch
|
||||||
- contrib.openstack|inc=*
|
- contrib
|
||||||
- contrib.hahelpers
|
- payload
|
||||||
- contrib.network.ovs
|
|
||||||
- contrib.storage.linux
|
|
||||||
- payload.execd
|
|
||||||
- contrib.network.ip
|
|
||||||
- contrib.python.packages
|
|
||||||
|
18
config.yaml
18
config.yaml
@ -2,4 +2,20 @@ options:
|
|||||||
plumgrid-virtual-ip:
|
plumgrid-virtual-ip:
|
||||||
default: 192.168.100.250
|
default: 192.168.100.250
|
||||||
type: string
|
type: string
|
||||||
description: The IP on which PG Console will be accessible.
|
description: IP address of the Director's Management interface. Same IP can be used to access PG Console.
|
||||||
|
lcm-ssh-key:
|
||||||
|
default: 'null'
|
||||||
|
type: string
|
||||||
|
description: Public SSH key of PLUMgrid LCM which is running PG-Tools
|
||||||
|
network-device-mtu:
|
||||||
|
type: string
|
||||||
|
default: '1580'
|
||||||
|
description: The MTU size for interfaces managed by director.
|
||||||
|
install_sources:
|
||||||
|
default: 'ppa:plumgrid-team/stable'
|
||||||
|
type: string
|
||||||
|
description: Provide the install source from where to install the PLUMgrid debs
|
||||||
|
install_keys:
|
||||||
|
default: null
|
||||||
|
type: string
|
||||||
|
description: Provide the respective keys of the install sources
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
|
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
|
||||||
|
|
||||||
Files: *
|
Files: *
|
||||||
Copyright: 2012, Canonical Ltd.
|
Copyright: 2015, PLUMgrid Inc.
|
||||||
License: GPL-3
|
License: GPL-3
|
||||||
|
|
||||||
License: GPL-3
|
License: GPL-3
|
||||||
|
@ -1,38 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
|
||||||
# only standard libraries.
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
try:
|
|
||||||
import six # flake8: noqa
|
|
||||||
except ImportError:
|
|
||||||
if sys.version_info.major == 2:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
|
|
||||||
else:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
|
|
||||||
import six # flake8: noqa
|
|
||||||
|
|
||||||
try:
|
|
||||||
import yaml # flake8: noqa
|
|
||||||
except ImportError:
|
|
||||||
if sys.version_info.major == 2:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
|
||||||
else:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
|
||||||
import yaml # flake8: noqa
|
|
@ -1,82 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# This file is sourced from lp:openstack-charm-helpers
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_cert(cn=None):
|
|
||||||
# TODO: deal with multiple https endpoints via charm config
|
|
||||||
cert = config_get('ssl_cert')
|
|
||||||
key = config_get('ssl_key')
|
|
||||||
if not (cert and key):
|
|
||||||
log("Inspecting identity-service relations for SSL certificate.",
|
|
||||||
level=INFO)
|
|
||||||
cert = key = None
|
|
||||||
if cn:
|
|
||||||
ssl_cert_attr = 'ssl_cert_{}'.format(cn)
|
|
||||||
ssl_key_attr = 'ssl_key_{}'.format(cn)
|
|
||||||
else:
|
|
||||||
ssl_cert_attr = 'ssl_cert'
|
|
||||||
ssl_key_attr = 'ssl_key'
|
|
||||||
for r_id in relation_ids('identity-service'):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
if not cert:
|
|
||||||
cert = relation_get(ssl_cert_attr,
|
|
||||||
rid=r_id, unit=unit)
|
|
||||||
if not key:
|
|
||||||
key = relation_get(ssl_key_attr,
|
|
||||||
rid=r_id, unit=unit)
|
|
||||||
return (cert, key)
|
|
||||||
|
|
||||||
|
|
||||||
def get_ca_cert():
|
|
||||||
ca_cert = config_get('ssl_ca')
|
|
||||||
if ca_cert is None:
|
|
||||||
log("Inspecting identity-service relations for CA SSL certificate.",
|
|
||||||
level=INFO)
|
|
||||||
for r_id in relation_ids('identity-service'):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
if ca_cert is None:
|
|
||||||
ca_cert = relation_get('ca_cert',
|
|
||||||
rid=r_id, unit=unit)
|
|
||||||
return ca_cert
|
|
||||||
|
|
||||||
|
|
||||||
def install_ca_cert(ca_cert):
|
|
||||||
if ca_cert:
|
|
||||||
with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
|
|
||||||
'w') as crt:
|
|
||||||
crt.write(ca_cert)
|
|
||||||
subprocess.check_call(['update-ca-certificates', '--fresh'])
|
|
@ -1,272 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Helpers for clustering and determining "cluster leadership" and other
|
|
||||||
clustering-related helpers.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
|
|
||||||
from socket import gethostname as get_unit_hostname
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
relation_get,
|
|
||||||
config as config_get,
|
|
||||||
INFO,
|
|
||||||
ERROR,
|
|
||||||
WARNING,
|
|
||||||
unit_get,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.decorators import (
|
|
||||||
retry_on_exception,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.strutils import (
|
|
||||||
bool_from_string,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class HAIncompleteConfig(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class CRMResourceNotFound(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def is_elected_leader(resource):
|
|
||||||
"""
|
|
||||||
Returns True if the charm executing this is the elected cluster leader.
|
|
||||||
|
|
||||||
It relies on two mechanisms to determine leadership:
|
|
||||||
1. If the charm is part of a corosync cluster, call corosync to
|
|
||||||
determine leadership.
|
|
||||||
2. If the charm is not part of a corosync cluster, the leader is
|
|
||||||
determined as being "the alive unit with the lowest unit numer". In
|
|
||||||
other words, the oldest surviving unit.
|
|
||||||
"""
|
|
||||||
if is_clustered():
|
|
||||||
if not is_crm_leader(resource):
|
|
||||||
log('Deferring action to CRM leader.', level=INFO)
|
|
||||||
return False
|
|
||||||
else:
|
|
||||||
peers = peer_units()
|
|
||||||
if peers and not oldest_peer(peers):
|
|
||||||
log('Deferring action to oldest service unit.', level=INFO)
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def is_clustered():
|
|
||||||
for r_id in (relation_ids('ha') or []):
|
|
||||||
for unit in (relation_list(r_id) or []):
|
|
||||||
clustered = relation_get('clustered',
|
|
||||||
rid=r_id,
|
|
||||||
unit=unit)
|
|
||||||
if clustered:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
|
|
||||||
def is_crm_leader(resource, retry=False):
|
|
||||||
"""
|
|
||||||
Returns True if the charm calling this is the elected corosync leader,
|
|
||||||
as returned by calling the external "crm" command.
|
|
||||||
|
|
||||||
We allow this operation to be retried to avoid the possibility of getting a
|
|
||||||
false negative. See LP #1396246 for more info.
|
|
||||||
"""
|
|
||||||
cmd = ['crm', 'resource', 'show', resource]
|
|
||||||
try:
|
|
||||||
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
|
||||||
if not isinstance(status, six.text_type):
|
|
||||||
status = six.text_type(status, "utf-8")
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
status = None
|
|
||||||
|
|
||||||
if status and get_unit_hostname() in status:
|
|
||||||
return True
|
|
||||||
|
|
||||||
if status and "resource %s is NOT running" % (resource) in status:
|
|
||||||
raise CRMResourceNotFound("CRM resource %s not found" % (resource))
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_leader(resource):
|
|
||||||
log("is_leader is deprecated. Please consider using is_crm_leader "
|
|
||||||
"instead.", level=WARNING)
|
|
||||||
return is_crm_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_units(peer_relation="cluster"):
|
|
||||||
peers = []
|
|
||||||
for r_id in (relation_ids(peer_relation) or []):
|
|
||||||
for unit in (relation_list(r_id) or []):
|
|
||||||
peers.append(unit)
|
|
||||||
return peers
|
|
||||||
|
|
||||||
|
|
||||||
def peer_ips(peer_relation='cluster', addr_key='private-address'):
|
|
||||||
'''Return a dict of peers and their private-address'''
|
|
||||||
peers = {}
|
|
||||||
for r_id in relation_ids(peer_relation):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
|
|
||||||
return peers
|
|
||||||
|
|
||||||
|
|
||||||
def oldest_peer(peers):
|
|
||||||
"""Determines who the oldest peer is by comparing unit numbers."""
|
|
||||||
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
|
|
||||||
for peer in peers:
|
|
||||||
remote_unit_no = int(peer.split('/')[1])
|
|
||||||
if remote_unit_no < local_unit_no:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def eligible_leader(resource):
|
|
||||||
log("eligible_leader is deprecated. Please consider using "
|
|
||||||
"is_elected_leader instead.", level=WARNING)
|
|
||||||
return is_elected_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def https():
|
|
||||||
'''
|
|
||||||
Determines whether enough data has been provided in configuration
|
|
||||||
or relation data to configure HTTPS
|
|
||||||
.
|
|
||||||
returns: boolean
|
|
||||||
'''
|
|
||||||
use_https = config_get('use-https')
|
|
||||||
if use_https and bool_from_string(use_https):
|
|
||||||
return True
|
|
||||||
if config_get('ssl_cert') and config_get('ssl_key'):
|
|
||||||
return True
|
|
||||||
for r_id in relation_ids('identity-service'):
|
|
||||||
for unit in relation_list(r_id):
|
|
||||||
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
|
|
||||||
rel_state = [
|
|
||||||
relation_get('https_keystone', rid=r_id, unit=unit),
|
|
||||||
relation_get('ca_cert', rid=r_id, unit=unit),
|
|
||||||
]
|
|
||||||
# NOTE: works around (LP: #1203241)
|
|
||||||
if (None not in rel_state) and ('' not in rel_state):
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def determine_api_port(public_port, singlenode_mode=False):
|
|
||||||
'''
|
|
||||||
Determine correct API server listening port based on
|
|
||||||
existence of HTTPS reverse proxy and/or haproxy.
|
|
||||||
|
|
||||||
public_port: int: standard public port for given service
|
|
||||||
|
|
||||||
singlenode_mode: boolean: Shuffle ports when only a single unit is present
|
|
||||||
|
|
||||||
returns: int: the correct listening port for the API service
|
|
||||||
'''
|
|
||||||
i = 0
|
|
||||||
if singlenode_mode:
|
|
||||||
i += 1
|
|
||||||
elif len(peer_units()) > 0 or is_clustered():
|
|
||||||
i += 1
|
|
||||||
if https():
|
|
||||||
i += 1
|
|
||||||
return public_port - (i * 10)
|
|
||||||
|
|
||||||
|
|
||||||
def determine_apache_port(public_port, singlenode_mode=False):
|
|
||||||
'''
|
|
||||||
Description: Determine correct apache listening port based on public IP +
|
|
||||||
state of the cluster.
|
|
||||||
|
|
||||||
public_port: int: standard public port for given service
|
|
||||||
|
|
||||||
singlenode_mode: boolean: Shuffle ports when only a single unit is present
|
|
||||||
|
|
||||||
returns: int: the correct listening port for the HAProxy service
|
|
||||||
'''
|
|
||||||
i = 0
|
|
||||||
if singlenode_mode:
|
|
||||||
i += 1
|
|
||||||
elif len(peer_units()) > 0 or is_clustered():
|
|
||||||
i += 1
|
|
||||||
return public_port - (i * 10)
|
|
||||||
|
|
||||||
|
|
||||||
def get_hacluster_config(exclude_keys=None):
|
|
||||||
'''
|
|
||||||
Obtains all relevant configuration from charm configuration required
|
|
||||||
for initiating a relation to hacluster:
|
|
||||||
|
|
||||||
ha-bindiface, ha-mcastport, vip
|
|
||||||
|
|
||||||
param: exclude_keys: list of setting key(s) to be excluded.
|
|
||||||
returns: dict: A dict containing settings keyed by setting name.
|
|
||||||
raises: HAIncompleteConfig if settings are missing.
|
|
||||||
'''
|
|
||||||
settings = ['ha-bindiface', 'ha-mcastport', 'vip']
|
|
||||||
conf = {}
|
|
||||||
for setting in settings:
|
|
||||||
if exclude_keys and setting in exclude_keys:
|
|
||||||
continue
|
|
||||||
|
|
||||||
conf[setting] = config_get(setting)
|
|
||||||
missing = []
|
|
||||||
[missing.append(s) for s, v in six.iteritems(conf) if v is None]
|
|
||||||
if missing:
|
|
||||||
log('Insufficient config data to configure hacluster.', level=ERROR)
|
|
||||||
raise HAIncompleteConfig
|
|
||||||
return conf
|
|
||||||
|
|
||||||
|
|
||||||
def canonical_url(configs, vip_setting='vip'):
|
|
||||||
'''
|
|
||||||
Returns the correct HTTP URL to this host given the state of HTTPS
|
|
||||||
configuration and hacluster.
|
|
||||||
|
|
||||||
:configs : OSTemplateRenderer: A config tempating object to inspect for
|
|
||||||
a complete https context.
|
|
||||||
|
|
||||||
:vip_setting: str: Setting in charm config that specifies
|
|
||||||
VIP address.
|
|
||||||
'''
|
|
||||||
scheme = 'http'
|
|
||||||
if 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
if is_clustered():
|
|
||||||
addr = config_get(vip_setting)
|
|
||||||
else:
|
|
||||||
addr = unit_get('private-address')
|
|
||||||
return '%s://%s' % (scheme, addr)
|
|
@ -1,450 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import glob
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import six
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import unit_get
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
import netifaces
|
|
||||||
except ImportError:
|
|
||||||
apt_install('python-netifaces')
|
|
||||||
import netifaces
|
|
||||||
|
|
||||||
try:
|
|
||||||
import netaddr
|
|
||||||
except ImportError:
|
|
||||||
apt_install('python-netaddr')
|
|
||||||
import netaddr
|
|
||||||
|
|
||||||
|
|
||||||
def _validate_cidr(network):
|
|
||||||
try:
|
|
||||||
netaddr.IPNetwork(network)
|
|
||||||
except (netaddr.core.AddrFormatError, ValueError):
|
|
||||||
raise ValueError("Network (%s) is not in CIDR presentation format" %
|
|
||||||
network)
|
|
||||||
|
|
||||||
|
|
||||||
def no_ip_found_error_out(network):
|
|
||||||
errmsg = ("No IP address found in network: %s" % network)
|
|
||||||
raise ValueError(errmsg)
|
|
||||||
|
|
||||||
|
|
||||||
def get_address_in_network(network, fallback=None, fatal=False):
|
|
||||||
"""Get an IPv4 or IPv6 address within the network from the host.
|
|
||||||
|
|
||||||
:param network (str): CIDR presentation format. For example,
|
|
||||||
'192.168.1.0/24'.
|
|
||||||
:param fallback (str): If no address is found, return fallback.
|
|
||||||
:param fatal (boolean): If no address is found, fallback is not
|
|
||||||
set and fatal is True then exit(1).
|
|
||||||
"""
|
|
||||||
if network is None:
|
|
||||||
if fallback is not None:
|
|
||||||
return fallback
|
|
||||||
|
|
||||||
if fatal:
|
|
||||||
no_ip_found_error_out(network)
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
_validate_cidr(network)
|
|
||||||
network = netaddr.IPNetwork(network)
|
|
||||||
for iface in netifaces.interfaces():
|
|
||||||
addresses = netifaces.ifaddresses(iface)
|
|
||||||
if network.version == 4 and netifaces.AF_INET in addresses:
|
|
||||||
addr = addresses[netifaces.AF_INET][0]['addr']
|
|
||||||
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
|
||||||
cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
|
||||||
if cidr in network:
|
|
||||||
return str(cidr.ip)
|
|
||||||
|
|
||||||
if network.version == 6 and netifaces.AF_INET6 in addresses:
|
|
||||||
for addr in addresses[netifaces.AF_INET6]:
|
|
||||||
if not addr['addr'].startswith('fe80'):
|
|
||||||
cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
|
||||||
addr['netmask']))
|
|
||||||
if cidr in network:
|
|
||||||
return str(cidr.ip)
|
|
||||||
|
|
||||||
if fallback is not None:
|
|
||||||
return fallback
|
|
||||||
|
|
||||||
if fatal:
|
|
||||||
no_ip_found_error_out(network)
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def is_ipv6(address):
|
|
||||||
"""Determine whether provided address is IPv6 or not."""
|
|
||||||
try:
|
|
||||||
address = netaddr.IPAddress(address)
|
|
||||||
except netaddr.AddrFormatError:
|
|
||||||
# probably a hostname - so not an address at all!
|
|
||||||
return False
|
|
||||||
|
|
||||||
return address.version == 6
|
|
||||||
|
|
||||||
|
|
||||||
def is_address_in_network(network, address):
|
|
||||||
"""
|
|
||||||
Determine whether the provided address is within a network range.
|
|
||||||
|
|
||||||
:param network (str): CIDR presentation format. For example,
|
|
||||||
'192.168.1.0/24'.
|
|
||||||
:param address: An individual IPv4 or IPv6 address without a net
|
|
||||||
mask or subnet prefix. For example, '192.168.1.1'.
|
|
||||||
:returns boolean: Flag indicating whether address is in network.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
network = netaddr.IPNetwork(network)
|
|
||||||
except (netaddr.core.AddrFormatError, ValueError):
|
|
||||||
raise ValueError("Network (%s) is not in CIDR presentation format" %
|
|
||||||
network)
|
|
||||||
|
|
||||||
try:
|
|
||||||
address = netaddr.IPAddress(address)
|
|
||||||
except (netaddr.core.AddrFormatError, ValueError):
|
|
||||||
raise ValueError("Address (%s) is not in correct presentation format" %
|
|
||||||
address)
|
|
||||||
|
|
||||||
if address in network:
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def _get_for_address(address, key):
|
|
||||||
"""Retrieve an attribute of or the physical interface that
|
|
||||||
the IP address provided could be bound to.
|
|
||||||
|
|
||||||
:param address (str): An individual IPv4 or IPv6 address without a net
|
|
||||||
mask or subnet prefix. For example, '192.168.1.1'.
|
|
||||||
:param key: 'iface' for the physical interface name or an attribute
|
|
||||||
of the configured interface, for example 'netmask'.
|
|
||||||
:returns str: Requested attribute or None if address is not bindable.
|
|
||||||
"""
|
|
||||||
address = netaddr.IPAddress(address)
|
|
||||||
for iface in netifaces.interfaces():
|
|
||||||
addresses = netifaces.ifaddresses(iface)
|
|
||||||
if address.version == 4 and netifaces.AF_INET in addresses:
|
|
||||||
addr = addresses[netifaces.AF_INET][0]['addr']
|
|
||||||
netmask = addresses[netifaces.AF_INET][0]['netmask']
|
|
||||||
network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
|
|
||||||
cidr = network.cidr
|
|
||||||
if address in cidr:
|
|
||||||
if key == 'iface':
|
|
||||||
return iface
|
|
||||||
else:
|
|
||||||
return addresses[netifaces.AF_INET][0][key]
|
|
||||||
|
|
||||||
if address.version == 6 and netifaces.AF_INET6 in addresses:
|
|
||||||
for addr in addresses[netifaces.AF_INET6]:
|
|
||||||
if not addr['addr'].startswith('fe80'):
|
|
||||||
network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
|
|
||||||
addr['netmask']))
|
|
||||||
cidr = network.cidr
|
|
||||||
if address in cidr:
|
|
||||||
if key == 'iface':
|
|
||||||
return iface
|
|
||||||
elif key == 'netmask' and cidr:
|
|
||||||
return str(cidr).split('/')[1]
|
|
||||||
else:
|
|
||||||
return addr[key]
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
get_iface_for_address = partial(_get_for_address, key='iface')
|
|
||||||
|
|
||||||
|
|
||||||
get_netmask_for_address = partial(_get_for_address, key='netmask')
|
|
||||||
|
|
||||||
|
|
||||||
def format_ipv6_addr(address):
|
|
||||||
"""If address is IPv6, wrap it in '[]' otherwise return None.
|
|
||||||
|
|
||||||
This is required by most configuration files when specifying IPv6
|
|
||||||
addresses.
|
|
||||||
"""
|
|
||||||
if is_ipv6(address):
|
|
||||||
return "[%s]" % address
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
|
|
||||||
fatal=True, exc_list=None):
|
|
||||||
"""Return the assigned IP address for a given interface, if any."""
|
|
||||||
# Extract nic if passed /dev/ethX
|
|
||||||
if '/' in iface:
|
|
||||||
iface = iface.split('/')[-1]
|
|
||||||
|
|
||||||
if not exc_list:
|
|
||||||
exc_list = []
|
|
||||||
|
|
||||||
try:
|
|
||||||
inet_num = getattr(netifaces, inet_type)
|
|
||||||
except AttributeError:
|
|
||||||
raise Exception("Unknown inet type '%s'" % str(inet_type))
|
|
||||||
|
|
||||||
interfaces = netifaces.interfaces()
|
|
||||||
if inc_aliases:
|
|
||||||
ifaces = []
|
|
||||||
for _iface in interfaces:
|
|
||||||
if iface == _iface or _iface.split(':')[0] == iface:
|
|
||||||
ifaces.append(_iface)
|
|
||||||
|
|
||||||
if fatal and not ifaces:
|
|
||||||
raise Exception("Invalid interface '%s'" % iface)
|
|
||||||
|
|
||||||
ifaces.sort()
|
|
||||||
else:
|
|
||||||
if iface not in interfaces:
|
|
||||||
if fatal:
|
|
||||||
raise Exception("Interface '%s' not found " % (iface))
|
|
||||||
else:
|
|
||||||
return []
|
|
||||||
|
|
||||||
else:
|
|
||||||
ifaces = [iface]
|
|
||||||
|
|
||||||
addresses = []
|
|
||||||
for netiface in ifaces:
|
|
||||||
net_info = netifaces.ifaddresses(netiface)
|
|
||||||
if inet_num in net_info:
|
|
||||||
for entry in net_info[inet_num]:
|
|
||||||
if 'addr' in entry and entry['addr'] not in exc_list:
|
|
||||||
addresses.append(entry['addr'])
|
|
||||||
|
|
||||||
if fatal and not addresses:
|
|
||||||
raise Exception("Interface '%s' doesn't have any %s addresses." %
|
|
||||||
(iface, inet_type))
|
|
||||||
|
|
||||||
return sorted(addresses)
|
|
||||||
|
|
||||||
|
|
||||||
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
|
|
||||||
|
|
||||||
|
|
||||||
def get_iface_from_addr(addr):
|
|
||||||
"""Work out on which interface the provided address is configured."""
|
|
||||||
for iface in netifaces.interfaces():
|
|
||||||
addresses = netifaces.ifaddresses(iface)
|
|
||||||
for inet_type in addresses:
|
|
||||||
for _addr in addresses[inet_type]:
|
|
||||||
_addr = _addr['addr']
|
|
||||||
# link local
|
|
||||||
ll_key = re.compile("(.+)%.*")
|
|
||||||
raw = re.match(ll_key, _addr)
|
|
||||||
if raw:
|
|
||||||
_addr = raw.group(1)
|
|
||||||
|
|
||||||
if _addr == addr:
|
|
||||||
log("Address '%s' is configured on iface '%s'" %
|
|
||||||
(addr, iface))
|
|
||||||
return iface
|
|
||||||
|
|
||||||
msg = "Unable to infer net iface on which '%s' is configured" % (addr)
|
|
||||||
raise Exception(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def sniff_iface(f):
|
|
||||||
"""Ensure decorated function is called with a value for iface.
|
|
||||||
|
|
||||||
If no iface provided, inject net iface inferred from unit private address.
|
|
||||||
"""
|
|
||||||
def iface_sniffer(*args, **kwargs):
|
|
||||||
if not kwargs.get('iface', None):
|
|
||||||
kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
|
|
||||||
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
|
|
||||||
return iface_sniffer
|
|
||||||
|
|
||||||
|
|
||||||
@sniff_iface
|
|
||||||
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
|
|
||||||
dynamic_only=True):
|
|
||||||
"""Get assigned IPv6 address for a given interface.
|
|
||||||
|
|
||||||
Returns list of addresses found. If no address found, returns empty list.
|
|
||||||
|
|
||||||
If iface is None, we infer the current primary interface by doing a reverse
|
|
||||||
lookup on the unit private-address.
|
|
||||||
|
|
||||||
We currently only support scope global IPv6 addresses i.e. non-temporary
|
|
||||||
addresses. If no global IPv6 address is found, return the first one found
|
|
||||||
in the ipv6 address list.
|
|
||||||
"""
|
|
||||||
addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
|
|
||||||
inc_aliases=inc_aliases, fatal=fatal,
|
|
||||||
exc_list=exc_list)
|
|
||||||
|
|
||||||
if addresses:
|
|
||||||
global_addrs = []
|
|
||||||
for addr in addresses:
|
|
||||||
key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
|
|
||||||
m = re.match(key_scope_link_local, addr)
|
|
||||||
if m:
|
|
||||||
eui_64_mac = m.group(1)
|
|
||||||
iface = m.group(2)
|
|
||||||
else:
|
|
||||||
global_addrs.append(addr)
|
|
||||||
|
|
||||||
if global_addrs:
|
|
||||||
# Make sure any found global addresses are not temporary
|
|
||||||
cmd = ['ip', 'addr', 'show', iface]
|
|
||||||
out = subprocess.check_output(cmd).decode('UTF-8')
|
|
||||||
if dynamic_only:
|
|
||||||
key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
|
|
||||||
else:
|
|
||||||
key = re.compile("inet6 (.+)/[0-9]+ scope global.*")
|
|
||||||
|
|
||||||
addrs = []
|
|
||||||
for line in out.split('\n'):
|
|
||||||
line = line.strip()
|
|
||||||
m = re.match(key, line)
|
|
||||||
if m and 'temporary' not in line:
|
|
||||||
# Return the first valid address we find
|
|
||||||
for addr in global_addrs:
|
|
||||||
if m.group(1) == addr:
|
|
||||||
if not dynamic_only or \
|
|
||||||
m.group(1).endswith(eui_64_mac):
|
|
||||||
addrs.append(addr)
|
|
||||||
|
|
||||||
if addrs:
|
|
||||||
return addrs
|
|
||||||
|
|
||||||
if fatal:
|
|
||||||
raise Exception("Interface '%s' does not have a scope global "
|
|
||||||
"non-temporary ipv6 address." % iface)
|
|
||||||
|
|
||||||
return []
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
|
|
||||||
"""Return a list of bridges on the system."""
|
|
||||||
b_regex = "%s/*/bridge" % vnic_dir
|
|
||||||
return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
|
|
||||||
"""Return a list of nics comprising a given bridge on the system."""
|
|
||||||
brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
|
|
||||||
return [x.split('/')[-1] for x in glob.glob(brif_regex)]
|
|
||||||
|
|
||||||
|
|
||||||
def is_bridge_member(nic):
|
|
||||||
"""Check if a given nic is a member of a bridge."""
|
|
||||||
for bridge in get_bridges():
|
|
||||||
if nic in get_bridge_nics(bridge):
|
|
||||||
return True
|
|
||||||
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_ip(address):
|
|
||||||
"""
|
|
||||||
Returns True if address is a valid IP address.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
# Test to see if already an IPv4 address
|
|
||||||
socket.inet_aton(address)
|
|
||||||
return True
|
|
||||||
except socket.error:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def ns_query(address):
|
|
||||||
try:
|
|
||||||
import dns.resolver
|
|
||||||
except ImportError:
|
|
||||||
apt_install('python-dnspython')
|
|
||||||
import dns.resolver
|
|
||||||
|
|
||||||
if isinstance(address, dns.name.Name):
|
|
||||||
rtype = 'PTR'
|
|
||||||
elif isinstance(address, six.string_types):
|
|
||||||
rtype = 'A'
|
|
||||||
else:
|
|
||||||
return None
|
|
||||||
|
|
||||||
answers = dns.resolver.query(address, rtype)
|
|
||||||
if answers:
|
|
||||||
return str(answers[0])
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_host_ip(hostname, fallback=None):
|
|
||||||
"""
|
|
||||||
Resolves the IP for a given hostname, or returns
|
|
||||||
the input if it is already an IP.
|
|
||||||
"""
|
|
||||||
if is_ip(hostname):
|
|
||||||
return hostname
|
|
||||||
|
|
||||||
ip_addr = ns_query(hostname)
|
|
||||||
if not ip_addr:
|
|
||||||
try:
|
|
||||||
ip_addr = socket.gethostbyname(hostname)
|
|
||||||
except:
|
|
||||||
log("Failed to resolve hostname '%s'" % (hostname),
|
|
||||||
level=WARNING)
|
|
||||||
return fallback
|
|
||||||
return ip_addr
|
|
||||||
|
|
||||||
|
|
||||||
def get_hostname(address, fqdn=True):
|
|
||||||
"""
|
|
||||||
Resolves hostname for given IP, or returns the input
|
|
||||||
if it is already a hostname.
|
|
||||||
"""
|
|
||||||
if is_ip(address):
|
|
||||||
try:
|
|
||||||
import dns.reversename
|
|
||||||
except ImportError:
|
|
||||||
apt_install("python-dnspython")
|
|
||||||
import dns.reversename
|
|
||||||
|
|
||||||
rev = dns.reversename.from_address(address)
|
|
||||||
result = ns_query(rev)
|
|
||||||
if not result:
|
|
||||||
return None
|
|
||||||
else:
|
|
||||||
result = address
|
|
||||||
|
|
||||||
if fqdn:
|
|
||||||
# strip trailing .
|
|
||||||
if result.endswith('.'):
|
|
||||||
return result[:-1]
|
|
||||||
else:
|
|
||||||
return result
|
|
||||||
else:
|
|
||||||
return result.split('.')[0]
|
|
@ -1,96 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
''' Helpers for interacting with OpenvSwitch '''
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log, WARNING
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
service
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def add_bridge(name):
|
|
||||||
''' Add the named bridge to openvswitch '''
|
|
||||||
log('Creating bridge {}'.format(name))
|
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
|
|
||||||
|
|
||||||
|
|
||||||
def del_bridge(name):
|
|
||||||
''' Delete the named bridge from openvswitch '''
|
|
||||||
log('Deleting bridge {}'.format(name))
|
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
|
|
||||||
|
|
||||||
|
|
||||||
def add_bridge_port(name, port, promisc=False):
|
|
||||||
''' Add a port to the named openvswitch bridge '''
|
|
||||||
log('Adding port {} to bridge {}'.format(port, name))
|
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
|
|
||||||
name, port])
|
|
||||||
subprocess.check_call(["ip", "link", "set", port, "up"])
|
|
||||||
if promisc:
|
|
||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
|
|
||||||
else:
|
|
||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
|
||||||
|
|
||||||
|
|
||||||
def del_bridge_port(name, port):
|
|
||||||
''' Delete a port from the named openvswitch bridge '''
|
|
||||||
log('Deleting port {} from bridge {}'.format(port, name))
|
|
||||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
|
|
||||||
name, port])
|
|
||||||
subprocess.check_call(["ip", "link", "set", port, "down"])
|
|
||||||
subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])
|
|
||||||
|
|
||||||
|
|
||||||
def set_manager(manager):
|
|
||||||
''' Set the controller for the local openvswitch '''
|
|
||||||
log('Setting manager for local ovs to {}'.format(manager))
|
|
||||||
subprocess.check_call(['ovs-vsctl', 'set-manager',
|
|
||||||
'ssl:{}'.format(manager)])
|
|
||||||
|
|
||||||
|
|
||||||
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
|
|
||||||
|
|
||||||
|
|
||||||
def get_certificate():
|
|
||||||
''' Read openvswitch certificate from disk '''
|
|
||||||
if os.path.exists(CERT_PATH):
|
|
||||||
log('Reading ovs certificate from {}'.format(CERT_PATH))
|
|
||||||
with open(CERT_PATH, 'r') as cert:
|
|
||||||
full_cert = cert.read()
|
|
||||||
begin_marker = "-----BEGIN CERTIFICATE-----"
|
|
||||||
end_marker = "-----END CERTIFICATE-----"
|
|
||||||
begin_index = full_cert.find(begin_marker)
|
|
||||||
end_index = full_cert.rfind(end_marker)
|
|
||||||
if end_index == -1 or begin_index == -1:
|
|
||||||
raise RuntimeError("Certificate does not contain valid begin"
|
|
||||||
" and end markers.")
|
|
||||||
full_cert = full_cert[begin_index:(end_index + len(end_marker))]
|
|
||||||
return full_cert
|
|
||||||
else:
|
|
||||||
log('Certificate not found', level=WARNING)
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def full_restart():
|
|
||||||
''' Full restart and reload of openvswitch '''
|
|
||||||
if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
|
|
||||||
service('start', 'openvswitch-force-reload-kmod')
|
|
||||||
else:
|
|
||||||
service('force-reload-kmod', 'openvswitch-switch')
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,146 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import six
|
|
||||||
from collections import OrderedDict
|
|
||||||
from charmhelpers.contrib.amulet.deployment import (
|
|
||||||
AmuletDeployment
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletDeployment(AmuletDeployment):
|
|
||||||
"""OpenStack amulet deployment.
|
|
||||||
|
|
||||||
This class inherits from AmuletDeployment and has additional support
|
|
||||||
that is specifically for use by OpenStack charms.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, series=None, openstack=None, source=None, stable=True):
|
|
||||||
"""Initialize the deployment environment."""
|
|
||||||
super(OpenStackAmuletDeployment, self).__init__(series)
|
|
||||||
self.openstack = openstack
|
|
||||||
self.source = source
|
|
||||||
self.stable = stable
|
|
||||||
# Note(coreycb): this needs to be changed when new next branches come
|
|
||||||
# out.
|
|
||||||
self.current_next = "trusty"
|
|
||||||
|
|
||||||
def _determine_branch_locations(self, other_services):
|
|
||||||
"""Determine the branch locations for the other services.
|
|
||||||
|
|
||||||
Determine if the local branch being tested is derived from its
|
|
||||||
stable or next (dev) branch, and based on this, use the corresonding
|
|
||||||
stable or next branches for the other_services."""
|
|
||||||
base_charms = ['mysql', 'mongodb']
|
|
||||||
|
|
||||||
if self.series in ['precise', 'trusty']:
|
|
||||||
base_series = self.series
|
|
||||||
else:
|
|
||||||
base_series = self.current_next
|
|
||||||
|
|
||||||
if self.stable:
|
|
||||||
for svc in other_services:
|
|
||||||
temp = 'lp:charms/{}/{}'
|
|
||||||
svc['location'] = temp.format(base_series,
|
|
||||||
svc['name'])
|
|
||||||
else:
|
|
||||||
for svc in other_services:
|
|
||||||
if svc['name'] in base_charms:
|
|
||||||
temp = 'lp:charms/{}/{}'
|
|
||||||
svc['location'] = temp.format(base_series,
|
|
||||||
svc['name'])
|
|
||||||
else:
|
|
||||||
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
|
||||||
svc['location'] = temp.format(self.current_next,
|
|
||||||
svc['name'])
|
|
||||||
return other_services
|
|
||||||
|
|
||||||
def _add_services(self, this_service, other_services):
|
|
||||||
"""Add services to the deployment and set openstack-origin/source."""
|
|
||||||
other_services = self._determine_branch_locations(other_services)
|
|
||||||
|
|
||||||
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
|
||||||
other_services)
|
|
||||||
|
|
||||||
services = other_services
|
|
||||||
services.append(this_service)
|
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
|
||||||
'ceph-osd', 'ceph-radosgw']
|
|
||||||
# Openstack subordinate charms do not expose an origin option as that
|
|
||||||
# is controlled by the principle
|
|
||||||
ignore = ['neutron-openvswitch']
|
|
||||||
|
|
||||||
if self.openstack:
|
|
||||||
for svc in services:
|
|
||||||
if svc['name'] not in use_source + ignore:
|
|
||||||
config = {'openstack-origin': self.openstack}
|
|
||||||
self.d.configure(svc['name'], config)
|
|
||||||
|
|
||||||
if self.source:
|
|
||||||
for svc in services:
|
|
||||||
if svc['name'] in use_source and svc['name'] not in ignore:
|
|
||||||
config = {'source': self.source}
|
|
||||||
self.d.configure(svc['name'], config)
|
|
||||||
|
|
||||||
def _configure_services(self, configs):
|
|
||||||
"""Configure all of the services."""
|
|
||||||
for service, config in six.iteritems(configs):
|
|
||||||
self.d.configure(service, config)
|
|
||||||
|
|
||||||
def _get_openstack_release(self):
|
|
||||||
"""Get openstack release.
|
|
||||||
|
|
||||||
Return an integer representing the enum value of the openstack
|
|
||||||
release.
|
|
||||||
"""
|
|
||||||
# Must be ordered by OpenStack release (not by Ubuntu release):
|
|
||||||
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
|
||||||
self.precise_havana, self.precise_icehouse,
|
|
||||||
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
|
||||||
self.trusty_kilo, self.vivid_kilo) = range(10)
|
|
||||||
|
|
||||||
releases = {
|
|
||||||
('precise', None): self.precise_essex,
|
|
||||||
('precise', 'cloud:precise-folsom'): self.precise_folsom,
|
|
||||||
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
|
|
||||||
('precise', 'cloud:precise-havana'): self.precise_havana,
|
|
||||||
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
|
|
||||||
('trusty', None): self.trusty_icehouse,
|
|
||||||
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
|
||||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
|
||||||
('utopic', None): self.utopic_juno,
|
|
||||||
('vivid', None): self.vivid_kilo}
|
|
||||||
return releases[(self.series, self.openstack)]
|
|
||||||
|
|
||||||
def _get_openstack_release_string(self):
|
|
||||||
"""Get openstack release string.
|
|
||||||
|
|
||||||
Return a string representing the openstack release.
|
|
||||||
"""
|
|
||||||
releases = OrderedDict([
|
|
||||||
('precise', 'essex'),
|
|
||||||
('quantal', 'folsom'),
|
|
||||||
('raring', 'grizzly'),
|
|
||||||
('saucy', 'havana'),
|
|
||||||
('trusty', 'icehouse'),
|
|
||||||
('utopic', 'juno'),
|
|
||||||
('vivid', 'kilo'),
|
|
||||||
])
|
|
||||||
if self.openstack:
|
|
||||||
os_origin = self.openstack.split(':')[1]
|
|
||||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
|
||||||
else:
|
|
||||||
return releases[self.series]
|
|
@ -1,294 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import time
|
|
||||||
import urllib
|
|
||||||
|
|
||||||
import glanceclient.v1.client as glance_client
|
|
||||||
import keystoneclient.v2_0 as keystone_client
|
|
||||||
import novaclient.v1_1.client as nova_client
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
|
||||||
AmuletUtils
|
|
||||||
)
|
|
||||||
|
|
||||||
DEBUG = logging.DEBUG
|
|
||||||
ERROR = logging.ERROR
|
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletUtils(AmuletUtils):
|
|
||||||
"""OpenStack amulet utilities.
|
|
||||||
|
|
||||||
This class inherits from AmuletUtils and has additional support
|
|
||||||
that is specifically for use by OpenStack charms.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, log_level=ERROR):
|
|
||||||
"""Initialize the deployment environment."""
|
|
||||||
super(OpenStackAmuletUtils, self).__init__(log_level)
|
|
||||||
|
|
||||||
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
|
|
||||||
public_port, expected):
|
|
||||||
"""Validate endpoint data.
|
|
||||||
|
|
||||||
Validate actual endpoint data vs expected endpoint data. The ports
|
|
||||||
are used to find the matching endpoint.
|
|
||||||
"""
|
|
||||||
found = False
|
|
||||||
for ep in endpoints:
|
|
||||||
self.log.debug('endpoint: {}'.format(repr(ep)))
|
|
||||||
if (admin_port in ep.adminurl and
|
|
||||||
internal_port in ep.internalurl and
|
|
||||||
public_port in ep.publicurl):
|
|
||||||
found = True
|
|
||||||
actual = {'id': ep.id,
|
|
||||||
'region': ep.region,
|
|
||||||
'adminurl': ep.adminurl,
|
|
||||||
'internalurl': ep.internalurl,
|
|
||||||
'publicurl': ep.publicurl,
|
|
||||||
'service_id': ep.service_id}
|
|
||||||
ret = self._validate_dict_data(expected, actual)
|
|
||||||
if ret:
|
|
||||||
return 'unexpected endpoint data - {}'.format(ret)
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
return 'endpoint not found'
|
|
||||||
|
|
||||||
def validate_svc_catalog_endpoint_data(self, expected, actual):
|
|
||||||
"""Validate service catalog endpoint data.
|
|
||||||
|
|
||||||
Validate a list of actual service catalog endpoints vs a list of
|
|
||||||
expected service catalog endpoints.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for k, v in six.iteritems(expected):
|
|
||||||
if k in actual:
|
|
||||||
ret = self._validate_dict_data(expected[k][0], actual[k][0])
|
|
||||||
if ret:
|
|
||||||
return self.endpoint_error(k, ret)
|
|
||||||
else:
|
|
||||||
return "endpoint {} does not exist".format(k)
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_tenant_data(self, expected, actual):
|
|
||||||
"""Validate tenant data.
|
|
||||||
|
|
||||||
Validate a list of actual tenant data vs list of expected tenant
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'enabled': act.enabled, 'description': act.description,
|
|
||||||
'name': act.name, 'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected tenant data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "tenant {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_role_data(self, expected, actual):
|
|
||||||
"""Validate role data.
|
|
||||||
|
|
||||||
Validate a list of actual role data vs a list of expected role
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'name': act.name, 'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected role data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "role {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_user_data(self, expected, actual):
|
|
||||||
"""Validate user data.
|
|
||||||
|
|
||||||
Validate a list of actual user data vs a list of expected user
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'enabled': act.enabled, 'name': act.name,
|
|
||||||
'email': act.email, 'tenantId': act.tenantId,
|
|
||||||
'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected user data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "user {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_flavor_data(self, expected, actual):
|
|
||||||
"""Validate flavor data.
|
|
||||||
|
|
||||||
Validate a list of actual flavors vs a list of expected flavors.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
act = [a.name for a in actual]
|
|
||||||
return self._validate_list_data(expected, act)
|
|
||||||
|
|
||||||
def tenant_exists(self, keystone, tenant):
|
|
||||||
"""Return True if tenant exists."""
|
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
|
||||||
tenant):
|
|
||||||
"""Authenticates admin user with the keystone admin endpoint."""
|
|
||||||
unit = keystone_sentry
|
|
||||||
service_ip = unit.relation('shared-db',
|
|
||||||
'mysql:shared-db')['private-address']
|
|
||||||
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
|
|
||||||
return keystone_client.Client(username=user, password=password,
|
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return keystone_client.Client(username=user, password=password,
|
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
|
||||||
"""Authenticates admin user with glance."""
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='image',
|
|
||||||
endpoint_type='adminURL')
|
|
||||||
return glance_client.Client(ep, token=keystone.auth_token)
|
|
||||||
|
|
||||||
def authenticate_nova_user(self, keystone, user, password, tenant):
|
|
||||||
"""Authenticates a regular user with nova-api."""
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return nova_client.Client(username=user, api_key=password,
|
|
||||||
project_id=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def create_cirros_image(self, glance, image_name):
|
|
||||||
"""Download the latest cirros image and upload it to glance."""
|
|
||||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
|
||||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
|
||||||
if http_proxy:
|
|
||||||
proxies = {'http': http_proxy}
|
|
||||||
opener = urllib.FancyURLopener(proxies)
|
|
||||||
else:
|
|
||||||
opener = urllib.FancyURLopener()
|
|
||||||
|
|
||||||
f = opener.open("http://download.cirros-cloud.net/version/released")
|
|
||||||
version = f.read().strip()
|
|
||||||
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
|
|
||||||
local_path = os.path.join('tests', cirros_img)
|
|
||||||
|
|
||||||
if not os.path.exists(local_path):
|
|
||||||
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
|
|
||||||
version, cirros_img)
|
|
||||||
opener.retrieve(cirros_url, local_path)
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
with open(local_path) as f:
|
|
||||||
image = glance.images.create(name=image_name, is_public=True,
|
|
||||||
disk_format='qcow2',
|
|
||||||
container_format='bare', data=f)
|
|
||||||
count = 1
|
|
||||||
status = image.status
|
|
||||||
while status != 'active' and count < 10:
|
|
||||||
time.sleep(3)
|
|
||||||
image = glance.images.get(image.id)
|
|
||||||
status = image.status
|
|
||||||
self.log.debug('image status: {}'.format(status))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if status != 'active':
|
|
||||||
self.log.error('image creation timed out')
|
|
||||||
return None
|
|
||||||
|
|
||||||
return image
|
|
||||||
|
|
||||||
def delete_image(self, glance, image):
|
|
||||||
"""Delete the specified image."""
|
|
||||||
num_before = len(list(glance.images.list()))
|
|
||||||
glance.images.delete(image)
|
|
||||||
|
|
||||||
count = 1
|
|
||||||
num_after = len(list(glance.images.list()))
|
|
||||||
while num_after != (num_before - 1) and count < 10:
|
|
||||||
time.sleep(3)
|
|
||||||
num_after = len(list(glance.images.list()))
|
|
||||||
self.log.debug('number of images: {}'.format(num_after))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
|
||||||
self.log.error('image deletion timed out')
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def create_instance(self, nova, image_name, instance_name, flavor):
|
|
||||||
"""Create the specified instance."""
|
|
||||||
image = nova.images.find(name=image_name)
|
|
||||||
flavor = nova.flavors.find(name=flavor)
|
|
||||||
instance = nova.servers.create(name=instance_name, image=image,
|
|
||||||
flavor=flavor)
|
|
||||||
|
|
||||||
count = 1
|
|
||||||
status = instance.status
|
|
||||||
while status != 'ACTIVE' and count < 60:
|
|
||||||
time.sleep(3)
|
|
||||||
instance = nova.servers.get(instance.id)
|
|
||||||
status = instance.status
|
|
||||||
self.log.debug('instance status: {}'.format(status))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if status != 'ACTIVE':
|
|
||||||
self.log.error('instance creation timed out')
|
|
||||||
return None
|
|
||||||
|
|
||||||
return instance
|
|
||||||
|
|
||||||
def delete_instance(self, nova, instance):
|
|
||||||
"""Delete the specified instance."""
|
|
||||||
num_before = len(list(nova.servers.list()))
|
|
||||||
nova.servers.delete(instance)
|
|
||||||
|
|
||||||
count = 1
|
|
||||||
num_after = len(list(nova.servers.list()))
|
|
||||||
while num_after != (num_before - 1) and count < 10:
|
|
||||||
time.sleep(3)
|
|
||||||
num_after = len(list(nova.servers.list()))
|
|
||||||
self.log.debug('number of instances: {}'.format(num_after))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
|
||||||
self.log.error('instance deletion timed out')
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
File diff suppressed because it is too large
Load Diff
@ -1,32 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#--------------------------------------------
|
|
||||||
# This file is managed by Juju
|
|
||||||
#--------------------------------------------
|
|
||||||
#
|
|
||||||
# Copyright 2009,2012 Canonical Ltd.
|
|
||||||
# Author: Tom Haddon
|
|
||||||
|
|
||||||
CRITICAL=0
|
|
||||||
NOTACTIVE=''
|
|
||||||
LOGFILE=/var/log/nagios/check_haproxy.log
|
|
||||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
|
|
||||||
|
|
||||||
for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
|
|
||||||
do
|
|
||||||
output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
|
|
||||||
if [ $? != 0 ]; then
|
|
||||||
date >> $LOGFILE
|
|
||||||
echo $output >> $LOGFILE
|
|
||||||
/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
|
|
||||||
CRITICAL=1
|
|
||||||
NOTACTIVE="${NOTACTIVE} $appserver"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ $CRITICAL = 1 ]; then
|
|
||||||
echo "CRITICAL:${NOTACTIVE}"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "OK: All haproxy instances looking good"
|
|
||||||
exit 0
|
|
@ -1,30 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#--------------------------------------------
|
|
||||||
# This file is managed by Juju
|
|
||||||
#--------------------------------------------
|
|
||||||
#
|
|
||||||
# Copyright 2009,2012 Canonical Ltd.
|
|
||||||
# Author: Tom Haddon
|
|
||||||
|
|
||||||
# These should be config options at some stage
|
|
||||||
CURRQthrsh=0
|
|
||||||
MAXQthrsh=100
|
|
||||||
|
|
||||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
|
|
||||||
|
|
||||||
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
|
|
||||||
|
|
||||||
for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
|
|
||||||
do
|
|
||||||
CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
|
|
||||||
MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
|
|
||||||
|
|
||||||
if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
|
|
||||||
echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "OK: All haproxy queue depths looking good"
|
|
||||||
exit 0
|
|
||||||
|
|
@ -1,146 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
unit_get,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_address_in_network,
|
|
||||||
is_address_in_network,
|
|
||||||
is_ipv6,
|
|
||||||
get_ipv6_addr,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
|
||||||
|
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
PUBLIC = 'public'
|
|
||||||
INTERNAL = 'int'
|
|
||||||
ADMIN = 'admin'
|
|
||||||
|
|
||||||
ADDRESS_MAP = {
|
|
||||||
PUBLIC: {
|
|
||||||
'config': 'os-public-network',
|
|
||||||
'fallback': 'public-address'
|
|
||||||
},
|
|
||||||
INTERNAL: {
|
|
||||||
'config': 'os-internal-network',
|
|
||||||
'fallback': 'private-address'
|
|
||||||
},
|
|
||||||
ADMIN: {
|
|
||||||
'config': 'os-admin-network',
|
|
||||||
'fallback': 'private-address'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def canonical_url(configs, endpoint_type=PUBLIC):
|
|
||||||
"""Returns the correct HTTP URL to this host given the state of HTTPS
|
|
||||||
configuration, hacluster and charm configuration.
|
|
||||||
|
|
||||||
:param configs: OSTemplateRenderer config templating object to inspect
|
|
||||||
for a complete https context.
|
|
||||||
:param endpoint_type: str endpoint type to resolve.
|
|
||||||
:param returns: str base URL for services on the current service unit.
|
|
||||||
"""
|
|
||||||
scheme = 'http'
|
|
||||||
if 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
address = resolve_address(endpoint_type)
|
|
||||||
if is_ipv6(address):
|
|
||||||
address = "[{}]".format(address)
|
|
||||||
return '%s://%s' % (scheme, address)
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_address(endpoint_type=PUBLIC):
|
|
||||||
"""Return unit address depending on net config.
|
|
||||||
|
|
||||||
If unit is clustered with vip(s) and has net splits defined, return vip on
|
|
||||||
correct network. If clustered with no nets defined, return primary vip.
|
|
||||||
|
|
||||||
If not clustered, return unit address ensuring address is on configured net
|
|
||||||
split if one is configured.
|
|
||||||
|
|
||||||
:param endpoint_type: Network endpoing type
|
|
||||||
"""
|
|
||||||
resolved_address = None
|
|
||||||
vips = config('vip')
|
|
||||||
if vips:
|
|
||||||
vips = vips.split()
|
|
||||||
|
|
||||||
net_type = ADDRESS_MAP[endpoint_type]['config']
|
|
||||||
net_addr = config(net_type)
|
|
||||||
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
|
|
||||||
clustered = is_clustered()
|
|
||||||
if clustered:
|
|
||||||
if not net_addr:
|
|
||||||
# If no net-splits defined, we expect a single vip
|
|
||||||
resolved_address = vips[0]
|
|
||||||
else:
|
|
||||||
for vip in vips:
|
|
||||||
if is_address_in_network(net_addr, vip):
|
|
||||||
resolved_address = vip
|
|
||||||
break
|
|
||||||
else:
|
|
||||||
if config('prefer-ipv6'):
|
|
||||||
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
|
|
||||||
else:
|
|
||||||
fallback_addr = unit_get(net_fallback)
|
|
||||||
|
|
||||||
resolved_address = get_address_in_network(net_addr, fallback_addr)
|
|
||||||
|
|
||||||
if resolved_address is None:
|
|
||||||
raise ValueError("Unable to resolve a suitable IP address based on "
|
|
||||||
"charm state and configuration. (net_type=%s, "
|
|
||||||
"clustered=%s)" % (net_type, clustered))
|
|
||||||
|
|
||||||
return resolved_address
|
|
||||||
|
|
||||||
|
|
||||||
def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
|
|
||||||
override=None):
|
|
||||||
"""Returns the correct endpoint URL to advertise to Keystone.
|
|
||||||
|
|
||||||
This method provides the correct endpoint URL which should be advertised to
|
|
||||||
the keystone charm for endpoint creation. This method allows for the url to
|
|
||||||
be overridden to force a keystone endpoint to have specific URL for any of
|
|
||||||
the defined scopes (admin, internal, public).
|
|
||||||
|
|
||||||
:param configs: OSTemplateRenderer config templating object to inspect
|
|
||||||
for a complete https context.
|
|
||||||
:param url_template: str format string for creating the url template. Only
|
|
||||||
two values will be passed - the scheme+hostname
|
|
||||||
returned by the canonical_url and the port.
|
|
||||||
:param endpoint_type: str endpoint type to resolve.
|
|
||||||
:param override: str the name of the config option which overrides the
|
|
||||||
endpoint URL defined by the charm itself. None will
|
|
||||||
disable any overrides (default).
|
|
||||||
"""
|
|
||||||
if override:
|
|
||||||
# Return any user-defined overrides for the keystone endpoint URL.
|
|
||||||
user_value = config(override)
|
|
||||||
if user_value:
|
|
||||||
return user_value.strip()
|
|
||||||
|
|
||||||
return url_template % (canonical_url(configs, endpoint_type), port)
|
|
||||||
|
|
||||||
|
|
||||||
public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
|
|
||||||
|
|
||||||
internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
|
|
||||||
|
|
||||||
admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
|
|
@ -1,337 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Various utilies for dealing with Neutron and the renaming from Quantum.
|
|
||||||
|
|
||||||
import six
|
|
||||||
from subprocess import check_output
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.openstack.utils import os_release
|
|
||||||
|
|
||||||
|
|
||||||
def headers_package():
|
|
||||||
"""Ensures correct linux-headers for running kernel are installed,
|
|
||||||
for building DKMS package"""
|
|
||||||
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
|
|
||||||
return 'linux-headers-%s' % kver
|
|
||||||
|
|
||||||
QUANTUM_CONF_DIR = '/etc/quantum'
|
|
||||||
|
|
||||||
|
|
||||||
def kernel_version():
|
|
||||||
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
|
|
||||||
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
|
|
||||||
kver = kver.split('.')
|
|
||||||
return (int(kver[0]), int(kver[1]))
|
|
||||||
|
|
||||||
|
|
||||||
def determine_dkms_package():
|
|
||||||
""" Determine which DKMS package should be used based on kernel version """
|
|
||||||
# NOTE: 3.13 kernels have support for GRE and VXLAN native
|
|
||||||
if kernel_version() >= (3, 13):
|
|
||||||
return []
|
|
||||||
else:
|
|
||||||
return ['openvswitch-datapath-dkms']
|
|
||||||
|
|
||||||
|
|
||||||
# legacy
|
|
||||||
|
|
||||||
|
|
||||||
def quantum_plugins():
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
return {
|
|
||||||
'ovs': {
|
|
||||||
'config': '/etc/quantum/plugins/openvswitch/'
|
|
||||||
'ovs_quantum_plugin.ini',
|
|
||||||
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
|
|
||||||
'OVSQuantumPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=QUANTUM_CONF_DIR)],
|
|
||||||
'services': ['quantum-plugin-openvswitch-agent'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['quantum-plugin-openvswitch-agent']],
|
|
||||||
'server_packages': ['quantum-server',
|
|
||||||
'quantum-plugin-openvswitch'],
|
|
||||||
'server_services': ['quantum-server']
|
|
||||||
},
|
|
||||||
'nvp': {
|
|
||||||
'config': '/etc/quantum/plugins/nicira/nvp.ini',
|
|
||||||
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
|
|
||||||
'QuantumPlugin.NvpPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=QUANTUM_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['quantum-server',
|
|
||||||
'quantum-plugin-nicira'],
|
|
||||||
'server_services': ['quantum-server']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
NEUTRON_CONF_DIR = '/etc/neutron'
|
|
||||||
|
|
||||||
|
|
||||||
def neutron_plugins():
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
release = os_release('nova-common')
|
|
||||||
plugins = {
|
|
||||||
'ovs': {
|
|
||||||
'config': '/etc/neutron/plugins/openvswitch/'
|
|
||||||
'ovs_neutron_plugin.ini',
|
|
||||||
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
|
|
||||||
'OVSNeutronPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': ['neutron-plugin-openvswitch-agent'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['neutron-plugin-openvswitch-agent']],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-openvswitch'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'nvp': {
|
|
||||||
'config': '/etc/neutron/plugins/nicira/nvp.ini',
|
|
||||||
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
|
|
||||||
'NeutronPlugin.NvpPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-nicira'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'nsx': {
|
|
||||||
'config': '/etc/neutron/plugins/vmware/nsx.ini',
|
|
||||||
'driver': 'vmware',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-vmware'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'n1kv': {
|
|
||||||
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
|
|
||||||
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['neutron-plugin-cisco']],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-cisco'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'Calico': {
|
|
||||||
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
|
|
||||||
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': ['calico-felix',
|
|
||||||
'bird',
|
|
||||||
'neutron-dhcp-agent',
|
|
||||||
'nova-api-metadata'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['calico-compute',
|
|
||||||
'bird',
|
|
||||||
'neutron-dhcp-agent',
|
|
||||||
'nova-api-metadata']],
|
|
||||||
'server_packages': ['neutron-server', 'calico-control'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'vsp': {
|
|
||||||
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
|
||||||
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'plumgrid': {
|
|
||||||
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
|
|
||||||
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('database-user'),
|
|
||||||
database=config('database'),
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [['plumgrid-lxc'],
|
|
||||||
['iovisor-dkms'],
|
|
||||||
['plumgrid-puppet']],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-plumgrid'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if release >= 'icehouse':
|
|
||||||
# NOTE: patch in ml2 plugin for icehouse onwards
|
|
||||||
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
|
|
||||||
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
|
|
||||||
plugins['ovs']['server_packages'] = ['neutron-server',
|
|
||||||
'neutron-plugin-ml2']
|
|
||||||
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
|
|
||||||
plugins['nvp'] = plugins['nsx']
|
|
||||||
return plugins
|
|
||||||
|
|
||||||
|
|
||||||
def neutron_plugin_attribute(plugin, attr, net_manager=None):
|
|
||||||
manager = net_manager or network_manager()
|
|
||||||
if manager == 'quantum':
|
|
||||||
plugins = quantum_plugins()
|
|
||||||
elif manager == 'neutron':
|
|
||||||
plugins = neutron_plugins()
|
|
||||||
else:
|
|
||||||
log("Network manager '%s' does not support plugins." % (manager),
|
|
||||||
level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
try:
|
|
||||||
_plugin = plugins[plugin]
|
|
||||||
except KeyError:
|
|
||||||
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _plugin[attr]
|
|
||||||
except KeyError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def network_manager():
|
|
||||||
'''
|
|
||||||
Deals with the renaming of Quantum to Neutron in H and any situations
|
|
||||||
that require compatability (eg, deploying H with network-manager=quantum,
|
|
||||||
upgrading from G).
|
|
||||||
'''
|
|
||||||
release = os_release('nova-common')
|
|
||||||
manager = config('network-manager').lower()
|
|
||||||
|
|
||||||
if manager not in ['quantum', 'neutron']:
|
|
||||||
return manager
|
|
||||||
|
|
||||||
if release in ['essex']:
|
|
||||||
# E does not support neutron
|
|
||||||
log('Neutron networking not supported in Essex.', level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
elif release in ['folsom', 'grizzly']:
|
|
||||||
# neutron is named quantum in F and G
|
|
||||||
return 'quantum'
|
|
||||||
else:
|
|
||||||
# ensure accurate naming for all releases post-H
|
|
||||||
return 'neutron'
|
|
||||||
|
|
||||||
|
|
||||||
def parse_mappings(mappings):
|
|
||||||
parsed = {}
|
|
||||||
if mappings:
|
|
||||||
mappings = mappings.split(' ')
|
|
||||||
for m in mappings:
|
|
||||||
p = m.partition(':')
|
|
||||||
if p[1] == ':':
|
|
||||||
parsed[p[0].strip()] = p[2].strip()
|
|
||||||
|
|
||||||
return parsed
|
|
||||||
|
|
||||||
|
|
||||||
def parse_bridge_mappings(mappings):
|
|
||||||
"""Parse bridge mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of provider:bridge mappings.
|
|
||||||
|
|
||||||
Returns dict of the form {provider:bridge}.
|
|
||||||
"""
|
|
||||||
return parse_mappings(mappings)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_data_port_mappings(mappings, default_bridge='br-data'):
|
|
||||||
"""Parse data port mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of bridge:port mappings.
|
|
||||||
|
|
||||||
Returns dict of the form {bridge:port}.
|
|
||||||
"""
|
|
||||||
_mappings = parse_mappings(mappings)
|
|
||||||
if not _mappings:
|
|
||||||
if not mappings:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
# For backwards-compatibility we need to support port-only provided in
|
|
||||||
# config.
|
|
||||||
_mappings = {default_bridge: mappings.split(' ')[0]}
|
|
||||||
|
|
||||||
bridges = _mappings.keys()
|
|
||||||
ports = _mappings.values()
|
|
||||||
if len(set(bridges)) != len(bridges):
|
|
||||||
raise Exception("It is not allowed to have more than one port "
|
|
||||||
"configured on the same bridge")
|
|
||||||
|
|
||||||
if len(set(ports)) != len(ports):
|
|
||||||
raise Exception("It is not allowed to have the same port configured "
|
|
||||||
"on more than one bridge")
|
|
||||||
|
|
||||||
return _mappings
|
|
||||||
|
|
||||||
|
|
||||||
def parse_vlan_range_mappings(mappings):
|
|
||||||
"""Parse vlan range mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of provider:start:end mappings.
|
|
||||||
|
|
||||||
Returns dict of the form {provider: (start, end)}.
|
|
||||||
"""
|
|
||||||
_mappings = parse_mappings(mappings)
|
|
||||||
if not _mappings:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
mappings = {}
|
|
||||||
for p, r in six.iteritems(_mappings):
|
|
||||||
mappings[p] = tuple(r.split(':'))
|
|
||||||
|
|
||||||
return mappings
|
|
@ -1,18 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
|
||||||
# module
|
|
@ -1,15 +0,0 @@
|
|||||||
###############################################################################
|
|
||||||
# [ WARNING ]
|
|
||||||
# cinder configuration file maintained by Juju
|
|
||||||
# local changes may be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
[global]
|
|
||||||
{% if auth -%}
|
|
||||||
auth_supported = {{ auth }}
|
|
||||||
keyring = /etc/ceph/$cluster.$name.keyring
|
|
||||||
mon host = {{ mon_hosts }}
|
|
||||||
{% endif -%}
|
|
||||||
log to syslog = {{ use_syslog }}
|
|
||||||
err to syslog = {{ use_syslog }}
|
|
||||||
clog to syslog = {{ use_syslog }}
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
|||||||
description "{{ service_description }}"
|
|
||||||
author "Juju {{ service_name }} Charm <juju@localhost>"
|
|
||||||
|
|
||||||
start on runlevel [2345]
|
|
||||||
stop on runlevel [!2345]
|
|
||||||
|
|
||||||
respawn
|
|
||||||
|
|
||||||
exec start-stop-daemon --start --chuid {{ user_name }} \
|
|
||||||
--chdir {{ start_dir }} --name {{ process_name }} \
|
|
||||||
--exec {{ executable_name }} -- \
|
|
||||||
{% for config_file in config_files -%}
|
|
||||||
--config-file={{ config_file }} \
|
|
||||||
{% endfor -%}
|
|
||||||
{% if log_file -%}
|
|
||||||
--log-file={{ log_file }}
|
|
||||||
{% endif -%}
|
|
@ -1,58 +0,0 @@
|
|||||||
global
|
|
||||||
log {{ local_host }} local0
|
|
||||||
log {{ local_host }} local1 notice
|
|
||||||
maxconn 20000
|
|
||||||
user haproxy
|
|
||||||
group haproxy
|
|
||||||
spread-checks 0
|
|
||||||
|
|
||||||
defaults
|
|
||||||
log global
|
|
||||||
mode tcp
|
|
||||||
option tcplog
|
|
||||||
option dontlognull
|
|
||||||
retries 3
|
|
||||||
timeout queue 1000
|
|
||||||
timeout connect 1000
|
|
||||||
{% if haproxy_client_timeout -%}
|
|
||||||
timeout client {{ haproxy_client_timeout }}
|
|
||||||
{% else -%}
|
|
||||||
timeout client 30000
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if haproxy_server_timeout -%}
|
|
||||||
timeout server {{ haproxy_server_timeout }}
|
|
||||||
{% else -%}
|
|
||||||
timeout server 30000
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
listen stats {{ stat_port }}
|
|
||||||
mode http
|
|
||||||
stats enable
|
|
||||||
stats hide-version
|
|
||||||
stats realm Haproxy\ Statistics
|
|
||||||
stats uri /
|
|
||||||
stats auth admin:password
|
|
||||||
|
|
||||||
{% if frontends -%}
|
|
||||||
{% for service, ports in service_ports.items() -%}
|
|
||||||
frontend tcp-in_{{ service }}
|
|
||||||
bind *:{{ ports[0] }}
|
|
||||||
{% if ipv6 -%}
|
|
||||||
bind :::{{ ports[0] }}
|
|
||||||
{% endif -%}
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
|
|
||||||
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
|
|
||||||
{% endfor -%}
|
|
||||||
default_backend {{ service }}_{{ default_backend }}
|
|
||||||
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
backend {{ service }}_{{ frontend }}
|
|
||||||
balance leastconn
|
|
||||||
{% for unit, address in frontends[frontend]['backends'].items() -%}
|
|
||||||
server {{ unit }} {{ address }}:{{ ports[1] }} check
|
|
||||||
{% endfor %}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endif -%}
|
|
@ -1,24 +0,0 @@
|
|||||||
{% if endpoints -%}
|
|
||||||
{% for ext_port in ext_ports -%}
|
|
||||||
Listen {{ ext_port }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% for address, endpoint, ext, int in endpoints -%}
|
|
||||||
<VirtualHost {{ address }}:{{ ext }}>
|
|
||||||
ServerName {{ endpoint }}
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
|
||||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
|
||||||
ProxyPass / http://localhost:{{ int }}/
|
|
||||||
ProxyPassReverse / http://localhost:{{ int }}/
|
|
||||||
ProxyPreserveHost on
|
|
||||||
</VirtualHost>
|
|
||||||
{% endfor -%}
|
|
||||||
<Proxy *>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
</Proxy>
|
|
||||||
<Location />
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</Location>
|
|
||||||
{% endif -%}
|
|
@ -1,24 +0,0 @@
|
|||||||
{% if endpoints -%}
|
|
||||||
{% for ext_port in ext_ports -%}
|
|
||||||
Listen {{ ext_port }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% for address, endpoint, ext, int in endpoints -%}
|
|
||||||
<VirtualHost {{ address }}:{{ ext }}>
|
|
||||||
ServerName {{ endpoint }}
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
|
||||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
|
||||||
ProxyPass / http://localhost:{{ int }}/
|
|
||||||
ProxyPassReverse / http://localhost:{{ int }}/
|
|
||||||
ProxyPreserveHost on
|
|
||||||
</VirtualHost>
|
|
||||||
{% endfor -%}
|
|
||||||
<Proxy *>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
</Proxy>
|
|
||||||
<Location />
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</Location>
|
|
||||||
{% endif -%}
|
|
@ -1,9 +0,0 @@
|
|||||||
{% if auth_host -%}
|
|
||||||
[keystone_authtoken]
|
|
||||||
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
|
|
||||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
|
|
||||||
admin_tenant_name = {{ admin_tenant_name }}
|
|
||||||
admin_user = {{ admin_user }}
|
|
||||||
admin_password = {{ admin_password }}
|
|
||||||
signing_dir = {{ signing_dir }}
|
|
||||||
{% endif -%}
|
|
@ -1,22 +0,0 @@
|
|||||||
{% if rabbitmq_host or rabbitmq_hosts -%}
|
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
rabbit_userid = {{ rabbitmq_user }}
|
|
||||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
|
||||||
rabbit_password = {{ rabbitmq_password }}
|
|
||||||
{% if rabbitmq_hosts -%}
|
|
||||||
rabbit_hosts = {{ rabbitmq_hosts }}
|
|
||||||
{% if rabbitmq_ha_queues -%}
|
|
||||||
rabbit_ha_queues = True
|
|
||||||
rabbit_durable_queues = False
|
|
||||||
{% endif -%}
|
|
||||||
{% else -%}
|
|
||||||
rabbit_host = {{ rabbitmq_host }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if rabbit_ssl_port -%}
|
|
||||||
rabbit_use_ssl = True
|
|
||||||
rabbit_port = {{ rabbit_ssl_port }}
|
|
||||||
{% if rabbit_ssl_ca -%}
|
|
||||||
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -1,14 +0,0 @@
|
|||||||
{% if zmq_host -%}
|
|
||||||
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
|
|
||||||
rpc_backend = zmq
|
|
||||||
rpc_zmq_host = {{ zmq_host }}
|
|
||||||
{% if zmq_redis_address -%}
|
|
||||||
rpc_zmq_matchmaker = redis
|
|
||||||
matchmaker_heartbeat_freq = 15
|
|
||||||
matchmaker_heartbeat_ttl = 30
|
|
||||||
[matchmaker_redis]
|
|
||||||
host = {{ zmq_redis_address }}
|
|
||||||
{% else -%}
|
|
||||||
rpc_zmq_matchmaker = ring
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -1,295 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
ERROR,
|
|
||||||
INFO
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
|
||||||
|
|
||||||
try:
|
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
|
||||||
except ImportError:
|
|
||||||
# python-jinja2 may not be installed yet, or we're running unittests.
|
|
||||||
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_loader(templates_dir, os_release):
|
|
||||||
"""
|
|
||||||
Create a jinja2.ChoiceLoader containing template dirs up to
|
|
||||||
and including os_release. If directory template directory
|
|
||||||
is missing at templates_dir, it will be omitted from the loader.
|
|
||||||
templates_dir is added to the bottom of the search list as a base
|
|
||||||
loading dir.
|
|
||||||
|
|
||||||
A charm may also ship a templates dir with this module
|
|
||||||
and it will be appended to the bottom of the search list, eg::
|
|
||||||
|
|
||||||
hooks/charmhelpers/contrib/openstack/templates
|
|
||||||
|
|
||||||
:param templates_dir (str): Base template directory containing release
|
|
||||||
sub-directories.
|
|
||||||
:param os_release (str): OpenStack release codename to construct template
|
|
||||||
loader.
|
|
||||||
:returns: jinja2.ChoiceLoader constructed with a list of
|
|
||||||
jinja2.FilesystemLoaders, ordered in descending
|
|
||||||
order by OpenStack release.
|
|
||||||
"""
|
|
||||||
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
|
|
||||||
for rel in six.itervalues(OPENSTACK_CODENAMES)]
|
|
||||||
|
|
||||||
if not os.path.isdir(templates_dir):
|
|
||||||
log('Templates directory not found @ %s.' % templates_dir,
|
|
||||||
level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
# the bottom contains tempaltes_dir and possibly a common templates dir
|
|
||||||
# shipped with the helper.
|
|
||||||
loaders = [FileSystemLoader(templates_dir)]
|
|
||||||
helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
|
|
||||||
if os.path.isdir(helper_templates):
|
|
||||||
loaders.append(FileSystemLoader(helper_templates))
|
|
||||||
|
|
||||||
for rel, tmpl_dir in tmpl_dirs:
|
|
||||||
if os.path.isdir(tmpl_dir):
|
|
||||||
loaders.insert(0, FileSystemLoader(tmpl_dir))
|
|
||||||
if rel == os_release:
|
|
||||||
break
|
|
||||||
log('Creating choice loader with dirs: %s' %
|
|
||||||
[l.searchpath for l in loaders], level=INFO)
|
|
||||||
return ChoiceLoader(loaders)
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigTemplate(object):
|
|
||||||
"""
|
|
||||||
Associates a config file template with a list of context generators.
|
|
||||||
Responsible for constructing a template context based on those generators.
|
|
||||||
"""
|
|
||||||
def __init__(self, config_file, contexts):
|
|
||||||
self.config_file = config_file
|
|
||||||
|
|
||||||
if hasattr(contexts, '__call__'):
|
|
||||||
self.contexts = [contexts]
|
|
||||||
else:
|
|
||||||
self.contexts = contexts
|
|
||||||
|
|
||||||
self._complete_contexts = []
|
|
||||||
|
|
||||||
def context(self):
|
|
||||||
ctxt = {}
|
|
||||||
for context in self.contexts:
|
|
||||||
_ctxt = context()
|
|
||||||
if _ctxt:
|
|
||||||
ctxt.update(_ctxt)
|
|
||||||
# track interfaces for every complete context.
|
|
||||||
[self._complete_contexts.append(interface)
|
|
||||||
for interface in context.interfaces
|
|
||||||
if interface not in self._complete_contexts]
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
def complete_contexts(self):
|
|
||||||
'''
|
|
||||||
Return a list of interfaces that have atisfied contexts.
|
|
||||||
'''
|
|
||||||
if self._complete_contexts:
|
|
||||||
return self._complete_contexts
|
|
||||||
self.context()
|
|
||||||
return self._complete_contexts
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigRenderer(object):
|
|
||||||
"""
|
|
||||||
This class provides a common templating system to be used by OpenStack
|
|
||||||
charms. It is intended to help charms share common code and templates,
|
|
||||||
and ease the burden of managing config templates across multiple OpenStack
|
|
||||||
releases.
|
|
||||||
|
|
||||||
Basic usage::
|
|
||||||
|
|
||||||
# import some common context generates from charmhelpers
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
|
|
||||||
# Create a renderer object for a specific OS release.
|
|
||||||
configs = OSConfigRenderer(templates_dir='/tmp/templates',
|
|
||||||
openstack_release='folsom')
|
|
||||||
# register some config files with context generators.
|
|
||||||
configs.register(config_file='/etc/nova/nova.conf',
|
|
||||||
contexts=[context.SharedDBContext(),
|
|
||||||
context.AMQPContext()])
|
|
||||||
configs.register(config_file='/etc/nova/api-paste.ini',
|
|
||||||
contexts=[context.IdentityServiceContext()])
|
|
||||||
configs.register(config_file='/etc/haproxy/haproxy.conf',
|
|
||||||
contexts=[context.HAProxyContext()])
|
|
||||||
# write out a single config
|
|
||||||
configs.write('/etc/nova/nova.conf')
|
|
||||||
# write out all registered configs
|
|
||||||
configs.write_all()
|
|
||||||
|
|
||||||
**OpenStack Releases and template loading**
|
|
||||||
|
|
||||||
When the object is instantiated, it is associated with a specific OS
|
|
||||||
release. This dictates how the template loader will be constructed.
|
|
||||||
|
|
||||||
The constructed loader attempts to load the template from several places
|
|
||||||
in the following order:
|
|
||||||
- from the most recent OS release-specific template dir (if one exists)
|
|
||||||
- the base templates_dir
|
|
||||||
- a template directory shipped in the charm with this helper file.
|
|
||||||
|
|
||||||
For the example above, '/tmp/templates' contains the following structure::
|
|
||||||
|
|
||||||
/tmp/templates/nova.conf
|
|
||||||
/tmp/templates/api-paste.ini
|
|
||||||
/tmp/templates/grizzly/api-paste.ini
|
|
||||||
/tmp/templates/havana/api-paste.ini
|
|
||||||
|
|
||||||
Since it was registered with the grizzly release, it first seraches
|
|
||||||
the grizzly directory for nova.conf, then the templates dir.
|
|
||||||
|
|
||||||
When writing api-paste.ini, it will find the template in the grizzly
|
|
||||||
directory.
|
|
||||||
|
|
||||||
If the object were created with folsom, it would fall back to the
|
|
||||||
base templates dir for its api-paste.ini template.
|
|
||||||
|
|
||||||
This system should help manage changes in config files through
|
|
||||||
openstack releases, allowing charms to fall back to the most recently
|
|
||||||
updated config template for a given release
|
|
||||||
|
|
||||||
The haproxy.conf, since it is not shipped in the templates dir, will
|
|
||||||
be loaded from the module directory's template directory, eg
|
|
||||||
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
|
|
||||||
us to ship common templates (haproxy, apache) with the helpers.
|
|
||||||
|
|
||||||
**Context generators**
|
|
||||||
|
|
||||||
Context generators are used to generate template contexts during hook
|
|
||||||
execution. Doing so may require inspecting service relations, charm
|
|
||||||
config, etc. When registered, a config file is associated with a list
|
|
||||||
of generators. When a template is rendered and written, all context
|
|
||||||
generates are called in a chain to generate the context dictionary
|
|
||||||
passed to the jinja2 template. See context.py for more info.
|
|
||||||
"""
|
|
||||||
def __init__(self, templates_dir, openstack_release):
|
|
||||||
if not os.path.isdir(templates_dir):
|
|
||||||
log('Could not locate templates dir %s' % templates_dir,
|
|
||||||
level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
self.templates_dir = templates_dir
|
|
||||||
self.openstack_release = openstack_release
|
|
||||||
self.templates = {}
|
|
||||||
self._tmpl_env = None
|
|
||||||
|
|
||||||
if None in [Environment, ChoiceLoader, FileSystemLoader]:
|
|
||||||
# if this code is running, the object is created pre-install hook.
|
|
||||||
# jinja2 shouldn't get touched until the module is reloaded on next
|
|
||||||
# hook execution, with proper jinja2 bits successfully imported.
|
|
||||||
apt_install('python-jinja2')
|
|
||||||
|
|
||||||
def register(self, config_file, contexts):
|
|
||||||
"""
|
|
||||||
Register a config file with a list of context generators to be called
|
|
||||||
during rendering.
|
|
||||||
"""
|
|
||||||
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
|
|
||||||
contexts=contexts)
|
|
||||||
log('Registered config file: %s' % config_file, level=INFO)
|
|
||||||
|
|
||||||
def _get_tmpl_env(self):
|
|
||||||
if not self._tmpl_env:
|
|
||||||
loader = get_loader(self.templates_dir, self.openstack_release)
|
|
||||||
self._tmpl_env = Environment(loader=loader)
|
|
||||||
|
|
||||||
def _get_template(self, template):
|
|
||||||
self._get_tmpl_env()
|
|
||||||
template = self._tmpl_env.get_template(template)
|
|
||||||
log('Loaded template from %s' % template.filename, level=INFO)
|
|
||||||
return template
|
|
||||||
|
|
||||||
def render(self, config_file):
|
|
||||||
if config_file not in self.templates:
|
|
||||||
log('Config not registered: %s' % config_file, level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
ctxt = self.templates[config_file].context()
|
|
||||||
|
|
||||||
_tmpl = os.path.basename(config_file)
|
|
||||||
try:
|
|
||||||
template = self._get_template(_tmpl)
|
|
||||||
except exceptions.TemplateNotFound:
|
|
||||||
# if no template is found with basename, try looking for it
|
|
||||||
# using a munged full path, eg:
|
|
||||||
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
|
|
||||||
_tmpl = '_'.join(config_file.split('/')[1:])
|
|
||||||
try:
|
|
||||||
template = self._get_template(_tmpl)
|
|
||||||
except exceptions.TemplateNotFound as e:
|
|
||||||
log('Could not load template from %s by %s or %s.' %
|
|
||||||
(self.templates_dir, os.path.basename(config_file), _tmpl),
|
|
||||||
level=ERROR)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
log('Rendering from template: %s' % _tmpl, level=INFO)
|
|
||||||
return template.render(ctxt)
|
|
||||||
|
|
||||||
def write(self, config_file):
|
|
||||||
"""
|
|
||||||
Write a single config file, raises if config file is not registered.
|
|
||||||
"""
|
|
||||||
if config_file not in self.templates:
|
|
||||||
log('Config not registered: %s' % config_file, level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
_out = self.render(config_file)
|
|
||||||
|
|
||||||
with open(config_file, 'wb') as out:
|
|
||||||
out.write(_out)
|
|
||||||
|
|
||||||
log('Wrote template %s.' % config_file, level=INFO)
|
|
||||||
|
|
||||||
def write_all(self):
|
|
||||||
"""
|
|
||||||
Write out all registered config files.
|
|
||||||
"""
|
|
||||||
[self.write(k) for k in six.iterkeys(self.templates)]
|
|
||||||
|
|
||||||
def set_release(self, openstack_release):
|
|
||||||
"""
|
|
||||||
Resets the template environment and generates a new template loader
|
|
||||||
based on a the new openstack release.
|
|
||||||
"""
|
|
||||||
self._tmpl_env = None
|
|
||||||
self.openstack_release = openstack_release
|
|
||||||
self._get_tmpl_env()
|
|
||||||
|
|
||||||
def complete_contexts(self):
|
|
||||||
'''
|
|
||||||
Returns a list of context interfaces that yield a complete context.
|
|
||||||
'''
|
|
||||||
interfaces = []
|
|
||||||
[interfaces.extend(i.complete_contexts())
|
|
||||||
for i in six.itervalues(self.templates)]
|
|
||||||
return interfaces
|
|
@ -1,642 +0,0 @@
|
|||||||
#!/usr/bin/python
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Common python helper functions used for OpenStack charms.
|
|
||||||
from collections import OrderedDict
|
|
||||||
from functools import wraps
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
|
|
||||||
import six
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.contrib.network import ip
|
|
||||||
|
|
||||||
from charmhelpers.core import (
|
|
||||||
unitdata,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log as juju_log,
|
|
||||||
charm_dir,
|
|
||||||
INFO,
|
|
||||||
relation_ids,
|
|
||||||
relation_set
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.lvm import (
|
|
||||||
deactivate_lvm_volume_group,
|
|
||||||
is_lvm_physical_volume,
|
|
||||||
remove_lvm_physical_volume,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_ipv6_addr
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import lsb_release, mounts, umount
|
|
||||||
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
|
||||||
from charmhelpers.contrib.python.packages import pip_install
|
|
||||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
|
||||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
|
||||||
|
|
||||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
|
||||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
|
||||||
|
|
||||||
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
|
||||||
'restricted main multiverse universe')
|
|
||||||
|
|
||||||
|
|
||||||
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
|
||||||
('oneiric', 'diablo'),
|
|
||||||
('precise', 'essex'),
|
|
||||||
('quantal', 'folsom'),
|
|
||||||
('raring', 'grizzly'),
|
|
||||||
('saucy', 'havana'),
|
|
||||||
('trusty', 'icehouse'),
|
|
||||||
('utopic', 'juno'),
|
|
||||||
('vivid', 'kilo'),
|
|
||||||
])
|
|
||||||
|
|
||||||
|
|
||||||
OPENSTACK_CODENAMES = OrderedDict([
|
|
||||||
('2011.2', 'diablo'),
|
|
||||||
('2012.1', 'essex'),
|
|
||||||
('2012.2', 'folsom'),
|
|
||||||
('2013.1', 'grizzly'),
|
|
||||||
('2013.2', 'havana'),
|
|
||||||
('2014.1', 'icehouse'),
|
|
||||||
('2014.2', 'juno'),
|
|
||||||
('2015.1', 'kilo'),
|
|
||||||
])
|
|
||||||
|
|
||||||
# The ugly duckling
|
|
||||||
SWIFT_CODENAMES = OrderedDict([
|
|
||||||
('1.4.3', 'diablo'),
|
|
||||||
('1.4.8', 'essex'),
|
|
||||||
('1.7.4', 'folsom'),
|
|
||||||
('1.8.0', 'grizzly'),
|
|
||||||
('1.7.7', 'grizzly'),
|
|
||||||
('1.7.6', 'grizzly'),
|
|
||||||
('1.10.0', 'havana'),
|
|
||||||
('1.9.1', 'havana'),
|
|
||||||
('1.9.0', 'havana'),
|
|
||||||
('1.13.1', 'icehouse'),
|
|
||||||
('1.13.0', 'icehouse'),
|
|
||||||
('1.12.0', 'icehouse'),
|
|
||||||
('1.11.0', 'icehouse'),
|
|
||||||
('2.0.0', 'juno'),
|
|
||||||
('2.1.0', 'juno'),
|
|
||||||
('2.2.0', 'juno'),
|
|
||||||
('2.2.1', 'kilo'),
|
|
||||||
('2.2.2', 'kilo'),
|
|
||||||
])
|
|
||||||
|
|
||||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
|
||||||
|
|
||||||
|
|
||||||
def error_out(msg):
|
|
||||||
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_install_source(src):
|
|
||||||
'''Derive OpenStack release codename from a given installation source.'''
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
rel = ''
|
|
||||||
if src is None:
|
|
||||||
return rel
|
|
||||||
if src in ['distro', 'distro-proposed']:
|
|
||||||
try:
|
|
||||||
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Could not derive openstack release for '\
|
|
||||||
'this Ubuntu release: %s' % ubuntu_rel
|
|
||||||
error_out(e)
|
|
||||||
return rel
|
|
||||||
|
|
||||||
if src.startswith('cloud:'):
|
|
||||||
ca_rel = src.split(':')[1]
|
|
||||||
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
|
|
||||||
return ca_rel
|
|
||||||
|
|
||||||
# Best guess match based on deb string provided
|
|
||||||
if src.startswith('deb') or src.startswith('ppa'):
|
|
||||||
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
|
||||||
if v in src:
|
|
||||||
return v
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_install_source(src):
|
|
||||||
codename = get_os_codename_install_source(src)
|
|
||||||
return get_os_version_codename(codename)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_version(vers):
|
|
||||||
'''Determine OpenStack codename from version number.'''
|
|
||||||
try:
|
|
||||||
return OPENSTACK_CODENAMES[vers]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_codename(codename):
|
|
||||||
'''Determine OpenStack version number from codename.'''
|
|
||||||
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
|
||||||
if v == codename:
|
|
||||||
return k
|
|
||||||
e = 'Could not derive OpenStack version for '\
|
|
||||||
'codename: %s' % codename
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_package(package, fatal=True):
|
|
||||||
'''Derive OpenStack release codename from an installed package.'''
|
|
||||||
import apt_pkg as apt
|
|
||||||
|
|
||||||
cache = apt_cache()
|
|
||||||
|
|
||||||
try:
|
|
||||||
pkg = cache[package]
|
|
||||||
except:
|
|
||||||
if not fatal:
|
|
||||||
return None
|
|
||||||
# the package is unknown to the current apt cache.
|
|
||||||
e = 'Could not determine version of package with no installation '\
|
|
||||||
'candidate: %s' % package
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
if not pkg.current_ver:
|
|
||||||
if not fatal:
|
|
||||||
return None
|
|
||||||
# package is known, but no version is currently installed.
|
|
||||||
e = 'Could not determine version of uninstalled package: %s' % package
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
|
||||||
|
|
||||||
try:
|
|
||||||
if 'swift' in pkg.name:
|
|
||||||
swift_vers = vers[:5]
|
|
||||||
if swift_vers not in SWIFT_CODENAMES:
|
|
||||||
# Deal with 1.10.0 upward
|
|
||||||
swift_vers = vers[:6]
|
|
||||||
return SWIFT_CODENAMES[swift_vers]
|
|
||||||
else:
|
|
||||||
vers = vers[:6]
|
|
||||||
return OPENSTACK_CODENAMES[vers]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_package(pkg, fatal=True):
|
|
||||||
'''Derive OpenStack version number from an installed package.'''
|
|
||||||
codename = get_os_codename_package(pkg, fatal=fatal)
|
|
||||||
|
|
||||||
if not codename:
|
|
||||||
return None
|
|
||||||
|
|
||||||
if 'swift' in pkg:
|
|
||||||
vers_map = SWIFT_CODENAMES
|
|
||||||
else:
|
|
||||||
vers_map = OPENSTACK_CODENAMES
|
|
||||||
|
|
||||||
for version, cname in six.iteritems(vers_map):
|
|
||||||
if cname == codename:
|
|
||||||
return version
|
|
||||||
# e = "Could not determine OpenStack version for package: %s" % pkg
|
|
||||||
# error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
os_rel = None
|
|
||||||
|
|
||||||
|
|
||||||
def os_release(package, base='essex'):
|
|
||||||
'''
|
|
||||||
Returns OpenStack release codename from a cached global.
|
|
||||||
If the codename can not be determined from either an installed package or
|
|
||||||
the installation source, the earliest release supported by the charm should
|
|
||||||
be returned.
|
|
||||||
'''
|
|
||||||
global os_rel
|
|
||||||
if os_rel:
|
|
||||||
return os_rel
|
|
||||||
os_rel = (get_os_codename_package(package, fatal=False) or
|
|
||||||
get_os_codename_install_source(config('openstack-origin')) or
|
|
||||||
base)
|
|
||||||
return os_rel
|
|
||||||
|
|
||||||
|
|
||||||
def import_key(keyid):
|
|
||||||
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
|
|
||||||
"--recv-keys %s" % keyid
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd.split(' '))
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
error_out("Error importing repo key %s" % keyid)
|
|
||||||
|
|
||||||
|
|
||||||
def configure_installation_source(rel):
|
|
||||||
'''Configure apt installation source.'''
|
|
||||||
if rel == 'distro':
|
|
||||||
return
|
|
||||||
elif rel == 'distro-proposed':
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
|
||||||
f.write(DISTRO_PROPOSED % ubuntu_rel)
|
|
||||||
elif rel[:4] == "ppa:":
|
|
||||||
src = rel
|
|
||||||
subprocess.check_call(["add-apt-repository", "-y", src])
|
|
||||||
elif rel[:3] == "deb":
|
|
||||||
l = len(rel.split('|'))
|
|
||||||
if l == 2:
|
|
||||||
src, key = rel.split('|')
|
|
||||||
juju_log("Importing PPA key from keyserver for %s" % src)
|
|
||||||
import_key(key)
|
|
||||||
elif l == 1:
|
|
||||||
src = rel
|
|
||||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
|
||||||
f.write(src)
|
|
||||||
elif rel[:6] == 'cloud:':
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
rel = rel.split(':')[1]
|
|
||||||
u_rel = rel.split('-')[0]
|
|
||||||
ca_rel = rel.split('-')[1]
|
|
||||||
|
|
||||||
if u_rel != ubuntu_rel:
|
|
||||||
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
|
|
||||||
'version (%s)' % (ca_rel, ubuntu_rel)
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
if 'staging' in ca_rel:
|
|
||||||
# staging is just a regular PPA.
|
|
||||||
os_rel = ca_rel.split('/')[0]
|
|
||||||
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
|
|
||||||
cmd = 'add-apt-repository -y %s' % ppa
|
|
||||||
subprocess.check_call(cmd.split(' '))
|
|
||||||
return
|
|
||||||
|
|
||||||
# map charm config options to actual archive pockets.
|
|
||||||
pockets = {
|
|
||||||
'folsom': 'precise-updates/folsom',
|
|
||||||
'folsom/updates': 'precise-updates/folsom',
|
|
||||||
'folsom/proposed': 'precise-proposed/folsom',
|
|
||||||
'grizzly': 'precise-updates/grizzly',
|
|
||||||
'grizzly/updates': 'precise-updates/grizzly',
|
|
||||||
'grizzly/proposed': 'precise-proposed/grizzly',
|
|
||||||
'havana': 'precise-updates/havana',
|
|
||||||
'havana/updates': 'precise-updates/havana',
|
|
||||||
'havana/proposed': 'precise-proposed/havana',
|
|
||||||
'icehouse': 'precise-updates/icehouse',
|
|
||||||
'icehouse/updates': 'precise-updates/icehouse',
|
|
||||||
'icehouse/proposed': 'precise-proposed/icehouse',
|
|
||||||
'juno': 'trusty-updates/juno',
|
|
||||||
'juno/updates': 'trusty-updates/juno',
|
|
||||||
'juno/proposed': 'trusty-proposed/juno',
|
|
||||||
'kilo': 'trusty-updates/kilo',
|
|
||||||
'kilo/updates': 'trusty-updates/kilo',
|
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
pocket = pockets[ca_rel]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Invalid Cloud Archive release specified: %s' % rel
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
|
|
||||||
apt_install('ubuntu-cloud-keyring', fatal=True)
|
|
||||||
|
|
||||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
|
|
||||||
f.write(src)
|
|
||||||
else:
|
|
||||||
error_out("Invalid openstack-release specified: %s" % rel)
|
|
||||||
|
|
||||||
|
|
||||||
def config_value_changed(option):
|
|
||||||
"""
|
|
||||||
Determine if config value changed since last call to this function.
|
|
||||||
"""
|
|
||||||
hook_data = unitdata.HookData()
|
|
||||||
with hook_data():
|
|
||||||
db = unitdata.kv()
|
|
||||||
current = config(option)
|
|
||||||
saved = db.get(option)
|
|
||||||
db.set(option, current)
|
|
||||||
if saved is None:
|
|
||||||
return False
|
|
||||||
return current != saved
|
|
||||||
|
|
||||||
|
|
||||||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
|
||||||
"""
|
|
||||||
Write an rc file in the charm-delivered directory containing
|
|
||||||
exported environment variables provided by env_vars. Any charm scripts run
|
|
||||||
outside the juju hook environment can source this scriptrc to obtain
|
|
||||||
updated config information necessary to perform health checks or
|
|
||||||
service changes.
|
|
||||||
"""
|
|
||||||
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
|
|
||||||
if not os.path.exists(os.path.dirname(juju_rc_path)):
|
|
||||||
os.mkdir(os.path.dirname(juju_rc_path))
|
|
||||||
with open(juju_rc_path, 'wb') as rc_script:
|
|
||||||
rc_script.write(
|
|
||||||
"#!/bin/bash\n")
|
|
||||||
[rc_script.write('export %s=%s\n' % (u, p))
|
|
||||||
for u, p in six.iteritems(env_vars) if u != "script_path"]
|
|
||||||
|
|
||||||
|
|
||||||
def openstack_upgrade_available(package):
|
|
||||||
"""
|
|
||||||
Determines if an OpenStack upgrade is available from installation
|
|
||||||
source, based on version of installed package.
|
|
||||||
|
|
||||||
:param package: str: Name of installed package.
|
|
||||||
|
|
||||||
:returns: bool: : Returns True if configured installation source offers
|
|
||||||
a newer version of package.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import apt_pkg as apt
|
|
||||||
src = config('openstack-origin')
|
|
||||||
cur_vers = get_os_version_package(package)
|
|
||||||
available_vers = get_os_version_install_source(src)
|
|
||||||
apt.init()
|
|
||||||
return apt.version_compare(available_vers, cur_vers) == 1
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_block_device(block_device):
|
|
||||||
'''
|
|
||||||
Confirm block_device, create as loopback if necessary.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to ensure.
|
|
||||||
|
|
||||||
:returns: str: Full path of ensured block device.
|
|
||||||
'''
|
|
||||||
_none = ['None', 'none', None]
|
|
||||||
if (block_device in _none):
|
|
||||||
error_out('prepare_storage(): Missing required input: block_device=%s.'
|
|
||||||
% block_device)
|
|
||||||
|
|
||||||
if block_device.startswith('/dev/'):
|
|
||||||
bdev = block_device
|
|
||||||
elif block_device.startswith('/'):
|
|
||||||
_bd = block_device.split('|')
|
|
||||||
if len(_bd) == 2:
|
|
||||||
bdev, size = _bd
|
|
||||||
else:
|
|
||||||
bdev = block_device
|
|
||||||
size = DEFAULT_LOOPBACK_SIZE
|
|
||||||
bdev = ensure_loopback_device(bdev, size)
|
|
||||||
else:
|
|
||||||
bdev = '/dev/%s' % block_device
|
|
||||||
|
|
||||||
if not is_block_device(bdev):
|
|
||||||
error_out('Failed to locate valid block device at %s' % bdev)
|
|
||||||
|
|
||||||
return bdev
|
|
||||||
|
|
||||||
|
|
||||||
def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    # Unmount the device anywhere it is currently mounted.
    for mount_point, dev in mounts():
        if dev == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (dev, mount_point), level=INFO)
            umount(mount_point, persist=True)

    # LVM members get deactivated and wiped; anything else gets zapped.
    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)
|
|
||||||
|
|
||||||
# Convenience aliases re-exported from the ip helper module so existing
# callers of this module keep working.
is_ip = ip.is_ip
ns_query = ip.ns_query
get_host_ip = ip.get_host_ip
get_hostname = ip.get_hostname
|
|
||||||
|
|
||||||
|
|
||||||
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
    """Return the oslo matchmaker ring as a dict.

    :param mm_file: str: Path to the matchmaker ring JSON file.
    :returns: dict: Parsed ring contents, or {} if the file is absent.
    """
    if not os.path.isfile(mm_file):
        return {}
    with open(mm_file, 'r') as ring:
        return json.load(ring)
|
|
||||||
|
|
||||||
|
|
||||||
def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    """Advertise all local IPv6 addresses on every shared-db relation.

    :param database: str: Database name to request.
    :param database_user: str: Database username to request.
    :param relation_prefix: str: Optional prefix applied to each setting key.
    """
    addresses = get_ipv6_addr(dynamic_only=False)

    settings = {'database': database,
                'username': database_user,
                'hostname': json.dumps(addresses)}

    if relation_prefix:
        # Rebuild the dict with prefixed keys rather than mutating in place.
        settings = {"%s_%s" % (relation_prefix, key): value
                    for key, value in settings.items()}

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **settings)
|
|
||||||
|
|
||||||
|
|
||||||
def os_requires_version(ostack_release, pkg):
    """
    Decorator for hook to specify minimum supported release
    """
    def decorator(func):
        @wraps(func)
        def guarded(*args):
            # NOTE(review): relies on string comparison of release names —
            # presumably OpenStack codenames sort alphabetically; confirm.
            if os_release(pkg) < ostack_release:
                raise Exception("This hook is not supported on releases"
                                " before %s" % ostack_release)
            func(*args)
        return guarded
    return decorator
|
|
||||||
|
|
||||||
|
|
||||||
def git_install_requested():
    """
    Returns true if openstack-origin-git is specified.
    """
    requested = config('openstack-origin-git')
    return requested is not None
|
|
||||||
|
|
||||||
|
|
||||||
# Path of the cloned 'requirements' repo; set by git_clone_and_install()
# and consumed when installing the remaining repositories.
requirements_dir = None
|
|
||||||
|
|
||||||
|
|
||||||
def git_clone_and_install(projects_yaml, core_project):
    """
    Clone/install all specified OpenStack repositories.

    The expected format of projects_yaml is:
        repositories:
          - {name: keystone,
             repository: 'git://git.openstack.org/openstack/keystone.git',
             branch: 'stable/icehouse'}
          - {name: requirements,
             repository: 'git://git.openstack.org/openstack/requirements.git',
             branch: 'stable/icehouse'}
        directory: /mnt/openstack-git
        http_proxy: http://squid.internal:3128
        https_proxy: https://squid.internal:3128

    The directory, http_proxy, and https_proxy keys are optional.

    :param projects_yaml: str: YAML description of repositories to install.
    :param core_project: str: Name of the repo that must be listed last.
    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'

    if not projects_yaml:
        return

    # NOTE: yaml.load() can construct arbitrary Python objects; projects_yaml
    # is operator-supplied charm config, not untrusted input.
    projects = yaml.load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    old_environ = dict(os.environ)

    if 'http_proxy' in projects.keys():
        os.environ['http_proxy'] = projects['http_proxy']
    if 'https_proxy' in projects.keys():
        os.environ['https_proxy'] = projects['https_proxy']

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        if p['name'] == 'requirements':
            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
                                                     update_requirements=False)
            requirements_dir = repo_dir
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
                                                     update_requirements=True)

    # BUG FIX: restore the pre-install environment in place.  Rebinding
    # os.environ to a plain dict detaches it from the real process
    # environment (putenv no longer syncs).
    os.environ.clear()
    os.environ.update(old_environ)
|
|
||||||
|
|
||||||
|
|
||||||
def _git_validate_projects_yaml(projects, core_project):
    """
    Validate the projects yaml.
    """
    _git_ensure_key_exists('repositories', projects)

    repositories = projects['repositories']
    for entry in repositories:
        for required in ('name', 'repository', 'branch'):
            _git_ensure_key_exists(required, entry.keys())

    # 'requirements' must come first so later repos can sync from it;
    # the core project must be installed last.
    if repositories[0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))

    if repositories[-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))
|
|
||||||
|
|
||||||
|
|
||||||
def _git_ensure_key_exists(key, keys):
|
|
||||||
"""
|
|
||||||
Ensure that key exists in keys.
|
|
||||||
"""
|
|
||||||
if key not in keys:
|
|
||||||
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
|
||||||
|
|
||||||
|
|
||||||
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
    """
    Clone and install a single git repository.

    :param repo: str: URL of the git repository.
    :param branch: str: Branch to check out.
    :param parent_dir: str: Directory the repo is cloned beneath.
    :param update_requirements: bool: Sync the repo's requirements files from
        the previously-cloned global requirements repo before installing.
    :returns: str: Directory the repository was cloned into.
    """
    dest_dir = os.path.join(parent_dir, os.path.basename(repo))

    if not os.path.exists(parent_dir):
        # BUG FIX: the original message claimed the directory already existed
        # in the branch where it is actually missing and being created.
        juju_log('Directory {} does not exist. '
                 'Creating it.'.format(parent_dir))
        os.mkdir(parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
    else:
        repo_dir = dest_dir

    if update_requirements:
        if not requirements_dir:
            error_out('requirements repo must be cloned before '
                      'updating from global requirements.')
        _git_update_requirements(repo_dir, requirements_dir)

    juju_log('Installing git repo from dir: {}'.format(repo_dir))
    pip_install(repo_dir)

    return repo_dir
|
|
||||||
|
|
||||||
|
|
||||||
def _git_update_requirements(package_dir, reqs_dir):
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.
    """
    saved_cwd = os.getcwd()
    # update.py must run from inside the requirements repo.
    os.chdir(reqs_dir)
    try:
        subprocess.check_call(['python', 'update.py', package_dir])
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from global-requirements.txt".format(package))
    os.chdir(saved_cwd)
|
|
||||||
|
|
||||||
|
|
||||||
def git_src_dir(projects_yaml, project):
    """
    Return the directory where the specified project's source is located.

    :param projects_yaml: str: YAML description of repositories.
    :param project: str: Repository name to look up.
    :returns: str or None: Clone directory for the project, if listed.
    """
    if not projects_yaml:
        return

    parsed = yaml.load(projects_yaml)
    # Honour an explicit 'directory' key, otherwise use the default root.
    base_dir = parsed.get('directory', '/mnt/openstack-git')

    for entry in parsed['repositories']:
        if entry['name'] == project:
            return os.path.join(base_dir,
                                os.path.basename(entry['repository']))

    return None
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,96 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
|
||||||
from charmhelpers.core.hookenv import log
|
|
||||||
|
|
||||||
# Bootstrap pip itself: if the 'pip' module is not importable, install it
# via apt and retry the import.
try:
    from pip import main as pip_execute
except ImportError:
    apt_update()
    apt_install('python-pip')
    from pip import main as pip_execute
|
|
||||||
|
|
||||||
# Module author metadata.
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|
||||||
|
|
||||||
|
|
||||||
def parse_options(given, available):
    """Given a set of options, check if available"""
    # Deterministic (sorted) ordering keeps the resulting command line stable.
    for key in sorted(given):
        if key in available:
            yield "--{0}={1}".format(key, given[key])
|
|
||||||
|
|
||||||
|
|
||||||
def pip_install_requirements(requirements, **options):
    """Install a requirements file """
    supported = ('proxy', 'src', 'log', )
    command = ["install"]
    command.extend(parse_options(options, supported))
    command.append("-r {0}".format(requirements))

    log("Installing from file: {} with options: {}".format(requirements,
                                                           command))
    pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_install(package, fatal=False, upgrade=False, **options):
    """Install a python package"""
    # NOTE(review): 'fatal' is unused in this body — presumably kept for
    # signature parity with other install helpers; confirm.
    supported = ('proxy', 'src', 'log', "index-url", )
    command = ["install"]
    command.extend(parse_options(options, supported))

    if upgrade:
        command.append('--upgrade')

    packages = package if isinstance(package, list) else [package]
    command.extend(packages)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_uninstall(package, **options):
    """Uninstall a python package"""
    supported = ('proxy', 'log', )
    command = ["uninstall", "-q", "-y"]
    command.extend(parse_options(options, supported))

    packages = package if isinstance(package, list) else [package]
    command.extend(packages)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_list():
    """Returns the list of current python installed packages
    """
    command = ["list"]
    return pip_execute(command)
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,444 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# This file is sourced from lp:openstack-charm-helpers
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import os
|
|
||||||
import shutil
|
|
||||||
import json
|
|
||||||
import time
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
CalledProcessError,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
relation_get,
|
|
||||||
relation_ids,
|
|
||||||
related_units,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
mount,
|
|
||||||
mounts,
|
|
||||||
service_start,
|
|
||||||
service_stop,
|
|
||||||
service_running,
|
|
||||||
umount,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Path templates for per-service ceph client credentials; filled in with the
# service name via str.format().
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
KEYFILE = '/etc/ceph/ceph.client.{}.key'

# Minimal ceph.conf template written by configure().
CEPH_CONF = """[global]
 auth supported = {auth}
 keyring = {keyring}
 mon host = {mon_hosts}
 log to syslog = {use_syslog}
 err to syslog = {use_syslog}
 clog to syslog = {use_syslog}
"""
|
|
||||||
|
|
||||||
|
|
||||||
def install():
    """Basic Ceph client installation."""
    conf_dir = "/etc/ceph"
    if not os.path.exists(conf_dir):
        os.mkdir(conf_dir)
    apt_install('ceph-common', fatal=True)
|
|
||||||
|
|
||||||
|
|
||||||
def rbd_exists(service, pool, rbd_img):
    """Check to see if a RADOS block device exists."""
    cmd = ['rbd', 'list', '--id', service, '--pool', pool]
    try:
        listing = check_output(cmd).decode('UTF-8')
    except CalledProcessError:
        # Listing failed; treat the image as absent.
        return False
    return rbd_img in listing
|
|
||||||
|
|
||||||
|
|
||||||
def create_rbd_image(service, pool, image, sizemb):
    """Create a new RADOS block device."""
    check_call(['rbd', 'create', image,
                '--size', str(sizemb),
                '--id', service,
                '--pool', pool])
|
|
||||||
|
|
||||||
|
|
||||||
def pool_exists(service, name):
    """Check to see if a RADOS pool already exists."""
    try:
        pools = check_output(['rados', '--id', service,
                              'lspools']).decode('UTF-8')
    except CalledProcessError:
        # Could not list pools; report the pool as absent.
        return False
    return name in pools
|
|
||||||
|
|
||||||
|
|
||||||
def get_osds(service):
    """Return a list of all Ceph Object Storage Daemons currently in the
    cluster.

    :param service: str: Ceph client id used for authentication.
    :returns: list or None: OSD ids, or None when the installed ceph is too
        old to support 'osd ls' from the CLI.
    """
    version = ceph_version()
    # NOTE(review): this is a lexicographic string comparison; it happens to
    # order '0.56' after '0.48' but would misorder e.g. '0.100' — confirm
    # against the ceph versions this charm actually supports.
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls',
                                        '--format=json']).decode('UTF-8'))

    return None
|
|
||||||
|
|
||||||
|
|
||||||
def create_pool(service, name, replicas=3):
    """Create a new RADOS pool."""
    if pool_exists(service, name):
        log("Ceph pool {} already exists, skipping creation".format(name),
            level=WARNING)
        return

    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 // replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200

    check_call(['ceph', '--id', service, 'osd', 'pool', 'create', name,
                str(pgnum)])
    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
                str(replicas)])
|
|
||||||
|
|
||||||
|
|
||||||
def delete_pool(service, name):
    """Delete a RADOS pool from ceph."""
    check_call(['ceph', '--id', service, 'osd', 'pool', 'delete', name,
                '--yes-i-really-really-mean-it'])
|
|
||||||
|
|
||||||
|
|
||||||
def _keyfile_path(service):
    # Location of the plain key file for the named client/service.
    return KEYFILE.format(service)
|
|
||||||
|
|
||||||
|
|
||||||
def _keyring_path(service):
    # Location of the keyring file for the named client/service.
    return KEYRING.format(service)
|
|
||||||
|
|
||||||
|
|
||||||
def create_keyring(service, key):
    """Create a new Ceph keyring containing key."""
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        # Never clobber an existing keyring.
        log('Ceph keyring exists at %s.' % keyring, level=WARNING)
        return

    check_call(['ceph-authtool', keyring, '--create-keyring',
                '--name=client.{}'.format(service),
                '--add-key={}'.format(key)])
    log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
|
|
||||||
|
|
||||||
|
|
||||||
def delete_keyring(service):
    """Delete an existing Ceph keyring."""
    keyring = _keyring_path(service)
    if os.path.exists(keyring):
        os.remove(keyring)
        log('Deleted ring at %s.' % keyring, level=INFO)
    else:
        log('Keyring does not exist at %s' % keyring, level=WARNING)
|
|
||||||
|
|
||||||
|
|
||||||
def create_key_file(service, key):
    """Create a file containing key."""
    keyfile = _keyfile_path(service)
    if os.path.exists(keyfile):
        # Never clobber an existing key file.
        log('Keyfile exists at %s.' % keyfile, level=WARNING)
        return

    with open(keyfile, 'w') as out:
        out.write(key)
    log('Created new keyfile at %s.' % keyfile, level=INFO)
|
|
||||||
|
|
||||||
|
|
||||||
def get_ceph_nodes():
    """Query named relation 'ceph' to determine current nodes."""
    return [relation_get('private-address', unit=unit, rid=r_id)
            for r_id in relation_ids('ceph')
            for unit in related_units(r_id)]
|
|
||||||
|
|
||||||
|
|
||||||
def configure(service, key, auth, use_syslog):
    """Perform basic configuration of Ceph."""
    create_keyring(service, key)
    create_key_file(service, key)
    mon_hosts = get_ceph_nodes()
    conf = CEPH_CONF.format(auth=auth,
                            keyring=_keyring_path(service),
                            mon_hosts=",".join(map(str, mon_hosts)),
                            use_syslog=use_syslog)
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(conf)
    # rbd kernel module is needed to map block devices locally.
    modprobe('rbd')
|
|
||||||
|
|
||||||
|
|
||||||
def image_mapped(name):
    """Determine whether a RADOS block device is mapped locally."""
    try:
        mapped = check_output(['rbd', 'showmapped']).decode('UTF-8')
    except CalledProcessError:
        return False
    return name in mapped
|
|
||||||
|
|
||||||
|
|
||||||
def map_block_storage(service, pool, image):
    """Map a RADOS block device for local use."""
    check_call(['rbd', 'map', '{}/{}'.format(pool, image),
                '--user', service,
                '--secret', _keyfile_path(service)])
|
|
||||||
|
|
||||||
|
|
||||||
def filesystem_mounted(fs):
    """Determine whether a filesystem is already mounted."""
    return any(mount_point == fs for mount_point, _dev in mounts())
|
|
||||||
|
|
||||||
|
|
||||||
def make_filesystem(blk_device, fstype='ext4', timeout=10):
    """Make a new filesystem on the specified block device.

    Waits up to *timeout* seconds for the device node to appear before
    formatting it.

    :param blk_device: str: Path of the block device to format.
    :param fstype: str: Filesystem type to create (default ext4).
    :param timeout: int: Seconds to wait for the device to appear.
    :raises IOError: If the device never appears.
    """
    # BUG FIX: os.errno was an unofficial alias removed in Python 3.10;
    # use the errno module directly.
    import errno
    e_noent = errno.ENOENT
    count = 0
    while not os.path.exists(blk_device):
        if count >= timeout:
            log('Gave up waiting on block device %s' % blk_device,
                level=ERROR)
            raise IOError(e_noent, os.strerror(e_noent), blk_device)

        log('Waiting for block device %s to appear' % blk_device,
            level=DEBUG)
        count += 1
        time.sleep(1)
    else:
        # Loop ended normally (device exists) -> safe to format.
        log('Formatting block device %s as filesystem %s.' %
            (blk_device, fstype), level=INFO)
        check_call(['mkfs', '-t', fstype, blk_device])
|
|
||||||
|
|
||||||
|
|
||||||
def place_data_on_block_device(blk_device, data_src_dst):
    """Migrate data in data_src_dst to blk_device and then remount."""
    # Stage the device at /mnt and copy the existing data onto it.
    mount(blk_device, '/mnt')
    copy_files(data_src_dst, '/mnt')
    umount('/mnt')

    # Remember the original ownership so the new mount matches it.
    stat_info = os.stat(data_src_dst)

    # re-mount where the data should originally be
    # TODO: persist is currently a NO-OP in core.host
    mount(blk_device, data_src_dst, persist=True)
    # ensure original ownership of new mount.
    os.chown(data_src_dst, stat_info.st_uid, stat_info.st_gid)
|
|
||||||
|
|
||||||
|
|
||||||
# TODO: re-use
|
|
||||||
def modprobe(module):
    """Load a kernel module and configure for auto-load on reboot.

    :param module: str: Name of the kernel module to load.
    """
    log('Loading kernel module', level=INFO)
    check_call(['modprobe', module])
    with open('/etc/modules', 'r+') as modules:
        if module not in modules.read():
            # BUG FIX: append a trailing newline so this entry does not run
            # into a later one; /etc/modules expects one module per line.
            modules.write(module + "\n")
|
|
||||||
|
|
||||||
|
|
||||||
def copy_files(src, dst, symlinks=False, ignore=None):
    """Copy files from src to dst."""
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            # Recursively copy sub-directories.
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=None,
                        replicas=3):
    """NOTE: This function must only be called from a single service unit for
    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.

    If formatting a device for the first time, data existing at mount_point
    will be migrated to the RBD device before being re-mounted.

    All services listed in system_services will be stopped prior to data
    migration and restarted when complete.
    """
    # BUG FIX: avoid a mutable default argument; None stands in for
    # "no services to manage".
    if system_services is None:
        system_services = []

    # Ensure pool, RBD image, RBD mappings are in place.
    if not pool_exists(service, pool):
        log('Creating new pool {}.'.format(pool), level=INFO)
        create_pool(service, pool, replicas=replicas)

    if not rbd_exists(service, pool, rbd_img):
        log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
        create_rbd_image(service, pool, rbd_img, sizemb)

    if not image_mapped(rbd_img):
        log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
            level=INFO)
        map_block_storage(service, pool, rbd_img)

    # make file system
    # TODO: What happens if for whatever reason this is run again and
    # the data is already in the rbd device and/or is mounted??
    # When it is mounted already, it will fail to make the fs
    # XXX: This is really sketchy!  Need to at least add an fstab entry
    # otherwise this hook will blow away existing data if its executed
    # after a reboot.
    if not filesystem_mounted(mount_point):
        make_filesystem(blk_device, fstype)

        for svc in system_services:
            if service_running(svc):
                log('Stopping services {} prior to migrating data.'
                    .format(svc), level=DEBUG)
                service_stop(svc)

        place_data_on_block_device(blk_device, mount_point)

        for svc in system_services:
            log('Starting service {} after migrating data.'
                .format(svc), level=DEBUG)
            service_start(svc)
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_ceph_keyring(service, user=None, group=None):
    """Ensures a ceph keyring is created for a named service and optionally
    ensures user and group ownership.

    :param service: str: Name the keyring/key are created for.
    :param user: str: Optional owner of the keyring file.
    :param group: str: Optional group of the keyring file.
    :returns: bool: False if no ceph key is available in relation state.
    """
    key = None
    # BUG FIX: stop searching as soon as a key is found.  The original only
    # broke out of the inner loop, so a later relation/unit advertising no
    # key could clobber an already-found key with None.
    for rid in relation_ids('ceph'):
        for unit in related_units(rid):
            key = relation_get('key', rid=rid, unit=unit)
            if key:
                break
        if key:
            break

    if not key:
        return False

    create_keyring(service=service, key=key)
    keyring = _keyring_path(service)
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])

    return True
|
|
||||||
|
|
||||||
|
|
||||||
def ceph_version():
    """Retrieve the local version of ceph."""
    if not os.path.exists('/usr/bin/ceph'):
        return None

    # Expected output shape: "ceph version X.Y.Z (<hash>)"; the version is
    # the third whitespace-separated field.
    fields = check_output(['ceph', '-v']).decode('US-ASCII').split()
    if len(fields) > 3:
        return fields[2]
    return None
|
|
||||||
|
|
||||||
|
|
||||||
class CephBrokerRq(object):
    """Ceph broker request.

    Multiple operations can be added to a request and sent to the Ceph broker
    to be executed.

    Request is json-encoded for sending over the wire.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, api_version=1):
        self.api_version = api_version
        self.ops = []

    def add_op_create_pool(self, name, replica_count=3):
        """Queue a create-pool operation for the named pool."""
        op = {'op': 'create-pool', 'name': name, 'replicas': replica_count}
        self.ops.append(op)

    @property
    def request(self):
        """JSON-encoded form of the accumulated request."""
        payload = {'api-version': self.api_version, 'ops': self.ops}
        return json.dumps(payload)
|
|
||||||
|
|
||||||
|
|
||||||
class CephBrokerRsp(object):
    """Ceph broker response.

    Response is json-decoded and contents provided as methods/properties.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, encoded_rsp):
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    @property
    def exit_code(self):
        # Broker exit code; None when the response carried no 'exit-code'.
        return self.rsp.get('exit-code')

    @property
    def exit_msg(self):
        # Broker error text; None when the response carried no 'stderr'.
        return self.rsp.get('stderr')
|
|
@ -1,78 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from subprocess import (
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
)
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
##################################################
|
|
||||||
# loopback device helpers.
|
|
||||||
##################################################
|
|
||||||
def loopback_devices():
    '''
    Parse through 'losetup -a' output to determine currently mapped
    loopback devices. Output is expected to look like:

        /dev/loop0: [0807]:961814 (/tmp/my.img)

    :returns: dict: a dict mapping {loopback_dev: backing_file}
    '''
    loopbacks = {}
    cmd = ['losetup', '-a']
    # BUG FIX: check_output() returns bytes on Python 3; decode before
    # comparing with str literals and matching a str regex.
    output = check_output(cmd).decode('utf-8')
    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
    for dev, _, f in devs:
        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
    return loopbacks
|
|
||||||
|
|
||||||
|
|
||||||
def create_loopback(file_path):
    '''
    Create a loopback device for a given backing file.

    :returns: str: Full path to new loopback device (eg, /dev/loop0)
    '''
    backing = os.path.abspath(file_path)
    # Attach the file to the first free /dev/loopN device...
    check_call(['losetup', '--find', backing])
    # ...then look the new device up by its backing file.
    for device, backing_file in six.iteritems(loopback_devices()):
        if backing_file == backing:
            return device
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_loopback_device(path, size):
    '''
    Ensure a loopback device exists for a given backing file path and size.
    If no loopback device is mapped to the file, a new one will be created.

    TODO: Confirm size of found loopback device.

    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
    '''
    # Reuse an existing mapping when one already points at this file.
    existing = [dev for dev, backing in six.iteritems(loopback_devices())
                if backing == path]
    if existing:
        return existing[0]

    # Create a sparse backing file of the requested size if necessary.
    if not os.path.exists(path):
        check_call(['truncate', '--size', size, path])

    return create_loopback(path)
|
|
@ -1,105 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
CalledProcessError,
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
Popen,
|
|
||||||
PIPE,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
##################################################
|
|
||||||
# LVM helpers.
|
|
||||||
##################################################
|
|
||||||
def deactivate_lvm_volume_group(block_device):
    '''
    Deactivate any volume group associated with an LVM physical volume.

    :param block_device: str: Full path to LVM physical volume
    '''
    volume_group = list_lvm_volume_group(block_device)
    if not volume_group:
        return
    # 'vgchange -an' marks the VG inactive so the kernel releases it.
    check_call(['vgchange', '-an', volume_group])
|
|
||||||
|
|
||||||
|
|
||||||
def is_lvm_physical_volume(block_device):
    '''
    Determine whether a block device is initialized as an LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: boolean: True if block device is a PV, False if not.
    '''
    # pvdisplay exits non-zero for devices that are not PVs; treat
    # that as a negative answer rather than an error.
    try:
        check_output(['pvdisplay', block_device])
    except CalledProcessError:
        return False
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def remove_lvm_physical_volume(block_device):
    '''
    Remove LVM PV signatures from a given block device.

    :param block_device: str: Full path of block device to scrub.
    '''
    p = Popen(['pvremove', '-ff', block_device],
              stdin=PIPE)
    # Answer pvremove's confirmation prompt. stdin is a binary pipe,
    # so the reply must be bytes: passing the str 'y\n' raises
    # TypeError on Python 3 (b'y\n' is equally valid on Python 2).
    p.communicate(input=b'y\n')
|
|
||||||
|
|
||||||
|
|
||||||
def list_lvm_volume_group(block_device):
    '''
    List LVM volume group associated with a given block device.

    Assumes block device is a valid LVM PV.

    :param block_device: str: Full path of block device to inspect.

    :returns: str: Name of volume group associated with block device or None
    '''
    result = None
    for raw_line in check_output(['pvdisplay', block_device]).splitlines():
        line = raw_line.decode('UTF-8').strip()
        if line.startswith('VG Name'):
            # Everything after the 'VG Name' label is the group name.
            result = ' '.join(line.split()[2:])
    return result
|
|
||||||
|
|
||||||
|
|
||||||
def create_lvm_physical_volume(block_device):
    '''
    Initialize a block device as an LVM physical volume.

    :param block_device: str: Full path of block device to initialize.
    '''
    cmd = ['pvcreate', block_device]
    check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def create_lvm_volume_group(volume_group, block_device):
    '''
    Create an LVM volume group backed by a given block device.

    Assumes block device has already been initialized as an LVM PV.

    :param volume_group: str: Name of volume group to create.
    :block_device: str: Full path of PV-initialized block device.
    '''
    cmd = ['vgcreate', volume_group, block_device]
    check_call(cmd)
|
|
@ -1,70 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from stat import S_ISBLK
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
call
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def is_block_device(path):
    '''
    Confirm device at path is a valid block device node.

    :returns: boolean: True if path is a block device, False if not.
    '''
    # A missing path obviously cannot be a block device.
    if os.path.exists(path):
        return S_ISBLK(os.stat(path).st_mode)
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def zap_disk(block_device):
    '''
    Clear a block device of partition table. Relies on sgdisk, which is
    installed as part of the 'gdisk' package in Ubuntu.

    :param block_device: str: Full path of block device to clean.
    '''
    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
    call(['sgdisk', '--zap-all', '--mbrtogpt', '--clear', block_device])
    # Size of the device in 512-byte sectors.
    sectors = check_output(
        ['blockdev', '--getsz', block_device]).decode('UTF-8')
    # The backup GPT lives at the tail of the disk: wipe the last 100
    # sectors in addition to the first 1MiB.
    gpt_end = int(sectors.split()[0]) - 100
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=1M', 'count=1'])
    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
|
|
||||||
|
|
||||||
|
|
||||||
def is_device_mounted(device):
    '''Given a device path, return True if that device is mounted, and False
    if it isn't.

    :param device: str: Full path of the device to check.
    :returns: boolean: True if the path represents a mounted device, False if
        it doesn't.
    '''
    # A trailing digit means we were handed a partition (eg /dev/sda1)
    # rather than a whole disk.
    is_partition = bool(re.search(r".*[0-9]+\b", device))
    mounts = check_output(['mount']).decode('UTF-8')
    if is_partition:
        # Look for the partition itself in the mount table.
        return bool(re.search(device + r"\b", mounts))
    # For a whole disk, any mounted partition of it counts.
    return bool(re.search(device + r"[0-9]+\b", mounts))
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,57 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2014 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Edward Hope-Morley <opentastic@gmail.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import time
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raising the exception.

    :param num_retries: int: attempts allowed after the first failure.
    :param base_delay: delay before a retry; grows linearly
        (base_delay * attempt number) on each subsequent retry.
    :param exc_type: exception class (or tuple) that triggers a retry;
        any other exception propagates immediately.
    """
    from functools import wraps

    def _retry_on_exception_inner_1(f):
        # wraps() preserves the decorated function's __name__/__doc__,
        # which the original decorator clobbered.
        @wraps(f)
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
|
|
@ -1,134 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import io
|
|
||||||
import os
|
|
||||||
|
|
||||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
|
||||||
|
|
||||||
|
|
||||||
class Fstab(io.FileIO):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            # Standard fstab fields: device, mount point, fs type,
            # mount options, dump flag, fsck pass number.
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            # Empty options default to the fstab convention "defaults".
            if not options:
                options = "defaults"

            self.options = options
            self.d = int(d)
            self.p = int(p)

        def __eq__(self, o):
            # Entries compare by their rendered fstab line.
            return str(self) == str(o)

        def __str__(self):
            # Render as a space-separated fstab line.
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    # Default file operated on when no explicit path is given.
    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        """Open *path* (default /etc/fstab) for binary read/update."""
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        # 'rb+' keeps existing content and allows in-place rewriting.
        super(Fstab, self).__init__(self._path, 'rb+')

    def _hydrate_entry(self, line):
        """Parse one non-comment fstab line into an Entry."""
        # NOTE: use split with no arguments to split on any
        # whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        """Yield an Entry for every parseable non-comment line."""
        self.seek(0)
        for line in self.readlines():
            line = line.decode('us-ascii')
            try:
                if line.strip() and not line.strip().startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                # Malformed lines (wrong field count) are skipped.
                pass

    def get_entry_by_attr(self, attr, value):
        """Return the first Entry whose *attr* equals *value*, or None."""
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        """Append *entry*; returns False if its device is already present."""
        if self.get_entry_by_attr('device', entry.device):
            return False

        # entries iteration above left the file position at EOF, so this
        # write appends; truncate() drops any stale trailing bytes.
        self.write((str(entry) + '\n').encode('us-ascii'))
        self.truncate()
        return entry

    def remove_entry(self, entry):
        """Remove the line matching *entry*; returns True on success."""
        self.seek(0)

        lines = [l.decode('us-ascii') for l in self.readlines()]

        found = False
        for index, line in enumerate(lines):
            if line.strip() and not line.strip().startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        # Rewrite the whole file without the removed line, then shrink
        # it to the new length.
        self.seek(0)
        self.write(''.join(lines).encode('us-ascii'))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        """Remove the entry mounted at *mountpoint*; returns True if removed."""
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        """Convenience wrapper: build an Entry and add it to *path*."""
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
|
|
@ -1,667 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"Interactions with the Juju environment"
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
from functools import wraps
|
|
||||||
import os
|
|
||||||
import json
|
|
||||||
import yaml
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import errno
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
|
|
||||||
import six
|
|
||||||
if not six.PY3:
|
|
||||||
from UserDict import UserDict
|
|
||||||
else:
|
|
||||||
from collections import UserDict
|
|
||||||
|
|
||||||
# Log level names accepted by the juju-log tool (passed via log(level=...)).
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
INFO = "INFO"
DEBUG = "DEBUG"
# Unique sentinel used by Serializable.__getattr__ to detect a missing
# attribute without colliding with legitimate None values.
MARKER = object()
|
|
||||||
|
|
||||||
# Module-level store backing the @cached decorator; flush() removes
# entries from it by substring match on the key.
cache = {}


def cached(func):
    """Cache return values for multiple executions of func + args

    For example::

        @cached
        def unit_get(attribute):
            pass

        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
        # The key encodes the function identity and the full call
        # signature, so distinct arguments get distinct slots.
        key = str((func, args, kwargs))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def flush(key):
    """Flushes any entries from function cache where the
    key is found in the function+args """
    # Collect matches first, then delete: mutating a dict while
    # iterating over it is an error.
    stale = [item for item in cache if key in item]
    for item in stale:
        del cache[item]
|
|
||||||
|
|
||||||
|
|
||||||
def log(message, level=None):
    """Write a message to the juju log"""
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
        # juju-log isn't on PATH (eg, running under a test harness):
        # fall back to writing the message to stderr.
        if level:
            message = "{}: {}".format(level, message)
        message = "juju-log: {}".format(message)
        print(message, file=sys.stderr)
|
|
||||||
|
|
||||||
|
|
||||||
class Serializable(UserDict):
    """Wrapper, an object that can be serialized to yaml or json"""

    def __init__(self, obj):
        # wrap the object
        UserDict.__init__(self)
        self.data = obj

    def __getattr__(self, attr):
        # These names always live on the wrapper itself, never on the
        # wrapped object.
        if attr in ("json", "yaml", "data"):
            return self.__dict__[attr]
        # Prefer a real attribute on the wrapped object...
        wrapped = getattr(self.data, attr, MARKER)
        if wrapped is not MARKER:
            return wrapped
        # ...falling back to treating the wrapped object as a mapping.
        try:
            return self.data[attr]
        except KeyError:
            raise AttributeError(attr)

    def __getstate__(self):
        # Pickle as a standard dictionary.
        return self.data

    def __setstate__(self, state):
        # Unpickle into our wrapper.
        self.data = state

    def json(self):
        """Serialize the object to json"""
        return json.dumps(self.data)

    def yaml(self):
        """Serialize the object to yaml"""
        return yaml.dump(self.data)
|
|
||||||
|
|
||||||
|
|
||||||
def execution_environment():
    """A convenient bundling of the current execution context"""
    context = {}
    context['conf'] = config()
    rid = relation_id()
    # Relation details are only available inside relation hooks.
    if rid:
        context['reltype'] = relation_type()
        context['relid'] = rid
        context['rel'] = relation_get()
    context['unit'] = local_unit()
    context['rels'] = relations()
    context['env'] = os.environ
    return context
|
|
||||||
|
|
||||||
|
|
||||||
def in_relation_hook():
    """Determine whether we're running in a relation hook"""
    # Juju sets JUJU_RELATION only while executing relation hooks.
    return os.environ.get('JUJU_RELATION') is not None
|
|
||||||
|
|
||||||
|
|
||||||
def relation_type():
    """The scope for the current relation hook"""
    # Only set by Juju during relation hooks; None elsewhere.
    return os.environ.get('JUJU_RELATION')
|
|
||||||
|
|
||||||
|
|
||||||
def relation_id():
    """The relation ID for the current relation hook"""
    # Only set by Juju during relation hooks; None elsewhere.
    return os.environ.get('JUJU_RELATION_ID')
|
|
||||||
|
|
||||||
|
|
||||||
def local_unit():
    """Local unit ID"""
    # A KeyError here means we are not executing inside a Juju hook.
    unit_name = os.environ['JUJU_UNIT_NAME']
    return unit_name
|
|
||||||
|
|
||||||
|
|
||||||
def remote_unit():
    """The remote unit for the current relation hook"""
    # Only set by Juju during relation hooks; None elsewhere.
    return os.environ.get('JUJU_REMOTE_UNIT')
|
|
||||||
|
|
||||||
|
|
||||||
def service_name():
    """The name service group this unit belongs to"""
    # 'wordpress/3' -> 'wordpress'
    unit = local_unit()
    return unit.split('/')[0]
|
|
||||||
|
|
||||||
|
|
||||||
def hook_name():
    """The name of the currently executing hook"""
    # Juju runs hooks by executing the hook file itself, so argv[0]
    # carries the hook name.
    return os.path.split(sys.argv[0])[-1]
|
|
||||||
|
|
||||||
|
|
||||||
class Config(dict):
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

        >>> # inside a hook
        >>> from charmhelpers.core import hookenv
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'


        >>> # user runs `juju set mycharm foo=baz`
        >>> # now we're inside subsequent config-changed hook
        >>> config = hookenv.config()
        >>> config['foo']
        'baz'
        >>> # test to see if this val has changed since last hook
        >>> config.changed('foo')
        True
        >>> # what was the previous value?
        >>> config.previous('foo')
        'bar'
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'

    """
    # File (relative to the charm directory) holding the previous hook's
    # config, used to answer changed()/previous().
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()

    def __getitem__(self, key):
        """For regular dict lookups, check the current juju config first,
        then the previous (saved) copy. This ensures that user-saved values
        will be returned by a dict lookup.

        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return (self._prev_dict or {})[key]

    def get(self, key, default=None):
        """dict.get() honouring the previous-config fallback of
        __getitem__."""
        try:
            return self[key]
        except KeyError:
            return default

    def keys(self):
        """Union of current keys and keys saved by the previous hook."""
        prev_keys = []
        if self._prev_dict is not None:
            # list() the view: on Python 3 dict.keys() returns a view
            # object which cannot be concatenated to a list with '+'
            # (the original code raised TypeError here).
            prev_keys = list(self._prev_dict.keys())
        return list(set(prev_keys + list(dict.keys(self))))

    def load_previous(self, path=None):
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

            File path from which to load the previous config. If `None`,
            config is loaded from the default location. If `path` is
            specified, subsequent `save()` calls will write to the same
            path.

        """
        self.path = path or self.path
        with open(self.path) as f:
            self._prev_dict = json.load(f)

    def changed(self, key):
        """Return True if the current value for this key is different from
        the previous value.

        """
        # With no saved copy every key counts as changed.
        if self._prev_dict is None:
            return True
        return self.previous(key) != self.get(key)

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no previous value.

        """
        if self._prev_dict:
            return self._prev_dict.get(key)
        return None

    def save(self):
        """Save this config to disk.

        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        if self._prev_dict:
            # Carry forward previously-saved keys the current config
            # doesn't define (stdlib .items() replaces six.iteritems).
            for k, v in self._prev_dict.items():
                if k not in self:
                    self[k] = v
        with open(self.path, 'w') as f:
            json.dump(self, f)
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def config(scope=None):
    """Juju charm configuration"""
    cmd = ['config-get']
    if scope is not None:
        cmd.append(scope)
    cmd.append('--format=json')
    try:
        raw = subprocess.check_output(cmd).decode('UTF-8')
        config_data = json.loads(raw)
        if scope is not None:
            # A scoped lookup returns the bare value.
            return config_data
        return Config(config_data)
    except ValueError:
        # config-get produced something that isn't JSON.
        return None
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.extend(['-r', rid])
    # '-' asks relation-get for the full settings map.
    _args.append(attribute or '-')
    if unit:
        _args.append(unit)
    try:
        return json.loads(subprocess.check_output(_args).decode('UTF-8'))
    except ValueError:
        # Output wasn't JSON (eg, empty): no data available.
        return None
    except CalledProcessError as e:
        # relation-get exits 2 when the relation no longer exists.
        if e.returncode == 2:
            return None
        raise
|
|
||||||
|
|
||||||
|
|
||||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit"""
    settings = relation_settings if relation_settings else {}
    cmd = ['relation-set']
    if relation_id is not None:
        cmd.extend(('-r', relation_id))
    # kwargs are applied after relation_settings; a value of None clears
    # the key on the relation.
    for key, value in (list(settings.items()) + list(kwargs.items())):
        if value is None:
            cmd.append('{}='.format(key))
        else:
            cmd.append('{}={}'.format(key, value))
    subprocess.check_call(cmd)
    # Flush cache of any relation-gets for local unit
    flush(local_unit())
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_ids(reltype=None):
    """A list of relation_ids"""
    reltype = reltype or relation_type()
    # Without a relation type there is nothing to query.
    if reltype is None:
        return []
    cmd = ['relation-ids', '--format=json', reltype]
    return json.loads(subprocess.check_output(cmd).decode('UTF-8')) or []
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def related_units(relid=None):
    """A list of related units"""
    relid = relid or relation_id()
    cmd = ['relation-list', '--format=json']
    if relid is not None:
        cmd.extend(('-r', relid))
    # relation-list emits JSON null when there are no units.
    return json.loads(subprocess.check_output(cmd).decode('UTF-8')) or []
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_for_unit(unit=None, rid=None):
    """Get the json representation of a unit's relation"""
    unit = unit or remote_unit()
    data = relation_get(unit=unit, rid=rid)
    # Keys ending in '-list' hold space-separated values; expand them.
    for key in data:
        if key.endswith('-list'):
            data[key] = data[key].split()
    data['__unit__'] = unit
    return data
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations_for_id(relid=None):
    """Get relations of a specific relation ID"""
    # NOTE(review): when relid is None this falls back to relation_ids(),
    # which returns a *list* rather than a single id — confirm callers
    # always pass an explicit relid.
    relid = relid or relation_ids()
    results = []
    for unit in related_units(relid):
        unit_data = relation_for_unit(unit, relid)
        unit_data['__relid__'] = relid
        results.append(unit_data)
    return results
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations_of_type(reltype=None):
    """Get relations of a specific type"""
    reltype = reltype or relation_type()
    results = []
    for relid in relation_ids(reltype):
        for relation in relations_for_id(relid):
            relation['__relid__'] = relid
            results.append(relation)
    return results
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def metadata():
    """Get the current charm metadata.yaml contents as a python object"""
    md_path = os.path.join(charm_dir(), 'metadata.yaml')
    with open(md_path) as md:
        return yaml.safe_load(md)
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relation_types():
    """Get a list of relation types supported by this charm"""
    md = metadata()
    rel_types = []
    # Relations may be declared under any of these metadata sections.
    for section_name in ('provides', 'requires', 'peers'):
        section = md.get(section_name)
        if not section:
            continue
        rel_types.extend(section.keys())
    return rel_types
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def charm_name():
    """Get the name of the current charm as is specified on metadata.yaml"""
    md = metadata()
    return md.get('name')
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    rels = {}
    for reltype in relation_types():
        relids = {}
        for relid in relation_ids(reltype):
            # Include the local unit's own settings alongside every
            # remote unit's data.
            units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
            for unit in related_units(relid):
                units[unit] = relation_get(unit=unit, rid=relid)
            relids[relid] = units
        rels[reltype] = relids
    return rels
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def is_relation_made(relation, keys='private-address'):
    """Determine whether a relation is established.

    The relation counts as made when at least one related unit has a
    non-None value for every key in *keys* (a string or list of strings).
    """
    required = [keys] if isinstance(keys, str) else keys
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            values = [relation_get(k, rid=r_id, unit=unit)
                      for k in required]
            if None not in values:
                return True
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def open_port(port, protocol="TCP"):
    """Expose a service network port via the open-port hook tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
|
|
||||||
|
|
||||||
|
|
||||||
def close_port(port, protocol="TCP"):
    """Withdraw a service network port via the close-port hook tool."""
    subprocess.check_call(['close-port', '{}/{}'.format(port, protocol)])
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def unit_get(attribute):
    """Fetch *attribute* of the local unit via the unit-get hook tool.

    Returns None when the tool's output is not valid JSON.
    """
    cmd = ['unit-get', '--format=json', attribute]
    try:
        raw = subprocess.check_output(cmd).decode('UTF-8')
        return json.loads(raw)
    except ValueError:
        return None
|
|
||||||
|
|
||||||
|
|
||||||
def unit_public_ip():
    """Return this unit's public address, as reported by unit-get."""
    return unit_get('public-address')
|
|
||||||
|
|
||||||
|
|
||||||
def unit_private_ip():
    """Return this unit's private address, as reported by unit-get."""
    return unit_get('private-address')
|
|
||||||
|
|
||||||
|
|
||||||
class UnregisteredHookError(Exception):
    """Raised when a hook is executed that was never registered."""
|
|
||||||
|
|
||||||
|
|
||||||
class Hooks(object):
    """A convenient handler for hook functions.

    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            pass  # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            pass  # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self, config_save=True):
        super(Hooks, self).__init__()
        # Maps hook name -> handler callable.
        self._hooks = {}
        # When True, execute() persists the charm config after the hook.
        self._config_save = config_save

    def register(self, name, function):
        """Register *function* as the handler for hook *name*."""
        self._hooks[name] = function

    def execute(self, args):
        """Run the hook whose name is basename(args[0]).

        Raises UnregisteredHookError when no handler matches.
        """
        hook_name = os.path.basename(args[0])
        try:
            handler = self._hooks[hook_name]
        except KeyError:
            raise UnregisteredHookError(hook_name)
        handler()
        if self._config_save:
            cfg = config()
            if cfg.implicit_save:
                cfg.save()

    def hook(self, *hook_names):
        """Decorator registering a function under one or more hook names.

        The function's own name — and its dashed variant, when the name
        contains underscores — is always registered as well.
        """
        def wrapper(decorated):
            for name in hook_names:
                self.register(name, decorated)
            # The original used for/else with no break, so the name-based
            # registrations below always happen, with or without hook_names.
            self.register(decorated.__name__, decorated)
            if '_' in decorated.__name__:
                self.register(
                    decorated.__name__.replace('_', '-'), decorated)
            return decorated
        return wrapper
|
|
||||||
|
|
||||||
|
|
||||||
def charm_dir():
    """Return the root directory of the current charm (from $CHARM_DIR)."""
    return os.getenv('CHARM_DIR')
|
|
||||||
|
|
||||||
|
|
||||||
@cached
def action_get(key=None):
    """Return one action parameter, or all of them when *key* is None."""
    cmd = ['action-get']
    if key is not None:
        cmd.append(key)
    cmd.append('--format=json')
    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
|
||||||
|
|
||||||
|
|
||||||
def action_set(values):
    """Record result key/value pairs for the currently running action."""
    cmd = ['action-set'] + ['{}={}'.format(k, v) for k, v in values.items()]
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def action_fail(message):
    """Mark the running action as failed, with *message* as the reason.

    Results already recorded via action_set are preserved.
    """
    subprocess.check_call(['action-fail', message])
|
|
||||||
|
|
||||||
|
|
||||||
def status_set(workload_state, message):
    """Set the unit's workload state with a status message.

    Both appear in `juju status`. On juju < 1.23 the status-set tool does
    not exist; in that case the update is juju-logged instead.

    workload_state -- one of maintenance/blocked/waiting/active
    message -- status update message
    """
    if workload_state not in ('maintenance', 'blocked', 'waiting', 'active'):
        raise ValueError(
            '{!r} is not a valid workload state'.format(workload_state)
        )
    try:
        if subprocess.call(['status-set', workload_state, message]) == 0:
            return
    except OSError as e:
        # ENOENT means the tool is missing (old juju); anything else is real.
        if e.errno != errno.ENOENT:
            raise
    log_message = 'status-set failed: {} {}'.format(workload_state,
                                                    message)
    log(log_message, level='INFO')
|
|
||||||
|
|
||||||
|
|
||||||
def status_get():
    """Return the previously set juju workload state.

    Returns 'unknown' when the status-get tool is missing (juju < 1.23).
    """
    try:
        raw_status = subprocess.check_output(['status-get'],
                                             universal_newlines=True)
    except OSError as e:
        if e.errno == errno.ENOENT:
            return 'unknown'
        raise
    return raw_status.rstrip()
|
|
@ -1,450 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Tools for working with the host system"""
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import random
|
|
||||||
import string
|
|
||||||
import subprocess
|
|
||||||
import hashlib
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from collections import OrderedDict
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from .hookenv import log
|
|
||||||
from .fstab import Fstab
|
|
||||||
|
|
||||||
|
|
||||||
def service_start(service_name):
    """Start a system service; True when the service command succeeds."""
    return service('start', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_stop(service_name):
    """Stop a system service; True when the service command succeeds."""
    return service('stop', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_restart(service_name):
    """Restart a system service; True when the service command succeeds."""
    return service('restart', service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_reload(service_name, restart_on_failure=False):
    """Reload a system service.

    When the reload fails and restart_on_failure is True, a restart is
    attempted instead; the final success flag is returned.
    """
    reloaded = service('reload', service_name)
    if not reloaded and restart_on_failure:
        return service('restart', service_name)
    return reloaded
|
|
||||||
|
|
||||||
|
|
||||||
def service(action, service_name):
    """Run `service <name> <action>` and report success as a boolean."""
    return subprocess.call(['service', service_name, action]) == 0
|
|
||||||
|
|
||||||
|
|
||||||
def service_running(service):
    """Determine whether a system service is running"""
    # NOTE(review): the parameter shadows the sibling service() helper;
    # harmless here because this function never calls it.
    try:
        output = subprocess.check_output(
            ['service', service, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError:
        # Non-zero exit status: treat the service as not running.
        return False
    else:
        # Upstart reports "start/running"; SysV init scripts typically
        # report "... is running".
        if ("start/running" in output or "is running" in output):
            return True
        else:
            return False
|
|
||||||
|
|
||||||
|
|
||||||
def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        # A failing exit code still means the service exists, unless the
        # tool reported it as unrecognized.  e.output is bytes here.
        return b'unrecognized service' not in e.output
    else:
        return True
|
|
||||||
|
|
||||||
|
|
||||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system.

    The user is created as a system account (no home, no password) when
    system_user is True or no password is supplied.  Returns the pwd
    entry for the (possibly pre-existing) user.
    """
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))
    except KeyError:
        # KeyError from getpwnam means the user does not exist yet.
        log('creating user {0}'.format(username))
        cmd = ['useradd']
        if system_user or password is None:
            cmd.append('--system')
        else:
            cmd.extend([
                '--create-home',
                '--shell', shell,
                '--password', password,
            ])
        cmd.append(username)
        subprocess.check_call(cmd)
        user_info = pwd.getpwnam(username)
    return user_info
|
|
||||||
|
|
||||||
|
|
||||||
def add_group(group_name, system_group=False):
    """Add a group to the system.

    Returns the grp entry for the (possibly pre-existing) group.
    """
    try:
        group_info = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
    except KeyError:
        # KeyError from getgrnam means the group does not exist yet.
        log('creating group {0}'.format(group_name))
        cmd = ['addgroup']
        if system_group:
            cmd.append('--system')
        else:
            # NOTE(review): addgroup is Debian-specific; --group is
            # presumably passed for a regular (non-system) group — confirm.
            cmd.extend([
                '--group',
            ])
        cmd.append(group_name)
        subprocess.check_call(cmd)
        group_info = grp.getgrnam(group_name)
    return group_info
|
|
||||||
|
|
||||||
|
|
||||||
def add_user_to_group(username, group):
    """Append *username* to the membership of *group* via gpasswd."""
    log("Adding user {} to group {}".format(username, group))
    subprocess.check_call(['gpasswd', '-a', username, group])
|
|
||||||
|
|
||||||
|
|
||||||
def rsync(from_path, to_path, flags='-r', options=None):
    """Replicate from_path to to_path with rsync; returns its stdout.

    options defaults to ['--delete', '--executability'].
    """
    opts = options or ['--delete', '--executability']
    cmd = ['/usr/bin/rsync', flags] + opts + [from_path, to_path]
    log(" ".join(cmd))
    return subprocess.check_output(cmd).decode('UTF-8').strip()
|
|
||||||
|
|
||||||
|
|
||||||
def symlink(source, destination):
    """Force-create a symbolic link at *destination* pointing at *source*."""
    log("Symlinking {} as {}".format(source, destination))
    # -sf replaces any existing link at the destination.
    subprocess.check_call(['ln', '-sf', source, destination])
|
|
||||||
|
|
||||||
|
|
||||||
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
    """Create a directory owned by owner:group with mode perms.

    With force=True a pre-existing non-directory at *path* is removed
    and replaced by a directory.  Ownership and permissions are applied
    even when the directory already exists.
    """
    log("Making dir {} {}:{} {:o}".format(path, owner, group,
                                          perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    realpath = os.path.abspath(path)
    path_exists = os.path.exists(realpath)
    if path_exists and force:
        if not os.path.isdir(realpath):
            log("Removing non-directory file {} prior to mkdir()".format(path))
            os.unlink(realpath)
            os.makedirs(realpath, perms)
    elif not path_exists:
        os.makedirs(realpath, perms)
    # Applied unconditionally so an existing directory is normalized too;
    # chmod also corrects any umask effect on the makedirs mode.
    os.chown(realpath, uid, gid)
    os.chmod(realpath, perms)
|
|
||||||
|
|
||||||
|
|
||||||
def write_file(path, content, owner='root', group='root', perms=0o444):
    """Create or overwrite a file with the contents of a byte string."""
    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    with open(path, 'wb') as target:
        # Ownership/mode are set on the open handle before writing, so the
        # content never exists on disk with looser permissions.
        os.fchown(target.fileno(), uid, gid)
        os.fchmod(target.fileno(), perms)
        target.write(content)
|
|
||||||
|
|
||||||
|
|
||||||
def fstab_remove(mp):
    """Drop the /etc/fstab entry for mountpoint *mp*."""
    return Fstab.remove_by_mountpoint(mp)
|
|
||||||
|
|
||||||
|
|
||||||
def fstab_add(dev, mp, fs, options=None):
    """Add an entry for device *dev* at mountpoint *mp* to /etc/fstab."""
    return Fstab.add(dev, mp, fs, options=options)
|
|
||||||
|
|
||||||
|
|
||||||
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    Returns True on success, False on failure.  With persist=True a
    matching /etc/fstab entry is also added (the return value then comes
    from fstab_add).
    """
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])
    cmd_args.extend([device, mountpoint])
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        # Best-effort: log and report failure rather than raising.
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def umount(mountpoint, persist=False):
    """Unmount a filesystem.

    Returns True on success, False on failure.  With persist=True the
    matching /etc/fstab entry is removed as well (the return value then
    comes from fstab_remove).
    """
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)
    except subprocess.CalledProcessError as e:
        # Best-effort: log and report failure rather than raising.
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
        return fstab_remove(mountpoint)
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def mounts():
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    with open('/proc/mounts') as fh:
        # /proc/mounts lines start "device mountpoint ..."; reverse the
        # first two fields to yield [mountpoint, device].
        return [fields[1::-1] for fields in (line.split() for line in fh)]
|
|
||||||
|
|
||||||
|
|
||||||
def file_hash(path, hash_type='md5'):
    """Hash the contents of *path*, or None when the file does not exist.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
    """
    if not os.path.exists(path):
        return None
    digest = getattr(hashlib, hash_type)()
    with open(path, 'rb') as source:
        digest.update(source.read())
    return digest.hexdigest()
|
|
||||||
|
|
||||||
|
|
||||||
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str checksum: Expected digest of the file at *path*.
    :param str hash_type: Hash algorithm used to generate `checksum`;
                          any algorithm supported by :mod:`hashlib`.
    :raises ChecksumError: If the file fails the checksum
    """
    actual = file_hash(path, hash_type)
    if actual != checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual))
|
|
||||||
|
|
||||||
|
|
||||||
class ChecksumError(ValueError):
    """Raised when a file's digest does not match the expected checksum."""
|
|
||||||
|
|
||||||
|
|
||||||
def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

    This function is used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
            })
        def ceph_client_changed():
            pass  # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            # Snapshot the hash of each watched file before the call...
            checksums = {}
            for path in restart_map:
                checksums[path] = file_hash(path)
            # NOTE(review): wrapped_f discards f's return value.
            f(*args, **kwargs)
            # ...then restart the services mapped to any file that changed.
            restarts = []
            for path in restart_map:
                if checksums[path] != file_hash(path):
                    restarts += restart_map[path]
            # De-duplicate while preserving first-seen order.
            services_list = list(OrderedDict.fromkeys(restarts))
            if not stopstart:
                for service_name in services_list:
                    service('restart', service_name)
            else:
                # Full stop/start cycle: stop all, then start all.
                for action in ['stop', 'start']:
                    for service_name in services_list:
                        service(action, service_name)
        return wrapped_f
    return wrap
|
|
||||||
|
|
||||||
|
|
||||||
def lsb_release():
    """Return /etc/lsb-release in a dict.

    Keys are e.g. DISTRIB_ID, DISTRIB_RELEASE, DISTRIB_CODENAME and
    DISTRIB_DESCRIPTION; values are the (stripped) right-hand sides.
    """
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for line in lsb:
            # Split on the first '=' only: the old l.split('=') raised
            # ValueError when a value contained '=' or on blank lines.
            key, sep, value = line.partition('=')
            if sep:
                d[key.strip()] = value.strip()
    return d
|
|
||||||
|
|
||||||
|
|
||||||
def pwgen(length=None):
    """Generate a random password.

    When *length* is None a random length between 35 and 44 is used.
    """
    if length is None:
        # A random length is ok to use a weak PRNG
        length = random.choice(range(35, 45))
    # Drop easily-confused glyphs and vowels from the alphabet.
    alphabet = [c for c in string.ascii_letters + string.digits
                if c not in 'l0QD1vAEIOUaeiou']
    # The password characters come from a CSPRNG (e.g. /dev/urandom).
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(length))
|
|
||||||
|
|
||||||
|
|
||||||
def list_nics(nic_type):
    '''Return a list of nics of given type(s)'''
    # Accept a single prefix string or a list of prefixes (e.g. 'eth').
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type
    interfaces = []
    for int_type in int_types:
        cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line for line in ip_output if line)
        for line in ip_output:
            if line.split()[1].startswith(int_type):
                # VLAN sub-interfaces appear as "ethX.Y@ethX"; capture the
                # "ethX.Y" part.  Otherwise take the bare interface name.
                matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
                if matched:
                    interface = matched.groups()[0]
                else:
                    interface = line.split()[1].replace(":", "")
                interfaces.append(interface)

    return interfaces
|
|
||||||
|
|
||||||
|
|
||||||
def set_nic_mtu(nic, mtu):
    """Set the MTU of network interface *nic* via iproute2."""
    subprocess.check_call(['ip', 'link', 'set', nic, 'mtu', mtu])
|
|
||||||
|
|
||||||
|
|
||||||
def get_nic_mtu(nic):
    """Return the MTU of *nic* as a string ('' when none is reported)."""
    output = subprocess.check_output(
        ['ip', 'addr', 'show', nic]).decode('UTF-8')
    mtu = ""
    for line in output.split('\n'):
        tokens = line.split()
        if 'mtu' in tokens:
            # The value follows the 'mtu' keyword on the same line.
            mtu = tokens[tokens.index("mtu") + 1]
    return mtu
|
|
||||||
|
|
||||||
|
|
||||||
def get_nic_hwaddr(nic):
    """Return the hardware (MAC) address of *nic* ('' when not reported)."""
    output = subprocess.check_output(
        ['ip', '-o', '-0', 'addr', 'show', nic]).decode('UTF-8')
    tokens = output.split()
    if 'link/ether' not in tokens:
        return ""
    # The address follows the 'link/ether' keyword.
    return tokens[tokens.index('link/ether') + 1]
|
|
||||||
|
|
||||||
|
|
||||||
def cmp_pkgrevno(package, revno, pkgcache=None):
    '''Compare supplied revno with the revno of the installed package

    *  1 => Installed revno is greater than supplied arg
    *  0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    '''
    # Imported lazily so non-apt platforms can still import this module.
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    # version_compare returns >0 / 0 / <0; only the sign is meaningful.
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
|
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
def chdir(d):
    """Context manager: run the body with *d* as the working directory.

    The previous working directory is always restored on exit.
    """
    previous = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        os.chdir(previous)
|
|
||||||
|
|
||||||
|
|
||||||
def chownr(path, owner, group, follow_links=True):
    """Recursively re-own everything under *path* to owner:group.

    Broken symlinks are skipped.  With follow_links=False, the links
    themselves are re-owned (lchown) rather than their targets.
    NOTE: the top-level *path* itself is not re-owned, only its contents.
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    if follow_links:
        chown = os.chown
    else:
        chown = os.lchown

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            # lexists-but-not-exists means a dangling symlink; skip it.
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                chown(full, uid, gid)
|
|
||||||
|
|
||||||
|
|
||||||
def lchownr(path, owner, group):
    """Recursively re-own *path* contents without following symlinks."""
    chownr(path, owner, group, follow_links=False)
|
|
@ -1,329 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import json
|
|
||||||
from collections import Iterable, OrderedDict
|
|
||||||
|
|
||||||
from charmhelpers.core import host
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['ServiceManager', 'ManagerCallback',
|
|
||||||
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
|
||||||
'service_restart', 'service_stop']
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceManager(object):
|
|
||||||
def __init__(self, services=None):
|
|
||||||
"""
|
|
||||||
Register a list of services, given their definitions.
|
|
||||||
|
|
||||||
Service definitions are dicts in the following formats (all keys except
|
|
||||||
'service' are optional)::
|
|
||||||
|
|
||||||
{
|
|
||||||
"service": <service name>,
|
|
||||||
"required_data": <list of required data contexts>,
|
|
||||||
"provided_data": <list of provided data contexts>,
|
|
||||||
"data_ready": <one or more callbacks>,
|
|
||||||
"data_lost": <one or more callbacks>,
|
|
||||||
"start": <one or more callbacks>,
|
|
||||||
"stop": <one or more callbacks>,
|
|
||||||
"ports": <list of ports to manage>,
|
|
||||||
}
|
|
||||||
|
|
||||||
The 'required_data' list should contain dicts of required data (or
|
|
||||||
dependency managers that act like dicts and know how to collect the data).
|
|
||||||
Only when all items in the 'required_data' list are populated are the list
|
|
||||||
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
|
||||||
information.
|
|
||||||
|
|
||||||
The 'provided_data' list should contain relation data providers, most likely
|
|
||||||
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
|
||||||
that will indicate a set of data to set on a given relation.
|
|
||||||
|
|
||||||
The 'data_ready' value should be either a single callback, or a list of
|
|
||||||
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
|
||||||
Each callback will be called with the service name as the only parameter.
|
|
||||||
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
|
||||||
are fired.
|
|
||||||
|
|
||||||
The 'data_lost' value should be either a single callback, or a list of
|
|
||||||
callbacks, to be called when a 'required_data' item no longer passes
|
|
||||||
`is_ready()`. Each callback will be called with the service name as the
|
|
||||||
only parameter. After all of the 'data_lost' callbacks are called,
|
|
||||||
the 'stop' callbacks are fired.
|
|
||||||
|
|
||||||
The 'start' value should be either a single callback, or a list of
|
|
||||||
callbacks, to be called when starting the service, after the 'data_ready'
|
|
||||||
callbacks are complete. Each callback will be called with the service
|
|
||||||
name as the only parameter. This defaults to
|
|
||||||
`[host.service_start, services.open_ports]`.
|
|
||||||
|
|
||||||
The 'stop' value should be either a single callback, or a list of
|
|
||||||
callbacks, to be called when stopping the service. If the service is
|
|
||||||
being stopped because it no longer has all of its 'required_data', this
|
|
||||||
will be called after all of the 'data_lost' callbacks are complete.
|
|
||||||
Each callback will be called with the service name as the only parameter.
|
|
||||||
This defaults to `[services.close_ports, host.service_stop]`.
|
|
||||||
|
|
||||||
The 'ports' value should be a list of ports to manage. The default
|
|
||||||
'start' handler will open the ports after the service is started,
|
|
||||||
and the default 'stop' handler will close the ports prior to stopping
|
|
||||||
the service.
|
|
||||||
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
The following registers an Upstart service called bingod that depends on
|
|
||||||
a mongodb relation and which runs a custom `db_migrate` function prior to
|
|
||||||
restarting the service, and a Runit service called spadesd::
|
|
||||||
|
|
||||||
manager = services.ServiceManager([
|
|
||||||
{
|
|
||||||
'service': 'bingod',
|
|
||||||
'ports': [80, 443],
|
|
||||||
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
|
||||||
'data_ready': [
|
|
||||||
services.template(source='bingod.conf'),
|
|
||||||
services.template(source='bingod.ini',
|
|
||||||
target='/etc/bingod.ini',
|
|
||||||
owner='bingo', perms=0400),
|
|
||||||
],
|
|
||||||
},
|
|
||||||
{
|
|
||||||
'service': 'spadesd',
|
|
||||||
'data_ready': services.template(source='spadesd_run.j2',
|
|
||||||
target='/etc/sv/spadesd/run',
|
|
||||||
perms=0555),
|
|
||||||
'start': runit_start,
|
|
||||||
'stop': runit_stop,
|
|
||||||
},
|
|
||||||
])
|
|
||||||
manager.manage()
|
|
||||||
"""
|
|
||||||
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
|
||||||
self._ready = None
|
|
||||||
self.services = OrderedDict()
|
|
||||||
for service in services or []:
|
|
||||||
service_name = service['service']
|
|
||||||
self.services[service_name] = service
|
|
||||||
|
|
||||||
def manage(self):
|
|
||||||
"""
|
|
||||||
Handle the current hook by doing The Right Thing with the registered services.
|
|
||||||
"""
|
|
||||||
hook_name = hookenv.hook_name()
|
|
||||||
if hook_name == 'stop':
|
|
||||||
self.stop_services()
|
|
||||||
else:
|
|
||||||
self.provide_data()
|
|
||||||
self.reconfigure_services()
|
|
||||||
cfg = hookenv.config()
|
|
||||||
if cfg.implicit_save:
|
|
||||||
cfg.save()
|
|
||||||
|
|
||||||
def provide_data(self):
|
|
||||||
"""
|
|
||||||
Set the relation data for each provider in the ``provided_data`` list.
|
|
||||||
|
|
||||||
A provider must have a `name` attribute, which indicates which relation
|
|
||||||
to set data on, and a `provide_data()` method, which returns a dict of
|
|
||||||
data to set.
|
|
||||||
"""
|
|
||||||
hook_name = hookenv.hook_name()
|
|
||||||
for service in self.services.values():
|
|
||||||
for provider in service.get('provided_data', []):
|
|
||||||
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
|
|
||||||
data = provider.provide_data()
|
|
||||||
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
|
|
||||||
if _ready:
|
|
||||||
hookenv.relation_set(None, data)
|
|
||||||
|
|
||||||
def reconfigure_services(self, *service_names):
|
|
||||||
"""
|
|
||||||
Update all files for one or more registered services, and,
|
|
||||||
if ready, optionally restart them.
|
|
||||||
|
|
||||||
If no service names are given, reconfigures all registered services.
|
|
||||||
"""
|
|
||||||
for service_name in service_names or self.services.keys():
|
|
||||||
if self.is_ready(service_name):
|
|
||||||
self.fire_event('data_ready', service_name)
|
|
||||||
self.fire_event('start', service_name, default=[
|
|
||||||
service_restart,
|
|
||||||
manage_ports])
|
|
||||||
self.save_ready(service_name)
|
|
||||||
else:
|
|
||||||
if self.was_ready(service_name):
|
|
||||||
self.fire_event('data_lost', service_name)
|
|
||||||
self.fire_event('stop', service_name, default=[
|
|
||||||
manage_ports,
|
|
||||||
service_stop])
|
|
||||||
self.save_lost(service_name)
|
|
||||||
|
|
||||||
def stop_services(self, *service_names):
    """
    Stop one or more registered services, by name.

    If no service names are given, stops all registered services.
    """
    targets = service_names or self.services.keys()
    for service_name in targets:
        # Close ports first, then stop the underlying OS service.
        self.fire_event('stop', service_name, default=[
            manage_ports,
            service_stop])
|
|
||||||
|
|
||||||
def get_service(self, service_name):
    """
    Given the name of a registered service, return its service definition.

    :raises KeyError: if no service with that name is registered.
    """
    definition = self.services.get(service_name)
    if not definition:
        raise KeyError('Service not registered: %s' % service_name)
    return definition
|
|
||||||
|
|
||||||
def fire_event(self, event_name, service_name, default=None):
    """
    Fire a data_ready, data_lost, start, or stop event on a given service.

    Falls back to the *default* callback list when the service definition
    does not provide callbacks for *event_name*.
    """
    handlers = self.get_service(service_name).get(event_name, default)
    if not handlers:
        return
    # Normalise a single callable into a one-element list.
    if not isinstance(handlers, Iterable):
        handlers = [handlers]
    for handler in handlers:
        if isinstance(handler, ManagerCallback):
            # Manager-aware callbacks get the full calling context.
            handler(self, service_name, event_name)
        else:
            handler(service_name)
|
|
||||||
|
|
||||||
def is_ready(self, service_name):
    """
    Determine if a registered service is ready, by checking its 'required_data'.

    A 'required_data' item can be any mapping type, and is considered ready
    if `bool(item)` evaluates as True.
    """
    required = self.get_service(service_name).get('required_data', [])
    return all(map(bool, required))
|
|
||||||
|
|
||||||
def _load_ready_file(self):
|
|
||||||
if self._ready is not None:
|
|
||||||
return
|
|
||||||
if os.path.exists(self._ready_file):
|
|
||||||
with open(self._ready_file) as fp:
|
|
||||||
self._ready = set(json.load(fp))
|
|
||||||
else:
|
|
||||||
self._ready = set()
|
|
||||||
|
|
||||||
def _save_ready_file(self):
|
|
||||||
if self._ready is None:
|
|
||||||
return
|
|
||||||
with open(self._ready_file, 'w') as fp:
|
|
||||||
json.dump(list(self._ready), fp)
|
|
||||||
|
|
||||||
def save_ready(self, service_name):
    """
    Save an indicator that the given service is now data_ready.

    Persisted immediately so the flag survives across hook invocations.
    """
    self._load_ready_file()
    self._ready.add(service_name)
    self._save_ready_file()
|
|
||||||
|
|
||||||
def save_lost(self, service_name):
    """
    Save an indicator that the given service is no longer data_ready.

    Uses set.discard, so clearing a service that was never marked ready
    is a harmless no-op.
    """
    self._load_ready_file()
    self._ready.discard(service_name)
    self._save_ready_file()
|
|
||||||
|
|
||||||
def was_ready(self, service_name):
    """
    Determine if the given service was previously data_ready.

    Reads the persisted ready-set (loading it on first use).
    """
    self._load_ready_file()
    return service_name in self._ready
|
|
||||||
|
|
||||||
|
|
||||||
class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service it's being triggered for
        * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        # Abstract: concrete callbacks (e.g. PortManagerCallback) override this.
        raise NotImplementedError()
|
|
||||||
|
|
||||||
|
|
||||||
class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        # Ports the service definition wants active right now.
        desired_ports = manager.get_service(service_name).get('ports', [])
        ports_path = os.path.join(hookenv.charm_dir(),
                                  '.{}.ports'.format(service_name))

        # Close any previously-recorded port that is no longer desired.
        if os.path.exists(ports_path):
            with open(ports_path) as fp:
                for recorded in fp.read().split(','):
                    if not recorded:
                        continue
                    recorded = int(recorded)
                    if recorded not in desired_ports:
                        hookenv.close_port(recorded)

        # Persist the current port list for the next invocation.
        with open(ports_path, 'w') as fp:
            fp.write(','.join(str(port) for port in desired_ports))

        # Open on start, close on stop.
        for port in desired_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)
|
|
||||||
|
|
||||||
|
|
||||||
def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    # Only issue a stop when the service is actually running.
    if not host.service_running(service_name):
        return
    host.service_stop(service_name)
|
|
||||||
|
|
||||||
|
|
||||||
def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if not host.service_available(service_name):
        return
    # Restart when running; a plain start brings it up otherwise.
    if host.service_running(service_name):
        host.service_restart(service_name)
    else:
        host.service_start(service_name)
|
|
||||||
|
|
||||||
|
|
||||||
# Convenience aliases: one shared PortManagerCallback instance serves both as
# the start action (opens ports) and the stop action (closes ports).
open_ports = close_ports = manage_ports = PortManagerCallback()
|
|
@ -1,267 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import yaml
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import templating
|
|
||||||
|
|
||||||
from charmhelpers.core.services.base import ManagerCallback
|
|
||||||
|
|
||||||
|
|
||||||
__all__ = ['RelationContext', 'TemplateCallback',
|
|
||||||
'render_template', 'template']
|
|
||||||
|
|
||||||
|
|
||||||
class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete. The data for all interfaces matching
    the `name` attribute that are complete will be used to populate the
    dictionary values (see `get_data`, below).

    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None

    def __init__(self, name=None, additional_required_keys=None):
        # Work on a copy of `required_keys`: if a subclass declares it as a
        # *class* attribute, extending the original list in place below would
        # leak the extra keys into every other instance of that subclass.
        self.required_keys = list(getattr(self, 'required_keys', []))

        if name is not None:
            self.name = name
        if additional_required_keys:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    # Python 2 truthiness protocol.
    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`. This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexicographically first by the service ID, then by
        the unit ID. Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`. However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                # Only store unit data that has every required key.
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}
|
|
||||||
|
|
||||||
|
|
||||||
class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'

    def __init__(self, *args, **kwargs):
        # A complete mysql relation must expose full connection credentials.
        self.required_keys = ['host', 'user', 'password', 'database']
        super(MysqlRelation, self).__init__(*args, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'

    def __init__(self, *args, **kwargs):
        self.required_keys = ['host', 'port']
        super(HttpRelation, self).__init__(*args, **kwargs)

    def provide_data(self):
        """Advertise this unit's private address on the default HTTP port."""
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }
|
|
||||||
|
|
||||||
|
|
||||||
class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            # safe_load instead of yaml.load: config.yaml is plain charm
            # metadata and must never be able to construct arbitrary Python
            # objects via YAML tags.
            self.config = yaml.safe_load(fp).get('options', {})

    def __bool__(self):
        """True once every required option differs from its declared default."""
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            # None and '' are both treated as "unset"; a change between the
            # two does not count as the option having been configured.
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        # Python 2 truthiness protocol.
        return self.__bool__()
|
|
||||||
|
|
||||||
|
|
||||||
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        """Persist *config_data* as YAML to *file_name* (mode 0600)."""
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            # Restrict permissions before writing any bytes: the stored
            # context may contain secrets such as generated passwords.
            os.fchmod(file_stream.fileno(), 0o600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        """Load and return the stored context from *file_name*.

        :raises OSError: if the file exists but contains no data.
        """
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            # safe_load instead of yaml.load: never construct arbitrary
            # Python objects from the stored YAML.
            data = yaml.safe_load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data
|
|
||||||
|
|
||||||
|
|
||||||
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready
    action.

    :param str source: The template source file, relative to
        `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target,
                 owner='root', group='root', perms=0o444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        # Merge every required_data context into a single template context;
        # later entries win on key collisions.
        merged_context = {}
        for ctx in manager.get_service(service_name).get('required_data', []):
            merged_context.update(ctx)
        templating.render(self.source, self.target, merged_context,
                          self.owner, self.group, self.perms)
|
|
||||||
|
|
||||||
|
|
||||||
# Convenience aliases for templates: both names construct a TemplateCallback.
render_template = template = TemplateCallback
|
|
@ -1,42 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.

    :raises ValueError: for non-string input or unrecognised strings.
    """
    if not isinstance(value, six.string_types):
        raise ValueError(
            "Unable to interpret non-string value '%s' as boolean" % (value))

    # Normalise: unicode, trimmed, case-insensitive.
    value = six.text_type(value).strip().lower()

    if value in ('y', 'yes', 'true', 't', 'on'):
        return True
    if value in ('n', 'no', 'false', 'f', 'off'):
        return False

    raise ValueError(
        "Unable to interpret string value '%s' as boolean" % (value))
|
|
@ -1,56 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from subprocess import check_call
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
|
||||||
|
|
||||||
|
|
||||||
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array and applies it.

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    # yaml.safe_load('') returns None; bail out instead of crashing on
    # .items() below when there is nothing to apply.
    if not sysctl_dict_parsed:
        log("sysctl_dict is empty, nothing to update", level=DEBUG)
        return

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    # Apply the new settings immediately.
    check_call(["sysctl", "-p", sysctl_file])
|
|
@ -1,68 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
from charmhelpers.core import host
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def render(source, target, context, owner='root', group='root',
           perms=0o444, templates_dir=None, encoding='UTF-8'):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        # Install jinja2 on the fly, then retry the import.
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        # (dropped a redundant `source = source` self-assignment here)
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    # Make sure the target directory exists before writing the rendered file.
    host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
    host.write_file(target, content.encode(encoding), owner, group, perms)
|
|
@ -1,477 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
#
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
#
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Kapil Thangavelu <kapil.foss@gmail.com>
|
|
||||||
#
|
|
||||||
"""
|
|
||||||
Intro
|
|
||||||
-----
|
|
||||||
|
|
||||||
A simple way to store state in units. This provides a key value
|
|
||||||
storage with support for versioned, transactional operation,
|
|
||||||
and can calculate deltas from previous values to simplify unit logic
|
|
||||||
when processing changes.
|
|
||||||
|
|
||||||
|
|
||||||
Hook Integration
|
|
||||||
----------------
|
|
||||||
|
|
||||||
There are several extant frameworks for hook execution, including
|
|
||||||
|
|
||||||
- charmhelpers.core.hookenv.Hooks
|
|
||||||
- charmhelpers.core.services.ServiceManager
|
|
||||||
|
|
||||||
The storage classes are framework agnostic, one simple integration is
|
|
||||||
via the HookData contextmanager. It will record the current hook
|
|
||||||
execution environment (including relation data, config data, etc.),
|
|
||||||
setup a transaction and allow easy access to the changes from
|
|
||||||
previously seen values. One consequence of the integration is the
|
|
||||||
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
|
||||||
'charm_revisions') for their respective values.
|
|
||||||
|
|
||||||
Here's a fully worked integration example using hookenv.Hooks::
|
|
||||||
|
|
||||||
from charmhelper.core import hookenv, unitdata
|
|
||||||
|
|
||||||
hook_data = unitdata.HookData()
|
|
||||||
db = unitdata.kv()
|
|
||||||
hooks = hookenv.Hooks()
|
|
||||||
|
|
||||||
@hooks.hook
|
|
||||||
def config_changed():
|
|
||||||
# Print all changes to configuration from previously seen
|
|
||||||
# values.
|
|
||||||
for changed, (prev, cur) in hook_data.conf.items():
|
|
||||||
print('config changed', changed,
|
|
||||||
'previous value', prev,
|
|
||||||
'current value', cur)
|
|
||||||
|
|
||||||
        # Get some unit specific bookkeeping
|
|
||||||
if not db.get('pkg_key'):
|
|
||||||
key = urllib.urlopen('https://example.com/pkg_key').read()
|
|
||||||
db.set('pkg_key', key)
|
|
||||||
|
|
||||||
# Directly access all charm config as a mapping.
|
|
||||||
conf = db.getrange('config', True)
|
|
||||||
|
|
||||||
# Directly access all relation data as a mapping
|
|
||||||
rels = db.getrange('rels', True)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
with hook_data():
|
|
||||||
                hooks.execute()
|
|
||||||
|
|
||||||
|
|
||||||
A more basic integration is via the hook_scope context manager which simply
|
|
||||||
manages transaction scope (and records hook name, and timestamp)::
|
|
||||||
|
|
||||||
>>> from unitdata import kv
|
|
||||||
>>> db = kv()
|
|
||||||
>>> with db.hook_scope('install'):
|
|
||||||
... # do work, in transactional scope.
|
|
||||||
... db.set('x', 1)
|
|
||||||
>>> db.get('x')
|
|
||||||
1
|
|
||||||
|
|
||||||
|
|
||||||
Usage
|
|
||||||
-----
|
|
||||||
|
|
||||||
Values are automatically json de/serialized to preserve basic typing
|
|
||||||
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
|
||||||
|
|
||||||
Individual values can be manipulated via get/set::
|
|
||||||
|
|
||||||
>>> kv.set('y', True)
|
|
||||||
>>> kv.get('y')
|
|
||||||
True
|
|
||||||
|
|
||||||
# We can set complex values (dicts, lists) as a single key.
|
|
||||||
  >>> kv.set('config', {'a': 1, 'b': True})
|
|
||||||
|
|
||||||
# Also supports returning dictionaries as a record which
|
|
||||||
# provides attribute access.
|
|
||||||
>>> config = kv.get('config', record=True)
|
|
||||||
>>> config.b
|
|
||||||
True
|
|
||||||
|
|
||||||
|
|
||||||
Groups of keys can be manipulated with update/getrange::
|
|
||||||
|
|
||||||
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
|
||||||
>>> kv.getrange('gui.', strip=True)
|
|
||||||
{'z': 1, 'y': 2}
|
|
||||||
|
|
||||||
When updating values, its very helpful to understand which values
|
|
||||||
have actually changed and how have they changed. The storage
|
|
||||||
provides a delta method to provide for this::
|
|
||||||
|
|
||||||
>>> data = {'debug': True, 'option': 2}
|
|
||||||
>>> delta = kv.delta(data, 'config.')
|
|
||||||
>>> delta.debug.previous
|
|
||||||
None
|
|
||||||
>>> delta.debug.current
|
|
||||||
True
|
|
||||||
>>> delta
|
|
||||||
{'debug': (None, True), 'option': (None, 2)}
|
|
||||||
|
|
||||||
Note the delta method does not persist the actual change, it needs to
|
|
||||||
be explicitly saved via 'update' method::
|
|
||||||
|
|
||||||
>>> kv.update(data, 'config.')
|
|
||||||
|
|
||||||
Values modified in the context of a hook scope retain historical values
|
|
||||||
associated to the hookname.
|
|
||||||
|
|
||||||
>>> with db.hook_scope('config-changed'):
|
|
||||||
... db.set('x', 42)
|
|
||||||
>>> db.gethistory('x')
|
|
||||||
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
|
||||||
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import contextlib
|
|
||||||
import datetime
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import pprint
|
|
||||||
import sqlite3
|
|
||||||
import sys
|
|
||||||
|
|
||||||
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
|
||||||
|
|
||||||
|
|
||||||
class Storage(object):
|
|
||||||
"""Simple key value database for local unit state within charms.
|
|
||||||
|
|
||||||
Modifications are automatically committed at hook exit. That's
|
|
||||||
currently regardless of exit code.
|
|
||||||
|
|
||||||
To support dicts, lists, integer, floats, and booleans values
|
|
||||||
are automatically json encoded/decoded.
|
|
||||||
"""
|
|
||||||
def __init__(self, path=None):
    """Open (creating if necessary) the sqlite-backed store.

    :param path: database file path; defaults to
        ``$CHARM_DIR/.unit-state.db``.
    """
    if path is None:
        path = os.path.join(
            os.environ.get('CHARM_DIR', ''), '.unit-state.db')
    self.db_path = path
    self.conn = sqlite3.connect('%s' % self.db_path)
    self.cursor = self.conn.cursor()
    # `revision` is set inside hook_scope(); None means unversioned writes.
    self.revision = None
    self._closed = False
    self._init()
|
|
||||||
|
|
||||||
def close(self):
    """Flush pending changes and close the database connection (idempotent)."""
    if self._closed:
        return
    # Flush without recording a new revision entry.
    self.flush(False)
    self.cursor.close()
    self.conn.close()
    self._closed = True
|
|
||||||
|
|
||||||
def _scoped_query(self, stmt, params=None):
|
|
||||||
if params is None:
|
|
||||||
params = []
|
|
||||||
return stmt, params
|
|
||||||
|
|
||||||
def get(self, key, default=None, record=False):
    """Return the JSON-decoded value stored under *key*.

    Returns *default* when the key is absent; wraps the value in a
    Record (attribute-access dict) when *record* is True.
    """
    self.cursor.execute(
        *self._scoped_query(
            'select data from kv where key=?', [key]))
    row = self.cursor.fetchone()
    if not row:
        return default
    value = json.loads(row[0])
    return Record(value) if record else value
|
|
||||||
|
|
||||||
def getrange(self, key_prefix, strip=False):
    """Return a dict of all keys beginning with *key_prefix*.

    Returns None when nothing matches. When *strip* is True the prefix
    is removed from the keys of the returned dict.
    """
    # Bind the LIKE pattern as a qmark parameter instead of %-formatting it
    # into the SQL text: a prefix containing a quote would otherwise break
    # (or inject into) the statement.
    stmt = "select key, data from kv where key like ?"
    self.cursor.execute(*self._scoped_query(stmt, ['%s%%' % key_prefix]))
    result = self.cursor.fetchall()

    if not result:
        return None
    if not strip:
        key_prefix = ''
    return dict([
        (k[len(key_prefix):], json.loads(v)) for k, v in result])
|
|
||||||
|
|
||||||
def update(self, mapping, prefix=""):
|
|
||||||
for k, v in mapping.items():
|
|
||||||
self.set("%s%s" % (prefix, k), v)
|
|
||||||
|
|
||||||
def unset(self, key):
|
|
||||||
self.cursor.execute('delete from kv where key=?', [key])
|
|
||||||
if self.revision and self.cursor.rowcount:
|
|
||||||
self.cursor.execute(
|
|
||||||
'insert into kv_revisions values (?, ?, ?)',
|
|
||||||
[key, self.revision, json.dumps('DELETED')])
|
|
||||||
|
|
||||||
def set(self, key, value):
|
|
||||||
serialized = json.dumps(value)
|
|
||||||
|
|
||||||
self.cursor.execute(
|
|
||||||
'select data from kv where key=?', [key])
|
|
||||||
exists = self.cursor.fetchone()
|
|
||||||
|
|
||||||
# Skip mutations to the same value
|
|
||||||
if exists:
|
|
||||||
if exists[0] == serialized:
|
|
||||||
return value
|
|
||||||
|
|
||||||
if not exists:
|
|
||||||
self.cursor.execute(
|
|
||||||
'insert into kv (key, data) values (?, ?)',
|
|
||||||
(key, serialized))
|
|
||||||
else:
|
|
||||||
self.cursor.execute('''
|
|
||||||
update kv
|
|
||||||
set data = ?
|
|
||||||
where key = ?''', [serialized, key])
|
|
||||||
|
|
||||||
# Save
|
|
||||||
if not self.revision:
|
|
||||||
return value
|
|
||||||
|
|
||||||
self.cursor.execute(
|
|
||||||
'select 1 from kv_revisions where key=? and revision=?',
|
|
||||||
[key, self.revision])
|
|
||||||
exists = self.cursor.fetchone()
|
|
||||||
|
|
||||||
if not exists:
|
|
||||||
self.cursor.execute(
|
|
||||||
'''insert into kv_revisions (
|
|
||||||
revision, key, data) values (?, ?, ?)''',
|
|
||||||
(self.revision, key, serialized))
|
|
||||||
else:
|
|
||||||
self.cursor.execute(
|
|
||||||
'''
|
|
||||||
update kv_revisions
|
|
||||||
set data = ?
|
|
||||||
where key = ?
|
|
||||||
and revision = ?''',
|
|
||||||
[serialized, key, self.revision])
|
|
||||||
|
|
||||||
return value
|
|
||||||
|
|
||||||
def delta(self, mapping, prefix):
|
|
||||||
"""
|
|
||||||
return a delta containing values that have changed.
|
|
||||||
"""
|
|
||||||
previous = self.getrange(prefix, strip=True)
|
|
||||||
if not previous:
|
|
||||||
pk = set()
|
|
||||||
else:
|
|
||||||
pk = set(previous.keys())
|
|
||||||
ck = set(mapping.keys())
|
|
||||||
delta = DeltaSet()
|
|
||||||
|
|
||||||
# added
|
|
||||||
for k in ck.difference(pk):
|
|
||||||
delta[k] = Delta(None, mapping[k])
|
|
||||||
|
|
||||||
# removed
|
|
||||||
for k in pk.difference(ck):
|
|
||||||
delta[k] = Delta(previous[k], None)
|
|
||||||
|
|
||||||
# changed
|
|
||||||
for k in pk.intersection(ck):
|
|
||||||
c = mapping[k]
|
|
||||||
p = previous[k]
|
|
||||||
if c != p:
|
|
||||||
delta[k] = Delta(p, c)
|
|
||||||
|
|
||||||
return delta
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def hook_scope(self, name=""):
|
|
||||||
"""Scope all future interactions to the current hook execution
|
|
||||||
revision."""
|
|
||||||
assert not self.revision
|
|
||||||
self.cursor.execute(
|
|
||||||
'insert into hooks (hook, date) values (?, ?)',
|
|
||||||
(name or sys.argv[0],
|
|
||||||
datetime.datetime.utcnow().isoformat()))
|
|
||||||
self.revision = self.cursor.lastrowid
|
|
||||||
try:
|
|
||||||
yield self.revision
|
|
||||||
self.revision = None
|
|
||||||
except:
|
|
||||||
self.flush(False)
|
|
||||||
self.revision = None
|
|
||||||
raise
|
|
||||||
else:
|
|
||||||
self.flush()
|
|
||||||
|
|
||||||
def flush(self, save=True):
|
|
||||||
if save:
|
|
||||||
self.conn.commit()
|
|
||||||
elif self._closed:
|
|
||||||
return
|
|
||||||
else:
|
|
||||||
self.conn.rollback()
|
|
||||||
|
|
||||||
def _init(self):
|
|
||||||
self.cursor.execute('''
|
|
||||||
create table if not exists kv (
|
|
||||||
key text,
|
|
||||||
data text,
|
|
||||||
primary key (key)
|
|
||||||
)''')
|
|
||||||
self.cursor.execute('''
|
|
||||||
create table if not exists kv_revisions (
|
|
||||||
key text,
|
|
||||||
revision integer,
|
|
||||||
data text,
|
|
||||||
primary key (key, revision)
|
|
||||||
)''')
|
|
||||||
self.cursor.execute('''
|
|
||||||
create table if not exists hooks (
|
|
||||||
version integer primary key autoincrement,
|
|
||||||
hook text,
|
|
||||||
date text
|
|
||||||
)''')
|
|
||||||
self.conn.commit()
|
|
||||||
|
|
||||||
def gethistory(self, key, deserialize=False):
|
|
||||||
self.cursor.execute(
|
|
||||||
'''
|
|
||||||
select kv.revision, kv.key, kv.data, h.hook, h.date
|
|
||||||
from kv_revisions kv,
|
|
||||||
hooks h
|
|
||||||
where kv.key=?
|
|
||||||
and kv.revision = h.version
|
|
||||||
''', [key])
|
|
||||||
if deserialize is False:
|
|
||||||
return self.cursor.fetchall()
|
|
||||||
return map(_parse_history, self.cursor.fetchall())
|
|
||||||
|
|
||||||
def debug(self, fh=sys.stderr):
|
|
||||||
self.cursor.execute('select * from kv')
|
|
||||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
|
||||||
self.cursor.execute('select * from kv_revisions')
|
|
||||||
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
|
||||||
|
|
||||||
|
|
||||||
def _parse_history(d):
    """Decode one kv_revisions row: JSON-decode the payload and parse
    the ISO-format hook timestamp into a datetime."""
    revision, key, payload, hook = d[0], d[1], d[2], d[3]
    when = datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f")
    return (revision, key, json.loads(payload), hook, when)
|
|
||||||
|
|
||||||
|
|
||||||
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelper.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hook.execute()

    """
    def __init__(self):
        # Shared Storage singleton; conf/rels hold the config and
        # relation deltas computed for the current hook invocation.
        self.kv = kv()
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        """Scope one hook execution: open a kv hook scope, record unit
        data, and yield (kv, config delta, relation delta)."""
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        """Track the set of charm revisions this unit has seen."""
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        charm_rev = open(
            os.path.join(charm_dir, 'revision')).read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        """Persist the hook execution environment into kv and return
        the (config delta, relation delta) pair."""
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
|
|
||||||
|
|
||||||
|
|
||||||
class Record(dict):
    """Dictionary subclass exposing its keys as read-only attributes."""

    __slots__ = ()

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)
|
|
||||||
|
|
||||||
|
|
||||||
class DeltaSet(Record):
    """Record mapping key -> Delta, as produced by Storage.delta()."""

    __slots__ = ()
|
|
||||||
|
|
||||||
|
|
||||||
Delta = collections.namedtuple('Delta', ['previous', 'current'])
|
|
||||||
|
|
||||||
|
|
||||||
_KV = None
|
|
||||||
|
|
||||||
|
|
||||||
def kv():
    """Return the module-level Storage singleton, creating it on
    first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
|
|
@ -1,439 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import importlib
|
|
||||||
from tempfile import NamedTemporaryFile
|
|
||||||
import time
|
|
||||||
from yaml import safe_load
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
lsb_release
|
|
||||||
)
|
|
||||||
import subprocess
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log,
|
|
||||||
)
|
|
||||||
import os
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
from urllib.parse import urlparse, urlunparse
|
|
||||||
else:
|
|
||||||
from urlparse import urlparse, urlunparse
|
|
||||||
|
|
||||||
|
|
||||||
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
|
||||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
|
||||||
"""
|
|
||||||
PROPOSED_POCKET = """# Proposed
|
|
||||||
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
|
|
||||||
"""
|
|
||||||
CLOUD_ARCHIVE_POCKETS = {
|
|
||||||
# Folsom
|
|
||||||
'folsom': 'precise-updates/folsom',
|
|
||||||
'precise-folsom': 'precise-updates/folsom',
|
|
||||||
'precise-folsom/updates': 'precise-updates/folsom',
|
|
||||||
'precise-updates/folsom': 'precise-updates/folsom',
|
|
||||||
'folsom/proposed': 'precise-proposed/folsom',
|
|
||||||
'precise-folsom/proposed': 'precise-proposed/folsom',
|
|
||||||
'precise-proposed/folsom': 'precise-proposed/folsom',
|
|
||||||
# Grizzly
|
|
||||||
'grizzly': 'precise-updates/grizzly',
|
|
||||||
'precise-grizzly': 'precise-updates/grizzly',
|
|
||||||
'precise-grizzly/updates': 'precise-updates/grizzly',
|
|
||||||
'precise-updates/grizzly': 'precise-updates/grizzly',
|
|
||||||
'grizzly/proposed': 'precise-proposed/grizzly',
|
|
||||||
'precise-grizzly/proposed': 'precise-proposed/grizzly',
|
|
||||||
'precise-proposed/grizzly': 'precise-proposed/grizzly',
|
|
||||||
# Havana
|
|
||||||
'havana': 'precise-updates/havana',
|
|
||||||
'precise-havana': 'precise-updates/havana',
|
|
||||||
'precise-havana/updates': 'precise-updates/havana',
|
|
||||||
'precise-updates/havana': 'precise-updates/havana',
|
|
||||||
'havana/proposed': 'precise-proposed/havana',
|
|
||||||
'precise-havana/proposed': 'precise-proposed/havana',
|
|
||||||
'precise-proposed/havana': 'precise-proposed/havana',
|
|
||||||
# Icehouse
|
|
||||||
'icehouse': 'precise-updates/icehouse',
|
|
||||||
'precise-icehouse': 'precise-updates/icehouse',
|
|
||||||
'precise-icehouse/updates': 'precise-updates/icehouse',
|
|
||||||
'precise-updates/icehouse': 'precise-updates/icehouse',
|
|
||||||
'icehouse/proposed': 'precise-proposed/icehouse',
|
|
||||||
'precise-icehouse/proposed': 'precise-proposed/icehouse',
|
|
||||||
'precise-proposed/icehouse': 'precise-proposed/icehouse',
|
|
||||||
# Juno
|
|
||||||
'juno': 'trusty-updates/juno',
|
|
||||||
'trusty-juno': 'trusty-updates/juno',
|
|
||||||
'trusty-juno/updates': 'trusty-updates/juno',
|
|
||||||
'trusty-updates/juno': 'trusty-updates/juno',
|
|
||||||
'juno/proposed': 'trusty-proposed/juno',
|
|
||||||
'trusty-juno/proposed': 'trusty-proposed/juno',
|
|
||||||
'trusty-proposed/juno': 'trusty-proposed/juno',
|
|
||||||
# Kilo
|
|
||||||
'kilo': 'trusty-updates/kilo',
|
|
||||||
'trusty-kilo': 'trusty-updates/kilo',
|
|
||||||
'trusty-kilo/updates': 'trusty-updates/kilo',
|
|
||||||
'trusty-updates/kilo': 'trusty-updates/kilo',
|
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
|
||||||
'trusty-kilo/proposed': 'trusty-proposed/kilo',
|
|
||||||
'trusty-proposed/kilo': 'trusty-proposed/kilo',
|
|
||||||
}
|
|
||||||
|
|
||||||
# The order of this list is very important. Handlers should be listed in from
|
|
||||||
# least- to most-specific URL matching.
|
|
||||||
FETCH_HANDLERS = (
|
|
||||||
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
|
|
||||||
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
|
|
||||||
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
|
|
||||||
)
|
|
||||||
|
|
||||||
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
|
|
||||||
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
|
|
||||||
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
|
|
||||||
|
|
||||||
|
|
||||||
class SourceConfigError(Exception):
    """Raised when charm install_sources/install_keys configuration is
    invalid (unknown cloud pocket, mismatched source/key lists)."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class UnhandledSource(Exception):
    """Raised when no fetch handler can process a given source URL."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class AptLockError(Exception):
    """Reserved for APT lock acquisition failures.

    NOTE(review): not raised anywhere in the visible code -- confirm
    whether callers elsewhere depend on it.
    """
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class BaseFetchHandler(object):
    """Base class for FetchHandler implementations in fetch plugins."""

    def can_handle(self, source):
        """Returns True if the source can be handled. Otherwise returns
        a string explaining why it cannot"""
        return "Wrong source type"

    def install(self, source):
        """Try to download and unpack the source. Return the path to the
        unpacked files or raise UnhandledSource."""
        raise UnhandledSource("Wrong source type {}".format(source))

    def parse_url(self, url):
        """Split ``url`` into its six URL components."""
        return urlparse(url)

    def base_url(self, url):
        """Return url without querystring or fragment"""
        components = list(self.parse_url(url))
        # Blank out query and fragment (positions 4 and 5).
        for idx in range(4, len(components)):
            components[idx] = ''
        return urlunparse(components)
|
|
||||||
|
|
||||||
|
|
||||||
def filter_installed_packages(packages):
    """Returns a list of packages that require installation"""
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            # Installed packages have a current_ver; only queue the
            # package when it does not (i.e. it is not installed).
            p.current_ver or _pkgs.append(package)
        except KeyError:
            # Unknown to apt: keep it in the list so the subsequent
            # install attempt surfaces the real error.
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs
|
|
||||||
|
|
||||||
|
|
||||||
def apt_cache(in_memory=True):
    """Build and return an apt cache.

    :param in_memory: when True, blank the on-disk cache file settings
        so the cache is built purely in memory.
    """
    # Imported lazily: python-apt is only present on Debian-family hosts.
    from apt import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()
|
|
||||||
|
|
||||||
|
|
||||||
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages.

    :param packages: a package name or sequence of package names.
    :param options: extra apt-get options; defaults to keeping existing
        config files on upgrade (--force-confold).
    :param fatal: when True, failures raise (with apt-lock retries,
        see _run_apt_command).
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    # A bare string is a single package name; anything else is treated
    # as a sequence of names.
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    _run_apt_command(cmd, fatal)
|
|
||||||
|
|
||||||
|
|
||||||
def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages.

    :param options: extra apt-get options; defaults to keeping existing
        config files on upgrade (--force-confold).
    :param fatal: when True, failures raise (see _run_apt_command).
    :param dist: when True, run 'dist-upgrade' instead of 'upgrade'.
    """
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)
|
|
||||||
|
|
||||||
|
|
||||||
def apt_update(fatal=False):
    """Update local apt cache.

    :param fatal: when True, failures raise (see _run_apt_command).
    """
    cmd = ['apt-get', 'update']
    _run_apt_command(cmd, fatal)
|
|
||||||
|
|
||||||
|
|
||||||
def apt_purge(packages, fatal=False):
    """Purge one or more packages.

    :param packages: a package name or sequence of package names.
    :param fatal: when True, failures raise (see _run_apt_command).
    """
    cmd = ['apt-get', '--assume-yes', 'purge']
    # A bare string is a single package name; anything else is treated
    # as a sequence of names.
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)
|
|
||||||
|
|
||||||
|
|
||||||
def apt_hold(packages, fatal=False):
    """Hold one or more packages at their installed version.

    :param packages: a package name or sequence of package names.
    :param fatal: when True, use check_call so a non-zero exit raises;
        otherwise failures are ignored.
    """
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, six.string_types):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))

    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'
        'distro' may be used as a noop

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
            source.startswith('http') or
            source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        # Cloud archive pockets need the keyring package installed
        # before the sources.list entry is written.
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    elif source == 'distro':
        # 'distro' means use the default sources; nothing to do.
        pass
    else:
        log("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            # Full ASCII-armoured key material: feed it to apt-key via stdin.
            with NamedTemporaryFile('w+') as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
|
|
||||||
|
|
||||||
|
|
||||||
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
    The fragment needs to be included as a string. Sources and their
    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.

    :param update: when True, run apt_update(fatal=True) afterwards.
    :raises SourceConfigError: when the sources and keys lists differ
        in length.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    # A single source given as a plain string becomes a one-element list.
    if isinstance(sources, six.string_types):
        sources = [sources]

    if keys is None:
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, six.string_types):
            keys = [keys]

        # Every source must have a matching key entry (possibly null).
        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')
        for source, key in zip(sources, keys):
            add_source(source, key)
    if update:
        apt_update(fatal=True)
|
|
||||||
|
|
||||||
|
|
||||||
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this modules submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted.  If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.

    :raises UnhandledSource: when no handler succeeds.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            # NOTE(review): the loop keeps invoking later handlers even
            # after a successful install -- confirm this is intended.
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource:
            pass
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to
|
|
||||||
|
|
||||||
|
|
||||||
def install_from_config(config_var_name):
    """Install a remote source whose URL is stored in charm config.

    :param config_var_name: name of the config option holding the
        source URL (passed to install_remote).
    """
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)
|
|
||||||
|
|
||||||
|
|
||||||
def plugins(fetch_handlers=None):
    """Instantiate the configured fetch handler classes.

    :param fetch_handlers: optional iterable of dotted class paths;
        defaults to FETCH_HANDLERS.
    :return: list of handler instances; entries that fail to import
        are logged and skipped.
    """
    if not fetch_handlers:
        fetch_handlers = FETCH_HANDLERS
    plugin_list = []
    for handler_name in fetch_handlers:
        package, classname = handler_name.rsplit('.', 1)
        try:
            handler_class = getattr(
                importlib.import_module(package),
                classname)
            plugin_list.append(handler_class())
        except (ImportError, AttributeError):
            # Skip missing plugins so that they can be omitted from
            # installation if desired
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list
|
|
||||||
|
|
||||||
|
|
||||||
def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking output and retrying if the fatal flag is set
    to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    # Suppress interactive dpkg/apt prompts unless the caller has
    # explicitly configured a frontend.
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError as e:
                retry_count = retry_count + 1
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                # NOTE(review): any non-zero exit reaches here; a code
                # other than APT_NO_LOCK sleeps once and then falls out
                # of the loop WITHOUT raising -- confirm that silent
                # failure mode is intended.
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)

    else:
        # Best-effort: exit status is ignored when fatal is False.
        subprocess.call(cmd, env=env)
|
|
@ -1,161 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import hashlib
|
|
||||||
import re
|
|
||||||
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
BaseFetchHandler,
|
|
||||||
UnhandledSource
|
|
||||||
)
|
|
||||||
from charmhelpers.payload.archive import (
|
|
||||||
get_archive_handler,
|
|
||||||
extract,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import mkdir, check_hash
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
from urllib.request import (
|
|
||||||
build_opener, install_opener, urlopen, urlretrieve,
|
|
||||||
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
|
||||||
)
|
|
||||||
from urllib.parse import urlparse, urlunparse, parse_qs
|
|
||||||
from urllib.error import URLError
|
|
||||||
else:
|
|
||||||
from urllib import urlretrieve
|
|
||||||
from urllib2 import (
|
|
||||||
build_opener, install_opener, urlopen,
|
|
||||||
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
|
||||||
URLError
|
|
||||||
)
|
|
||||||
from urlparse import urlparse, urlunparse, parse_qs
|
|
||||||
|
|
||||||
|
|
||||||
def splituser(host):
    '''urllib.splituser(), but six's support of this seems broken'''
    match = re.match('^(.*)@(.*)$', host)
    if match is None:
        return None, host
    return match.group(1, 2)
|
|
||||||
|
|
||||||
|
|
||||||
def splitpasswd(user):
    '''urllib.splitpasswd(), but six's support of this is missing'''
    match = re.match('^([^:]*):(.*)$', user, re.S)
    if match is None:
        return user, None
    return match.group(1, 2)
|
|
||||||
|
|
||||||
|
|
||||||
class ArchiveUrlFetchHandler(BaseFetchHandler):
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        # Handle only supported schemes whose path maps to a known
        # archive type (per get_archive_handler).
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
            return "Wrong source type"
        if get_archive_handler(self.base_url(source)):
            return True
        return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse(source)
        if proto in ('http', 'https'):
            auth, barehost = splituser(netloc)
            if auth is not None:
                # Strip credentials out of the URL and register them with
                # an HTTP basic-auth opener instead.
                source = urlunparse((proto, barehost, path, params, query, fragment))
                username, password = splitpasswd(auth)
                passman = HTTPPasswordMgrWithDefaultRealm()
                # Realm is set to None in add_password to force the username and password
                # to be used whatever the realm
                passman.add_password(None, source, username, password)
                authhandler = HTTPBasicAuthHandler(passman)
                opener = build_opener(authhandler)
                install_opener(opener)
        response = urlopen(source)
        try:
            with open(dest, 'w') as dest_file:
                dest_file.write(response.read())
        except Exception as e:
            # Don't leave a partially-written file behind on failure.
            if os.path.isfile(dest):
                os.unlink(dest)
            raise e

    # Mandatory file validation via Sha1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        """Download ``url`` to a temporary file, verify its hash, and
        return the temp file path (check_hash raises on mismatch)."""
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/archives/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.

        :raises UnhandledSource: when the download fails.
        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
        try:
            self.download(source, dld_file)
        except URLError as e:
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        # Any recognised hash algorithm on the URL fragment
        # (e.g. #sha1=deadbeef) is checked against the download.
        options = parse_qs(url_parts.fragment)
        for key, value in options.items():
            if not six.PY3:
                algorithms = hashlib.algorithms
            else:
                algorithms = hashlib.algorithms_available
            # NOTE(review): parse_qs yields list values; ``value`` is
            # passed to check_hash as-is -- confirm check_hash accepts a
            # list (or only single-element fragments occur in practice).
            if key in algorithms:
                check_hash(dld_file, value, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)
|
|
@ -1,78 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
BaseFetchHandler,
|
|
||||||
UnhandledSource
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import mkdir
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
raise ImportError('bzrlib does not support Python3')
|
|
||||||
|
|
||||||
try:
|
|
||||||
from bzrlib.branch import Branch
|
|
||||||
from bzrlib import bzrdir, workingtree, errors
|
|
||||||
except ImportError:
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
apt_install("python-bzrlib")
|
|
||||||
from bzrlib.branch import Branch
|
|
||||||
from bzrlib import bzrdir, workingtree, errors
|
|
||||||
|
|
||||||
|
|
||||||
class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs."""

    def can_handle(self, source):
        """Return True if *source* uses a ``bzr+ssh`` or ``lp`` URL scheme."""
        url_parts = self.parse_url(source)
        return url_parts.scheme in ('bzr+ssh', 'lp')

    def branch(self, source, dest):
        """Mirror the branch at *source* into the local directory *dest*.

        :raises UnhandledSource: if *source* is not a URL this handler
            supports.
        """
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        try:
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            # dest is already a bzr control dir; reuse it.
            local_branch = Branch.open(dest)
        # BUG FIX: the original wrapped the following in
        # ``try: ... except Exception as e: raise e`` — a no-op handler
        # that, under Python 2, also reset the original traceback.
        remote_branch = Branch.open(source)
        remote_branch.push(local_branch)
        tree = workingtree.WorkingTree.open(dest)
        tree.update()

    def install(self, source):
        """Fetch *source* into ``$CHARM_DIR/fetched/<branch>`` and return
        the destination path.
        """
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
@ -1,71 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
BaseFetchHandler,
|
|
||||||
UnhandledSource
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import mkdir
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
raise ImportError('GitPython does not support Python 3')
|
|
||||||
|
|
||||||
try:
|
|
||||||
from git import Repo
|
|
||||||
except ImportError:
|
|
||||||
from charmhelpers.fetch import apt_install
|
|
||||||
apt_install("python-git")
|
|
||||||
from git import Repo
|
|
||||||
|
|
||||||
from git.exc import GitCommandError # noqa E402
|
|
||||||
|
|
||||||
|
|
||||||
class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs."""

    def can_handle(self, source):
        """Return True for http(s)/git URL schemes.

        TODO (mattyw): no support for ssh git@ yet.
        """
        scheme = self.parse_url(source).scheme
        return scheme in ('http', 'https', 'git')

    def clone(self, source, dest, branch):
        """Clone *source* into *dest* and check out *branch*.

        :raises UnhandledSource: if *source* is not a URL this handler
            supports.
        """
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        cloned = Repo.clone_from(source, dest)
        cloned.git.checkout(branch)

    def install(self, source, branch="master", dest=None):
        """Clone *source* under *dest* (or ``$CHARM_DIR/fetched``) and
        return the destination path.
        """
        repo_name = self.parse_url(source).path.strip("/").split("/")[-1]
        if dest:
            target = os.path.join(dest, repo_name)
        else:
            target = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                  repo_name)
        if not os.path.exists(target):
            mkdir(target, perms=0o755)
        try:
            self.clone(source, target, branch)
        except GitCommandError as e:
            raise UnhandledSource(e.message)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return target
@ -1,17 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"Tools for working with files injected into a charm just before deployment."
|
|
@ -1,66 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import subprocess
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def default_execd_dir():
    """Return the default exec.d directory, ``$CHARM_DIR/exec.d``.

    Raises ``KeyError`` if ``CHARM_DIR`` is not set in the environment.
    """
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')
def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir.

    A "module" is simply a subdirectory of *execd_dir*; plain files are
    ignored. Yields nothing when the directory does not exist.
    """
    root = execd_dir or default_execd_dir()
    if not os.path.exists(root):
        return

    for entry in os.listdir(root):
        candidate = os.path.join(root, entry)
        if os.path.isdir(candidate):
            yield candidate
def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within exec_dir.

    Only executable regular files named *command* inside each module
    directory are yielded.
    """
    for module_path in execd_module_paths(execd_dir):
        candidate = os.path.join(module_path, command)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            yield candidate
def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
    """Run command for each module within execd_dir which defines it.

    Failures are logged via hookenv; when *die_on_error* is True the
    process exits with the failing script's return code.
    """
    for script in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_call(script, shell=True, stderr=stderr)
        except subprocess.CalledProcessError as e:
            msg = "Error ({}) running {}. Output: {}".format(
                e.returncode, e.cmd, e.output)
            hookenv.log(msg)
            if die_on_error:
                sys.exit(e.returncode)
def execd_preinstall(execd_dir=None):
    """Run charm-pre-install for each module within execd_dir."""
    execd_run('charm-pre-install', execd_dir=execd_dir)
93
hooks/charmhelpers/contrib/amulet/deployment.py
Normal file
93
hooks/charmhelpers/contrib/amulet/deployment.py
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import os
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
class AmuletDeployment(object):
    """Amulet deployment.

    This class provides generic Amulet deployment and test runner
    methods.
    """

    def __init__(self, series=None):
        """Initialize the deployment environment."""
        self.series = None

        if series:
            self.series = series
            self.d = amulet.Deployment(series=self.series)
        else:
            self.d = amulet.Deployment()

    def _add_services(self, this_service, other_services):
        """Add services.

        Add services to the deployment where this_service is the local charm
        that we're testing and other_services are the other services that
        are being used in the local amulet tests.
        """
        if this_service['name'] != os.path.basename(os.getcwd()):
            s = this_service['name']
            msg = "The charm's root directory name needs to be {}".format(s)
            amulet.raise_status(amulet.FAIL, msg=msg)

        if 'units' not in this_service:
            this_service['units'] = 1

        self.d.add(this_service['name'], units=this_service['units'])

        for svc in other_services:
            if 'location' in svc:
                branch_location = svc['location']
            elif self.series:
                # BUG FIX: the original line ended with a stray trailing
                # comma, which made branch_location a 1-tuple instead of
                # the charm-store URL string.
                branch_location = 'cs:{}/{}'.format(self.series, svc['name'])
            else:
                branch_location = None

            if 'units' not in svc:
                svc['units'] = 1

            self.d.add(svc['name'], charm=branch_location, units=svc['units'])

    def _add_relations(self, relations):
        """Add all of the relations for the services."""
        for k, v in six.iteritems(relations):
            self.d.relate(k, v)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _deploy(self):
        """Deploy environment and wait for all hooks to finish executing."""
        # NOTE: the original also had a no-op ``except Exception: raise``
        # clause here; removed as it changed nothing.
        try:
            self.d.setup(timeout=900)
            self.d.sentry.wait(timeout=900)
        except amulet.helpers.TimeoutError:
            amulet.raise_status(amulet.FAIL, msg="Deployment timed out")

    def run_tests(self):
        """Run all of the methods that are prefixed with 'test_'."""
        for test in dir(self):
            if test.startswith('test_'):
                getattr(self, test)()
533
hooks/charmhelpers/contrib/amulet/utils.py
Normal file
533
hooks/charmhelpers/contrib/amulet/utils.py
Normal file
@ -0,0 +1,533 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import ConfigParser
|
||||||
|
import distro_info
|
||||||
|
import io
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import six
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import urlparse
|
||||||
|
|
||||||
|
|
||||||
|
class AmuletUtils(object):
|
||||||
|
"""Amulet utilities.
|
||||||
|
|
||||||
|
This class provides common utility functions that are used by Amulet
|
||||||
|
tests.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, log_level=logging.ERROR):
    """Set up the utils object: attach a stdout logger at *log_level*
    and cache the ordered list of Ubuntu releases.
    """
    self.log = self.get_logger(level=log_level)
    self.ubuntu_releases = self.get_ubuntu_releases()
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
    """Get a logger object that will log to stdout."""
    logger = logging.getLogger(name)
    formatter = logging.Formatter("%(asctime)s %(funcName)s "
                                  "%(levelname)s: %(message)s")

    stream_handler = logging.StreamHandler(stream=sys.stdout)
    stream_handler.setLevel(level)
    stream_handler.setFormatter(formatter)

    logger.addHandler(stream_handler)
    logger.setLevel(level)

    return logger
def valid_ip(self, ip):
    """Return True if *ip* looks like a dotted-quad IPv4 address.

    NOTE(review): purely syntactic — octets above 255 (e.g. '999.1.1.1')
    are still accepted, matching the original behavior.
    """
    return bool(re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip))
def valid_url(self, url):
    """Return True if *url* matches a permissive HTTP/FTP URL pattern."""
    url_re = re.compile(
        r'^(?:http|ftp)s?://'
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # noqa
        r'localhost|'
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
        r'(?::\d+)?'
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)
    return bool(url_re.match(url))
def get_ubuntu_release_from_sentry(self, sentry_unit):
    """Get Ubuntu release codename from sentry unit.

    :param sentry_unit: amulet sentry/service unit pointer
    :returns: list of strings - release codename, failure message
    """
    cmd = 'lsb_release -cs'
    release, code = sentry_unit.run(cmd)
    msg = None
    if code == 0:
        self.log.debug('{} lsb_release: {}'.format(
            sentry_unit.info['unit_name'], release))
    else:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, release, code))
    # Unknown codenames are reported even when the command succeeded.
    if release not in self.ubuntu_releases:
        msg = ("Release ({}) not found in Ubuntu releases "
               "({})".format(release, self.ubuntu_releases))
    return release, msg
def validate_services(self, commands):
    """Validate that lists of commands succeed on service units.  Can be
    used to verify system services are running on the corresponding
    service units.

    :param commands: dict with sentry keys and arbitrary command list vals
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # /!\ DEPRECATION WARNING (beisner):
    # New and existing tests should be rewritten to use
    # validate_services_by_name() as it is aware of init systems.
    self.log.warn('/!\\ DEPRECATION WARNING: use '
                  'validate_services_by_name instead of validate_services '
                  'due to init system differences.')

    for sentry, cmd_list in six.iteritems(commands):
        for cmd in cmd_list:
            output, code = sentry.run(cmd)
            self.log.debug('{} `{}` returned '
                           '{}'.format(sentry.info['unit_name'],
                                       cmd, code))
            if code != 0:
                return "command `{}` returned {}".format(cmd, str(code))
    return None
def validate_services_by_name(self, sentry_services):
    """Validate system service status by service name, automatically
    detecting init system based on Ubuntu release codename.

    :param sentry_services: dict with sentry keys and svc list values
    :returns: None if successful, Failure string message otherwise
    """
    self.log.debug('Checking status of system services...')

    # Point at which systemd became a thing
    systemd_switch = self.ubuntu_releases.index('vivid')

    for sentry_unit, services_list in six.iteritems(sentry_services):
        # Get lsb_release codename from unit
        release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
        if ret:
            return ret

        release_idx = self.ubuntu_releases.index(release)
        for service_name in services_list:
            # rabbitmq-server always ships a systemd-style service entry.
            if (release_idx >= systemd_switch or
                    service_name == "rabbitmq-server"):
                # init is systemd
                cmd = 'sudo service {} status'.format(service_name)
            else:
                # init is upstart
                cmd = 'sudo status {}'.format(service_name)

            output, code = sentry_unit.run(cmd)
            self.log.debug('{} `{}` returned '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       cmd, code))
            if code != 0:
                return "command `{}` returned {}".format(cmd, str(code))
    return None
def _get_config(self, unit, filename):
    """Get a ConfigParser object for parsing a unit's config file."""
    file_contents = unit.file_contents(filename)

    # NOTE(beisner): by default, ConfigParser does not handle options
    # with no value, such as the flags used in the mysql my.cnf file.
    # https://bugs.python.org/issue7005
    parser = ConfigParser.ConfigParser(allow_no_value=True)
    parser.readfp(io.StringIO(file_contents))
    return parser
def validate_config_data(self, sentry_unit, config_file, section,
                         expected):
    """Validate config file data.

    Verify that the specified section of the config file contains
    the expected option key:value pairs.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('Validating config file data ({} in {} on {})'
                   '...'.format(section, config_file,
                                sentry_unit.info['unit_name']))
    config = self._get_config(sentry_unit, config_file)

    if section != 'DEFAULT' and not config.has_section(section):
        return "section [{}] does not exist".format(section)

    for k in expected.keys():
        if not config.has_option(section, k):
            return "section [{}] is missing option {}".format(section, k)

        actual = config.get(section, k)
        v = expected[k]
        is_literal = (isinstance(v, six.string_types) or
                      isinstance(v, bool) or
                      isinstance(v, six.integer_types))
        if is_literal:
            # handle explicit values
            if actual != v:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, actual, k, expected[k])
        # handle function pointers, such as not_null or valid_ip
        elif not v(actual):
            return "section [{}] {}:{} != expected {}:{}".format(
                section, k, actual, k, expected[k])
    return None
def _validate_dict_data(self, expected, actual):
    """Validate dictionary data.

    Compare expected dictionary data vs actual dictionary data.
    The values in the 'expected' dictionary can be strings, bools, ints,
    longs, or can be a function that evaluates a variable and returns a
    bool.
    """
    self.log.debug('actual: {}'.format(repr(actual)))
    self.log.debug('expected: {}'.format(repr(expected)))

    for k, v in six.iteritems(expected):
        if k not in actual:
            return "key '{}' does not exist".format(k)
        is_literal = (isinstance(v, six.string_types) or
                      isinstance(v, bool) or
                      isinstance(v, six.integer_types))
        if is_literal:
            # handle explicit values
            if v != actual[k]:
                return "{}:{}".format(k, actual[k])
        # handle function pointers, such as not_null or valid_ip
        elif not v(actual[k]):
            return "{}:{}".format(k, actual[k])
    return None
def validate_relation_data(self, sentry_unit, relation, expected):
    """Validate actual relation data based on expected relation data."""
    actual = sentry_unit.relation(relation[0], relation[1])
    return self._validate_dict_data(expected, actual)
def _validate_list_data(self, expected, actual):
|
||||||
|
"""Compare expected list vs actual list data."""
|
||||||
|
for e in expected:
|
||||||
|
if e not in actual:
|
||||||
|
return "expected item {} not found in actual list".format(e)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def not_null(self, string):
    """Return True unless *string* is None.

    Note that empty strings count as present (truthiness is not checked).
    """
    return string is not None
def _get_file_mtime(self, sentry_unit, filename):
    """Get last modification time of file."""
    stat = sentry_unit.file_stat(filename)
    return stat['mtime']
def _get_dir_mtime(self, sentry_unit, directory):
    """Get last modification time of directory."""
    stat = sentry_unit.directory_stat(directory)
    return stat['mtime']
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
    """Get process' start time.

    Determine start time of the process based on the last modification
    time of the /proc/pid directory. If pgrep_full is True, the process
    name is matched against the full command line.

    Implicitly returns None when no matching process is found.
    """
    pgrep_cmd = ('pgrep -o -f {}'.format(service) if pgrep_full
                 else 'pgrep -o {}'.format(service))
    cmd = pgrep_cmd + ' | grep -v pgrep || exit 0'
    cmd_out = sentry_unit.run(cmd)
    self.log.debug('CMDout: ' + str(cmd_out))
    if cmd_out[0]:
        self.log.debug('Pid for %s %s' % (service, str(cmd_out[0])))
        proc_dir = '/proc/{}'.format(cmd_out[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)
def service_restarted(self, sentry_unit, service, filename,
                      pgrep_full=False, sleep_time=20):
    """Check if service was restarted.

    Compare a service's start time vs a file's last modification time
    (such as a config file for that service) to determine if the service
    has been restarted.
    """
    time.sleep(sleep_time)
    proc_start = self._get_proc_start_time(sentry_unit, service,
                                           pgrep_full)
    file_mtime = self._get_file_mtime(sentry_unit, filename)
    return proc_start >= file_mtime
def service_restarted_since(self, sentry_unit, mtime, service,
                            pgrep_full=False, sleep_time=20,
                            retry_count=2):
    """Check if service was been started after a given time.

    Args:
      sentry_unit (sentry): The sentry unit to check for the service on
      mtime (float): The epoch time to check against
      service (string): service name to look for in process table
      pgrep_full (boolean): Use full command line search mode with pgrep
      sleep_time (int): Seconds to sleep before looking for process
      retry_count (int): If service is not found, how many times to retry

    Returns:
      bool: True if service found and its start time it newer than mtime,
            False if service is older than mtime or if service was
            not found.
    """
    self.log.debug('Checking %s restarted since %s' % (service, mtime))
    time.sleep(sleep_time)
    proc_start_time = self._get_proc_start_time(sentry_unit, service,
                                                pgrep_full)
    # Retry a few times when the process has not shown up yet.
    while retry_count > 0 and not proc_start_time:
        self.log.debug('No pid file found for service %s, will retry %i '
                       'more times' % (service, retry_count))
        time.sleep(30)
        proc_start_time = self._get_proc_start_time(sentry_unit, service,
                                                    pgrep_full)
        retry_count -= 1

    if not proc_start_time:
        self.log.warn('No proc start time found, assuming service did '
                      'not start')
        return False
    if proc_start_time >= mtime:
        self.log.debug('proc start time is newer than provided mtime'
                       '(%s >= %s)' % (proc_start_time, mtime))
        return True
    self.log.warn('proc start time (%s) is older than provided mtime '
                  '(%s), service did not restart' % (proc_start_time,
                                                     mtime))
    return False
def config_updated_since(self, sentry_unit, filename, mtime,
                         sleep_time=20):
    """Check if file was modified after a given time.

    Args:
      sentry_unit (sentry): The sentry unit to check the file mtime on
      filename (string): The file to check mtime of
      mtime (float): The epoch time to check against
      sleep_time (int): Seconds to sleep before looking for process

    Returns:
      bool: True if file was modified more recently than mtime, False if
            file was modified before mtime,
    """
    self.log.debug('Checking %s updated since %s' % (filename, mtime))
    time.sleep(sleep_time)
    file_mtime = self._get_file_mtime(sentry_unit, filename)
    if file_mtime < mtime:
        self.log.warn('File mtime %s is older than provided mtime %s'
                      % (file_mtime, mtime))
        return False
    self.log.debug('File mtime is newer than provided mtime '
                   '(%s >= %s)' % (file_mtime, mtime))
    return True
def validate_service_config_changed(self, sentry_unit, mtime, service,
                                    filename, pgrep_full=False,
                                    sleep_time=20, retry_count=2):
    """Check service and file were updated after mtime

    Args:
      sentry_unit (sentry): The sentry unit to check for the service on
      mtime (float): The epoch time to check against
      service (string): service name to look for in process table
      filename (string): The file to check mtime of
      pgrep_full (boolean): Use full command line search mode with pgrep
      sleep_time (int): Seconds to sleep before looking for process
      retry_count (int): If service is not found, how many times to retry

    Typical Usage:
        u = OpenStackAmuletUtils(ERROR)
        ...
        mtime = u.get_sentry_time(self.cinder_sentry)
        self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
        if not u.validate_service_config_changed(self.cinder_sentry,
                                                 mtime,
                                                 'cinder-api',
                                                 '/etc/cinder/cinder.conf')
            amulet.raise_status(amulet.FAIL, msg='update failed')
    Returns:
      bool: True if both service and file where updated/restarted after
            mtime, False if service is older than mtime or if service was
            not found or if filename was modified before mtime.
    """
    self.log.debug('Checking %s restarted since %s' % (service, mtime))
    time.sleep(sleep_time)
    # Both checks share the sleep above, so disable their own sleeps.
    restarted = self.service_restarted_since(sentry_unit, mtime,
                                             service,
                                             pgrep_full=pgrep_full,
                                             sleep_time=0,
                                             retry_count=retry_count)
    updated = self.config_updated_since(sentry_unit, filename, mtime,
                                        sleep_time=0)
    return restarted and updated
def get_sentry_time(self, sentry_unit):
    """Return current epoch time on a sentry"""
    output = sentry_unit.run("date +'%s'")[0]
    return float(output)
def relation_error(self, name, data):
    """Format a standard failure message for unexpected relation data."""
    return 'unexpected relation data in {} - {}'.format(name, data)
def endpoint_error(self, name, data):
    """Format a standard failure message for unexpected endpoint data."""
    return 'unexpected endpoint data in {} - {}'.format(name, data)
def get_ubuntu_releases(self):
    """Return a list of all Ubuntu releases in order of release."""
    releases = distro_info.UbuntuDistroInfo().all
    self.log.debug('Ubuntu release list: {}'.format(releases))
    return releases
def file_to_url(self, file_rel_path):
    """Convert a relative file path to a file URL."""
    absolute = os.path.abspath(file_rel_path)
    # Force the file:// scheme on the absolute path.
    return urlparse.urlparse(absolute, scheme='file').geturl()
|
||||||
|
|
||||||
|
def check_commands_on_units(self, commands, sentry_units):
    """Check that all commands in a list exit zero on all
    sentry units in a list.

    :param commands: list of bash commands
    :param sentry_units: list of sentry unit pointers
    :returns: None if successful; Failure message otherwise
    """
    self.log.debug('Checking exit codes for {} commands on {} '
                   'sentry units...'.format(len(commands),
                                            len(sentry_units)))
    for unit in sentry_units:
        unit_name = unit.info['unit_name']
        for command in commands:
            output, code = unit.run(command)
            if code != 0:
                # Stop at the first non-zero exit and report it.
                return ('{} `{}` returned {} '
                        '{}'.format(unit_name, command, code, output))
            self.log.debug('{} `{}` returned {} '
                           '(OK)'.format(unit_name, command, code))
    return None
|
||||||
|
|
||||||
|
def get_process_id_list(self, sentry_unit, process_name):
    """Get a list of process ID(s) from a single sentry juju unit
    for a single process name.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :param process_name: Process name
    :returns: List of process IDs
    """
    command = 'pidof {}'.format(process_name)
    output, exit_code = sentry_unit.run(command)
    if exit_code != 0:
        # The process was not found; abort the amulet run with context.
        failure = ('{} `{}` returned {} '
                   '{}'.format(sentry_unit.info['unit_name'],
                               command, exit_code, output))
        amulet.raise_status(amulet.FAIL, msg=failure)
    # pidof prints space-separated PIDs on one line.
    return str(output).split()
|
||||||
|
|
||||||
|
def get_unit_process_ids(self, unit_processes):
    """Construct a dict containing unit sentries, process names, and
    process IDs.

    :param unit_processes: dict mapping sentry units to a list of
        process names to look up on that unit.
    :returns: dict mapping each sentry unit to a dict of
        {process_name: [pid, ...]}.
    """
    pid_dict = {}
    # .items() instead of .iteritems(): works on both Python 2 and 3
    # (.iteritems() raises AttributeError under Python 3).
    for sentry_unit, process_list in unit_processes.items():
        pid_dict[sentry_unit] = {}
        for process in process_list:
            pids = self.get_process_id_list(sentry_unit, process)
            pid_dict[sentry_unit].update({process: pids})
    return pid_dict
|
||||||
|
|
||||||
|
def validate_unit_process_ids(self, expected, actual):
    """Validate process id quantities for services on units.

    :param expected: dict mapping sentry units to
        {process_name: expected_pid_count}
    :param actual: dict mapping sentry units to {process_name: [pids]}
    :returns: None if everything matches; an error message string
        describing the first mismatch otherwise.
    """
    self.log.debug('Checking units for running processes...')
    self.log.debug('Expected PIDs: {}'.format(expected))
    self.log.debug('Actual PIDs: {}'.format(actual))

    if len(actual) != len(expected):
        return ('Unit count mismatch. expected, actual: {}, '
                '{} '.format(len(expected), len(actual)))

    # .items() instead of .iteritems() for Python 2/3 compatibility.
    for (e_sentry, e_proc_names) in expected.items():
        e_sentry_name = e_sentry.info['unit_name']
        if e_sentry in actual.keys():
            a_proc_names = actual[e_sentry]
        else:
            return ('Expected sentry ({}) not found in actual dict data.'
                    '{}'.format(e_sentry_name, e_sentry))

        if len(e_proc_names.keys()) != len(a_proc_names.keys()):
            # Bug fix: report the per-unit process-name counts here.
            # The previous message reused len(expected)/len(actual),
            # which are unit counts, not process-name counts.
            return ('Process name count mismatch. expected, actual: {}, '
                    '{}'.format(len(e_proc_names), len(a_proc_names)))

        for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
                zip(e_proc_names.items(), a_proc_names.items()):
            if e_proc_name != a_proc_name:
                return ('Process name mismatch. expected, actual: {}, '
                        '{}'.format(e_proc_name, a_proc_name))

            a_pids_length = len(a_pids)
            if e_pids_length != a_pids_length:
                return ('PID count mismatch. {} ({}) expected, actual: '
                        '{}, {} ({})'.format(e_sentry_name, e_proc_name,
                                             e_pids_length, a_pids_length,
                                             a_pids))
            else:
                self.log.debug('PID check OK: {} {} {}: '
                               '{}'.format(e_sentry_name, e_proc_name,
                                           e_pids_length, a_pids))
    return None
|
||||||
|
|
||||||
|
def validate_list_of_identical_dicts(self, list_of_dicts):
    """Check that all dicts within a list are identical."""
    # Hash each dict's item set; identical dicts hash identically.
    digests = [hash(frozenset(entry.items())) for entry in list_of_dicts]

    self.log.debug('Hashes: {}'.format(digests))
    if len(set(digests)) != 1:
        return 'Dicts within list are not identical'
    self.log.debug('Dicts within list are identical')
    return None
|
254
hooks/charmhelpers/contrib/ansible/__init__.py
Normal file
254
hooks/charmhelpers/contrib/ansible/__init__.py
Normal file
@ -0,0 +1,254 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Copyright 2013 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
"""Charm Helpers ansible - declare the state of your machines.
|
||||||
|
|
||||||
|
This helper enables you to declare your machine state, rather than
|
||||||
|
program it procedurally (and have to test each change to your procedures).
|
||||||
|
Your install hook can be as simple as::
|
||||||
|
|
||||||
|
{{{
|
||||||
|
import charmhelpers.contrib.ansible
|
||||||
|
|
||||||
|
|
||||||
|
def install():
|
||||||
|
charmhelpers.contrib.ansible.install_ansible_support()
|
||||||
|
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
|
||||||
|
}}}
|
||||||
|
|
||||||
|
and won't need to change (nor will its tests) when you change the machine
|
||||||
|
state.
|
||||||
|
|
||||||
|
All of your juju config and relation-data are available as template
|
||||||
|
variables within your playbooks and templates. An install playbook looks
|
||||||
|
something like::
|
||||||
|
|
||||||
|
{{{
|
||||||
|
---
|
||||||
|
- hosts: localhost
|
||||||
|
user: root
|
||||||
|
|
||||||
|
tasks:
|
||||||
|
- name: Add private repositories.
|
||||||
|
template:
|
||||||
|
src: ../templates/private-repositories.list.jinja2
|
||||||
|
dest: /etc/apt/sources.list.d/private.list
|
||||||
|
|
||||||
|
- name: Update the cache.
|
||||||
|
apt: update_cache=yes
|
||||||
|
|
||||||
|
- name: Install dependencies.
|
||||||
|
apt: pkg={{ item }}
|
||||||
|
with_items:
|
||||||
|
- python-mimeparse
|
||||||
|
- python-webob
|
||||||
|
- sunburnt
|
||||||
|
|
||||||
|
- name: Setup groups.
|
||||||
|
group: name={{ item.name }} gid={{ item.gid }}
|
||||||
|
with_items:
|
||||||
|
- { name: 'deploy_user', gid: 1800 }
|
||||||
|
- { name: 'service_user', gid: 1500 }
|
||||||
|
|
||||||
|
...
|
||||||
|
}}}
|
||||||
|
|
||||||
|
Read more online about `playbooks`_ and standard ansible `modules`_.
|
||||||
|
|
||||||
|
.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
|
||||||
|
.. _modules: http://www.ansibleworks.com/docs/modules.html
|
||||||
|
|
||||||
|
A further feature os the ansible hooks is to provide a light weight "action"
|
||||||
|
scripting tool. This is a decorator that you apply to a function, and that
|
||||||
|
function can now receive cli args, and can pass extra args to the playbook.
|
||||||
|
|
||||||
|
e.g.
|
||||||
|
|
||||||
|
|
||||||
|
@hooks.action()
|
||||||
|
def some_action(amount, force="False"):
|
||||||
|
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
|
||||||
|
# process the arguments
|
||||||
|
# do some calls
|
||||||
|
# return extra-vars to be passed to ansible-playbook
|
||||||
|
return {
|
||||||
|
'amount': int(amount),
|
||||||
|
'type': force,
|
||||||
|
}
|
||||||
|
|
||||||
|
You can now create a symlink to hooks.py that can be invoked like a hook, but
|
||||||
|
with cli params:
|
||||||
|
|
||||||
|
# link actions/some-action to hooks/hooks.py
|
||||||
|
|
||||||
|
actions/some-action amount=10 force=true
|
||||||
|
|
||||||
|
"""
|
||||||
|
import os
|
||||||
|
import stat
|
||||||
|
import subprocess
|
||||||
|
import functools
|
||||||
|
|
||||||
|
import charmhelpers.contrib.templating.contexts
|
||||||
|
import charmhelpers.core.host
|
||||||
|
import charmhelpers.core.hookenv
|
||||||
|
import charmhelpers.fetch
|
||||||
|
|
||||||
|
|
||||||
|
charm_dir = os.environ.get('CHARM_DIR', '')
|
||||||
|
ansible_hosts_path = '/etc/ansible/hosts'
|
||||||
|
# Ansible will automatically include any vars in the following
|
||||||
|
# file in its inventory when run locally.
|
||||||
|
ansible_vars_path = '/etc/ansible/host_vars/localhost'
|
||||||
|
|
||||||
|
|
||||||
|
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
    """Installs the ansible package.

    By default it is installed from the `PPA`_ linked from
    the ansible `website`_ or from a ppa specified by a charm config..

    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    If from_ppa is empty, you must ensure that the package is available
    from a configured repository.

    :param from_ppa: when truthy, add `ppa_location` as an apt source
        and refresh the package index before installing.
    :param ppa_location: apt source string passed to
        charmhelpers.fetch.add_source.
    """
    if from_ppa:
        # Register the PPA and refresh; fatal=True aborts on apt failure.
        charmhelpers.fetch.add_source(ppa_location)
        charmhelpers.fetch.apt_update(fatal=True)
    charmhelpers.fetch.apt_install('ansible')
    # Point ansible's inventory at the local machine only, so playbooks
    # run with `-c local` target this unit.
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local')
|
||||||
|
|
||||||
|
|
||||||
|
def apply_playbook(playbook, tags=None, extra_vars=None):
    """Run ansible-playbook locally against the given playbook.

    :param playbook: path to the playbook YAML file.
    :param tags: optional list of tags; only tasks carrying one of
        these tags are run (joined into ansible's --tags argument).
    :param extra_vars: optional dict passed through as --extra-vars
        key=value pairs.
    :raises subprocess.CalledProcessError: if ansible-playbook exits
        non-zero.
    """
    tags = tags or []
    tags = ",".join(tags)
    # Dump current juju config/relation state where ansible's local
    # inventory picks it up automatically (see ansible_vars_path above).
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        ansible_vars_path, namespace_separator='__',
        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))

    # we want ansible's log output to be unbuffered
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = "1"
    call = [
        'ansible-playbook',
        '-c',
        'local',
        playbook,
    ]
    if tags:
        call.extend(['--tags', '{}'.format(tags)])
    if extra_vars:
        extra = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
        call.extend(['--extra-vars', " ".join(extra)])
    subprocess.check_call(call, env=env)
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
    """Run a playbook with the hook-name as the tag.

    This helper builds on the standard hookenv.Hooks helper,
    but additionally runs the playbook with the hook-name specified
    using --tags (ie. running all the tasks tagged with the hook-name).

    Example::

        hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')

        # All the tasks within my_machine_state.yaml tagged with 'install'
        # will be run automatically after do_custom_work()
        @hooks.hook()
        def install():
            do_custom_work()

        # For most of your hooks, you won't need to do anything other
        # than run the tagged tasks for the hook:
        @hooks.hook('config-changed', 'start', 'stop')
        def just_use_playbook():
            pass

        # As a convenience, you can avoid the above noop function by specifying
        # the hooks which are handled by ansible-only and they'll be registered
        # for you:
        # hooks = AnsibleHooks(
        #     'playbooks/my_machine_state.yaml',
        #     default_hooks=['config-changed', 'start', 'stop'])

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)

    """

    def __init__(self, playbook_path, default_hooks=None):
        """Register any hooks handled by ansible.

        :param playbook_path: playbook run (with --tags=<hook name>)
            after every executed hook.
        :param default_hooks: hook names to register with a no-op
            Python handler, so the playbook alone handles them.
        """
        super(AnsibleHooks, self).__init__()

        # name -> callable for cli "actions" (see action() below).
        self._actions = {}
        self.playbook_path = playbook_path

        default_hooks = default_hooks or []

        def noop(*args, **kwargs):
            pass

        # Register a do-nothing Python handler for each ansible-only hook.
        for hook in default_hooks:
            self.register(hook, noop)

    def register_action(self, name, function):
        """Register a hook"""
        self._actions[name] = function

    def execute(self, args):
        """Execute the hook followed by the playbook using the hook as tag.

        args[0] (the invoked program name) selects the action or hook.
        Note the playbook runs unconditionally afterwards, for both
        actions and regular hooks.
        """
        hook_name = os.path.basename(args[0])
        extra_vars = None
        if hook_name in self._actions:
            # Actions may return extra-vars for the playbook run.
            extra_vars = self._actions[hook_name](args[1:])
        else:
            super(AnsibleHooks, self).execute(args)

        charmhelpers.contrib.ansible.apply_playbook(
            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)

    def action(self, *action_names):
        """Decorator, registering them as actions"""
        def action_wrapper(decorated):

            @functools.wraps(decorated)
            def wrapper(argv):
                # Each cli arg is expected as key=value; NOTE(review):
                # a value containing '=' would break this split.
                kwargs = dict(arg.split('=') for arg in argv)
                try:
                    return decorated(**kwargs)
                except TypeError as e:
                    # Surface the action's docstring as usage help when
                    # the cli args don't match its signature.
                    if decorated.__doc__:
                        e.args += (decorated.__doc__,)
                    raise

            self.register_action(decorated.__name__, wrapper)
            # Also register a dashed alias, matching hook-name style.
            if '_' in decorated.__name__:
                self.register_action(
                    decorated.__name__.replace('_', '-'), wrapper)

            return wrapper

        return action_wrapper
|
126
hooks/charmhelpers/contrib/benchmark/__init__.py
Normal file
126
hooks/charmhelpers/contrib/benchmark/__init__.py
Normal file
@ -0,0 +1,126 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import os
|
||||||
|
from distutils.spawn import find_executable
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
in_relation_hook,
|
||||||
|
relation_ids,
|
||||||
|
relation_set,
|
||||||
|
relation_get,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def action_set(key, val):
    """Best-effort wrapper around juju's action-set cli tool.

    Returns True when the tool exists and the value was set (dicts are
    flattened recursively into dotted keys); False when action-set is
    not available (i.e. not running inside an action context).
    """
    if not find_executable('action-set'):
        return False

    if isinstance(val, dict):
        # Recurse, producing e.g. meta.composite.value=... entries.
        for subkey, subval in iter(val.items()):
            action_set('%s.%s' % (key, subkey), subval)
        return True

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True
|
||||||
|
|
||||||
|
|
||||||
|
class Benchmark():
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()

    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must all be present before the config file
    # is (re)written.
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        # Only act when invoked from within a relation hook; outside of
        # one, relation data is unavailable.
        if in_relation_hook():
            if benchmarks is not None:
                # Advertise the supported benchmark actions on every
                # benchmark relation.
                for rid in sorted(relation_ids('benchmark')):
                    relation_set(relation_id=rid, relation_settings={
                        'benchmarks': ",".join(benchmarks)
                    })

            # Check the relation data
            config = {}
            for key in self.required_keys:
                val = relation_get(key)
                if val is not None:
                    config[key] = val
                else:
                    # We don't have all of the required keys
                    config = {}
                    break

            # All keys present: persist them as KEY=VALUE lines.
            if len(config):
                with open(self.BENCHMARK_CONF, 'w') as f:
                    for key, val in iter(config.items()):
                        f.write("%s=%s\n" % (key, val))

    @staticmethod
    def start():
        # Record the benchmark start timestamp (UTC-style format string;
        # NOTE(review): strftime uses local time despite the 'Z' suffix).
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        """
        If the collectd charm is also installed, tell it to send a snapshot
        of the current profile data.
        """
        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        # Record the benchmark stop timestamp.
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )
|
208
hooks/charmhelpers/contrib/charmhelpers/__init__.py
Normal file
208
hooks/charmhelpers/contrib/charmhelpers/__init__.py
Normal file
@ -0,0 +1,208 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Copyright 2012 Canonical Ltd. This software is licensed under the
|
||||||
|
# GNU Affero General Public License version 3 (see the file LICENSE).
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
|
||||||
|
|
||||||
|
import operator
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
import yaml
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
import six
|
||||||
|
if six.PY3:
|
||||||
|
from urllib.request import urlopen
|
||||||
|
from urllib.error import (HTTPError, URLError)
|
||||||
|
else:
|
||||||
|
from urllib2 import (urlopen, HTTPError, URLError)
|
||||||
|
|
||||||
|
"""Helper functions for writing Juju charms in Python."""
|
||||||
|
|
||||||
|
__metaclass__ = type
|
||||||
|
__all__ = [
|
||||||
|
# 'get_config', # core.hookenv.config()
|
||||||
|
# 'log', # core.hookenv.log()
|
||||||
|
# 'log_entry', # core.hookenv.log()
|
||||||
|
# 'log_exit', # core.hookenv.log()
|
||||||
|
# 'relation_get', # core.hookenv.relation_get()
|
||||||
|
# 'relation_set', # core.hookenv.relation_set()
|
||||||
|
# 'relation_ids', # core.hookenv.relation_ids()
|
||||||
|
# 'relation_list', # core.hookenv.relation_units()
|
||||||
|
# 'config_get', # core.hookenv.config()
|
||||||
|
# 'unit_get', # core.hookenv.unit_get()
|
||||||
|
# 'open_port', # core.hookenv.open_port()
|
||||||
|
# 'close_port', # core.hookenv.close_port()
|
||||||
|
# 'service_control', # core.host.service()
|
||||||
|
'unit_info', # client-side, NOT IMPLEMENTED
|
||||||
|
'wait_for_machine', # client-side, NOT IMPLEMENTED
|
||||||
|
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
|
||||||
|
'wait_for_relation', # client-side, NOT IMPLEMENTED
|
||||||
|
'wait_for_unit', # client-side, NOT IMPLEMENTED
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
SLEEP_AMOUNT = 0.1
|
||||||
|
|
||||||
|
|
||||||
|
# We create a juju_status Command here because it makes testing much,
|
||||||
|
# much easier.
|
||||||
|
def juju_status():
    """Return the raw stdout of `juju status`.

    Callers in this module (unit_info, get_machine_data) feed the
    return value straight into yaml.safe_load, so the command's output
    must be returned. check_call only returned the exit code (0),
    which broke that contract — check_output returns stdout.

    :raises subprocess.CalledProcessError: if `juju status` fails.
    """
    return subprocess.check_output(['juju', 'status'])
|
||||||
|
|
||||||
|
# re-implemented as charmhelpers.fetch.configure_sources()
|
||||||
|
# def configure_source(update=False):
|
||||||
|
# source = config_get('source')
|
||||||
|
# if ((source.startswith('ppa:') or
|
||||||
|
# source.startswith('cloud:') or
|
||||||
|
# source.startswith('http:'))):
|
||||||
|
# run('add-apt-repository', source)
|
||||||
|
# if source.startswith("http:"):
|
||||||
|
# run('apt-key', 'import', config_get('key'))
|
||||||
|
# if update:
|
||||||
|
# run('apt-get', 'update')
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def make_charm_config_file(charm_config):
    """Dump a charm config dict to YAML in a named temporary file."""
    config_file = tempfile.NamedTemporaryFile(mode='w+')
    config_file.write(yaml.dump(charm_config))
    config_file.flush()
    # The NamedTemporaryFile instance is returned instead of just the name
    # because we want to take advantage of garbage collection-triggered
    # deletion of the temp file when it goes out of scope in the caller.
    return config_file
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def unit_info(service_name, item_name, data=None, unit=None):
    """Look up item_name for a unit of service_name in juju status data.

    :param data: pre-parsed `juju status` dict; fetched if None.
    :param unit: explicit unit name; when None the lowest-sorting unit
        of the service is used.
    :returns: the requested item, or '' when the service is not listed.
    """
    if data is None:
        data = yaml.safe_load(juju_status())
    service = data['services'].get(service_name)
    if service is None:
        # XXX 2012-02-08 gmb:
        #     This allows us to cope with the race condition that we
        #     have between deploying a service and having it come up in
        #     `juju status`. We could probably do with cleaning it up so
        #     that it fails a bit more noisily after a while.
        return ''
    units = service['units']
    if unit is not None:
        return units[unit][item_name]
    # It might seem odd to sort the units here, but we do it to
    # ensure that when no unit is specified, the first unit for the
    # service (or at least the one with the lowest number) is the
    # one whose data gets returned.
    return units[sorted(units.keys())[0]][item_name]
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def get_machine_data():
    """Return the 'machines' section of parsed `juju status` output."""
    status = yaml.safe_load(juju_status())
    return status['machines']
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def wait_for_machine(num_machines=1, timeout=300):
    """Wait `timeout` seconds for `num_machines` machines to come up.

    This wait_for... function can be called by other wait_for functions
    whose timeouts might be too short in situations where only a bare
    Juju setup has been bootstrapped.

    :return: A tuple of (num_machines, time_taken). This is used for
             testing.
    :raises RuntimeError: when the timeout elapses first.
    """
    # You may think this is a hack, and you'd be right. The easiest way
    # to tell what environment we're working in (LXC vs EC2) is to check
    # the dns-name of the first machine. If it's localhost we're in LXC
    # and we can just return here.
    if get_machine_data()[0]['dns-name'] == 'localhost':
        return 1, 0
    start_time = time.time()
    while True:
        # Drop the first machine, since it's the Zookeeper and that's
        # not a machine that we need to wait for. This will only work
        # for EC2 environments, which is why we return early above if
        # we're in LXC.
        machine_data = get_machine_data()
        non_zookeeper_machines = [
            machine_data[key] for key in list(machine_data.keys())[1:]]
        if len(non_zookeeper_machines) >= num_machines:
            # Done only when every candidate machine reports 'running'.
            all_machines_running = True
            for machine in non_zookeeper_machines:
                if machine.get('instance-state') != 'running':
                    all_machines_running = False
                    break
            if all_machines_running:
                break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    return num_machines, time.time() - start_time
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up.

    :raises RuntimeError: on timeout, or when the unit's agent-state
        ends in an error state rather than 'started'.
    """
    wait_for_machine(num_machines=1)
    start_time = time.time()
    while True:
        state = unit_info(service_name, 'agent-state')
        # Leave the loop on success OR on any error state; the check
        # below turns error states into an exception.
        if 'error' in state or state == 'started':
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up.

    Polls `juju status` (via unit_info) until the named relation of
    service_name reports state 'up'.

    :raises RuntimeError: when the timeout elapses first.
    """
    start_time = time.time()
    while True:
        relation = unit_info(service_name, 'relations').get(relation_name)
        if relation is not None and relation['state'] == 'up':
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
|
||||||
|
|
||||||
|
|
||||||
|
# DEPRECATED: client-side only
|
||||||
|
def wait_for_page_contents(url, contents, timeout=120, validate=None):
    """Poll `url` until its body satisfies validate(page, contents).

    :param validate: binary predicate applied as validate(page, contents);
        defaults to containment (operator.contains).
    :returns: the page body once it validates.
    :raises RuntimeError: when the timeout elapses first.
    """
    if validate is None:
        validate = operator.contains
    start_time = time.time()
    while True:
        try:
            stream = urlopen(url)
        except (HTTPError, URLError):
            # Not up yet (or transiently unreachable) - keep polling.
            pass
        else:
            page = stream.read()
            if validate(page, contents):
                return page
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for contents of ' + url)
        time.sleep(SLEEP_AMOUNT)
|
360
hooks/charmhelpers/contrib/charmsupport/nrpe.py
Normal file
360
hooks/charmhelpers/contrib/charmsupport/nrpe.py
Normal file
@ -0,0 +1,360 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""Compatibility with the nrpe-external-master charm"""
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
import shutil
|
||||||
|
import re
|
||||||
|
import shlex
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
local_unit,
|
||||||
|
log,
|
||||||
|
relation_ids,
|
||||||
|
relation_set,
|
||||||
|
relations_of_type,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.host import service
|
||||||
|
|
||||||
|
# This module adds compatibility with the nrpe-external-master and plain nrpe
|
||||||
|
# subordinate charms. To use it in your charm:
|
||||||
|
#
|
||||||
|
# 1. Update metadata.yaml
|
||||||
|
#
|
||||||
|
# provides:
|
||||||
|
# (...)
|
||||||
|
# nrpe-external-master:
|
||||||
|
# interface: nrpe-external-master
|
||||||
|
# scope: container
|
||||||
|
#
|
||||||
|
# and/or
|
||||||
|
#
|
||||||
|
# provides:
|
||||||
|
# (...)
|
||||||
|
# local-monitors:
|
||||||
|
# interface: local-monitors
|
||||||
|
# scope: container
|
||||||
|
|
||||||
|
#
|
||||||
|
# 2. Add the following to config.yaml
|
||||||
|
#
|
||||||
|
# nagios_context:
|
||||||
|
# default: "juju"
|
||||||
|
# type: string
|
||||||
|
# description: |
|
||||||
|
# Used by the nrpe subordinate charms.
|
||||||
|
# A string that will be prepended to instance name to set the host name
|
||||||
|
# in nagios. So for instance the hostname would be something like:
|
||||||
|
# juju-myservice-0
|
||||||
|
# If you're running multiple environments with the same services in them
|
||||||
|
# this allows you to differentiate between them.
|
||||||
|
# nagios_servicegroups:
|
||||||
|
# default: ""
|
||||||
|
# type: string
|
||||||
|
# description: |
|
||||||
|
# A comma-separated list of nagios servicegroups.
|
||||||
|
# If left empty, the nagios_context will be used as the servicegroup
|
||||||
|
#
|
||||||
|
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
|
||||||
|
#
|
||||||
|
# 4. Update your hooks.py with something like this:
|
||||||
|
#
|
||||||
|
# from charmsupport.nrpe import NRPE
|
||||||
|
# (...)
|
||||||
|
# def update_nrpe_config():
|
||||||
|
# nrpe_compat = NRPE()
|
||||||
|
# nrpe_compat.add_check(
|
||||||
|
# shortname = "myservice",
|
||||||
|
# description = "Check MyService",
|
||||||
|
# check_cmd = "check_http -w 2 -c 10 http://localhost"
|
||||||
|
# )
|
||||||
|
# nrpe_compat.add_check(
|
||||||
|
# "myservice_other",
|
||||||
|
# "Check for widget failures",
|
||||||
|
# check_cmd = "/srv/myapp/scripts/widget_check"
|
||||||
|
# )
|
||||||
|
# nrpe_compat.write()
|
||||||
|
#
|
||||||
|
# def config_changed():
|
||||||
|
# (...)
|
||||||
|
# update_nrpe_config()
|
||||||
|
#
|
||||||
|
# def nrpe_external_master_relation_changed():
|
||||||
|
# update_nrpe_config()
|
||||||
|
#
|
||||||
|
# def local_monitors_relation_changed():
|
||||||
|
# update_nrpe_config()
|
||||||
|
#
|
||||||
|
# 5. ln -s hooks.py nrpe-external-master-relation-changed
|
||||||
|
# ln -s hooks.py local-monitors-relation-changed
|
||||||
|
|
||||||
|
|
||||||
|
class CheckException(Exception):
    """Raised when an NRPE check is misconfigured (e.g. invalid shortname)."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class Check(object):
    """A single NRPE check: its command definition and Nagios service export."""

    # Valid shortnames: letters, digits, dash and underscore only (anchored at
    # the end; re.match anchors the start).
    shortname_re = '[A-Za-z0-9-_]+$'
    # Nagios service definition written to the export dir; doubled braces
    # survive str.format, single-brace fields are substituted.
    service_template = ("""
#---------------------------------------------------
# This file is Juju managed
#---------------------------------------------------
define service {{
    use                             active-service
    host_name                       {nagios_hostname}
    service_description             {nagios_hostname}[{shortname}] """
                        """{description}
    check_command                   check_nrpe!{command}
    servicegroups                   {nagios_servicegroup}
}}
""")

    def __init__(self, shortname, description, check_cmd):
        """Validate *shortname* and resolve *check_cmd* to an absolute path.

        :param str shortname: check identifier, must match shortname_re
        :param str description: free-text description for the Nagios service
        :param str check_cmd: plugin invocation (binary plus arguments)
        :raises CheckException: if shortname contains invalid characters
        """
        super(Check, self).__init__()
        # XXX: could be better to calculate this from the service name
        if not re.match(self.shortname_re, shortname):
            raise CheckException("shortname must match {}".format(
                Check.shortname_re))
        self.shortname = shortname
        self.command = "check_{}".format(shortname)
        # Note: a set of invalid characters is defined by the
        # Nagios server config
        # The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
        self.description = description
        self.check_cmd = self._locate_cmd(check_cmd)

    def _locate_cmd(self, check_cmd):
        """Return *check_cmd* with its binary resolved against the Nagios
        plugin directories, or '' (and a log entry) if it cannot be found."""
        search_path = (
            '/usr/lib/nagios/plugins',
            '/usr/local/lib/nagios/plugins',
        )
        parts = shlex.split(check_cmd)
        for path in search_path:
            if os.path.exists(os.path.join(path, parts[0])):
                command = os.path.join(path, parts[0])
                if len(parts) > 1:
                    # Re-attach the plugin's arguments after the resolved path.
                    command += " " + " ".join(parts[1:])
                return command
        log('Check command not found: {}'.format(parts[0]))
        return ''

    def write(self, nagios_context, hostname, nagios_servicegroups):
        """Write the nrpe.d command file and, if the export dir exists,
        the corresponding Nagios service definition."""
        nrpe_check_file = '/etc/nagios/nrpe.d/{}.cfg'.format(
            self.command)
        with open(nrpe_check_file, 'w') as nrpe_check_config:
            nrpe_check_config.write("# check {}\n".format(self.shortname))
            nrpe_check_config.write("command[{}]={}\n".format(
                self.command, self.check_cmd))

        if not os.path.exists(NRPE.nagios_exportdir):
            log('Not writing service config as {} is not accessible'.format(
                NRPE.nagios_exportdir))
        else:
            self.write_service_config(nagios_context, hostname,
                                      nagios_servicegroups)

    def write_service_config(self, nagios_context, hostname,
                             nagios_servicegroups):
        """Replace any stale service definitions for this command and write
        a fresh one into the Nagios export directory."""
        # Remove previous exports for this command before re-writing.
        for f in os.listdir(NRPE.nagios_exportdir):
            if re.search('.*{}.cfg'.format(self.command), f):
                os.remove(os.path.join(NRPE.nagios_exportdir, f))

        templ_vars = {
            'nagios_hostname': hostname,
            'nagios_servicegroup': nagios_servicegroups,
            'description': self.description,
            'shortname': self.shortname,
            'command': self.command,
        }
        nrpe_service_text = Check.service_template.format(**templ_vars)
        nrpe_service_file = '{}/service__{}_{}.cfg'.format(
            NRPE.nagios_exportdir, hostname, self.command)
        with open(nrpe_service_file, 'w') as nrpe_service_config:
            nrpe_service_config.write(str(nrpe_service_text))

    def run(self):
        """Execute the resolved check command.

        NOTE(review): check_cmd is a single string that may contain
        arguments; subprocess.call without shell=True treats it as one
        program name, so argument-carrying checks presumably fail here —
        confirm against callers before relying on run().
        """
        subprocess.call(self.check_cmd)
|
||||||
|
|
||||||
|
|
||||||
|
class NRPE(object):
    """Collects Check objects and writes them out as NRPE configuration,
    restarting the NRPE server and advertising monitors on the relevant
    monitoring relations."""

    nagios_logdir = '/var/log/nagios'
    nagios_exportdir = '/var/lib/nagios/export'
    nrpe_confdir = '/etc/nagios/nrpe.d'

    def __init__(self, hostname=None):
        """Initialise from charm config.

        :param str hostname: explicit Nagios hostname; defaults to
            "<nagios_context>-<unit-name>"
        """
        super(NRPE, self).__init__()
        self.config = config()
        self.nagios_context = self.config['nagios_context']
        # Fall back to the context when no servicegroups are configured.
        if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
            self.nagios_servicegroups = self.config['nagios_servicegroups']
        else:
            self.nagios_servicegroups = self.nagios_context
        self.unit_name = local_unit().replace('/', '-')
        if hostname:
            self.hostname = hostname
        else:
            self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
        self.checks = []

    def add_check(self, *args, **kwargs):
        """Register a check; arguments are forwarded to Check()."""
        self.checks.append(Check(*args, **kwargs))

    def write(self):
        """Write all registered checks, restart NRPE and publish monitors."""
        try:
            nagios_uid = pwd.getpwnam('nagios').pw_uid
            nagios_gid = grp.getgrnam('nagios').gr_gid
        except KeyError:
            # pwd.getpwnam/grp.getgrnam raise KeyError for unknown names;
            # the previous bare `except:` also swallowed unrelated errors
            # (including KeyboardInterrupt).
            log("Nagios user not set up, nrpe checks not updated")
            return

        if not os.path.exists(NRPE.nagios_logdir):
            os.mkdir(NRPE.nagios_logdir)
            os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)

        nrpe_monitors = {}
        monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
        for nrpecheck in self.checks:
            nrpecheck.write(self.nagios_context, self.hostname,
                            self.nagios_servicegroups)
            nrpe_monitors[nrpecheck.shortname] = {
                "command": nrpecheck.command,
            }

        service('restart', 'nagios-nrpe-server')

        # Advertise the monitors on both monitoring relation types.
        monitor_ids = relation_ids("local-monitors") + \
            relation_ids("nrpe-external-master")
        for rid in monitor_ids:
            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
||||||
|
|
||||||
|
|
||||||
|
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_host_context

    :param str relation_name: Name of relation nrpe sub joined to
    """
    # Test for the key that is actually returned.  The original tested
    # 'nagios_hostname' but read 'nagios_host_context', which raises
    # KeyError on a relation that carries the hostname but no context.
    for rel in relations_of_type(relation_name):
        if 'nagios_host_context' in rel:
            return rel['nagios_host_context']
|
||||||
|
|
||||||
|
|
||||||
|
def get_nagios_hostname(relation_name='nrpe-external-master'):
    """
    Query relation with nrpe subordinate, return the nagios_hostname

    :param str relation_name: Name of relation nrpe sub joined to
    """
    # Return the first relation that advertises a hostname.
    for relation_data in relations_of_type(relation_name):
        try:
            return relation_data['nagios_hostname']
        except KeyError:
            continue
|
||||||
|
|
||||||
|
|
||||||
|
def get_nagios_unit_name(relation_name='nrpe-external-master'):
    """
    Return the nagios unit name prepended with host_context if needed

    :param str relation_name: Name of relation nrpe sub joined to
    """
    context = get_nagios_hostcontext(relation_name)
    unit_id = local_unit()
    # Prefix with the host context only when one is advertised.
    return "%s:%s" % (context, unit_id) if context else unit_id
|
||||||
|
|
||||||
|
|
||||||
|
def add_init_service_checks(nrpe, services, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param list services: List of services to check
    :param str unit_name: Unit name to use in check description
    """
    for svc in services:
        upstart_init = '/etc/init/%s.conf' % svc
        sysv_init = '/etc/init.d/%s' % svc
        if os.path.exists(upstart_init):
            # Upstart-managed service: check the job directly.
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_upstart_job %s' % svc
            )
        elif os.path.exists(sysv_init):
            # SysV service: poll its status from cron and check the
            # resulting status file.
            cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
            cron_file = ('*/5 * * * * root '
                         '/usr/local/lib/nagios/plugins/check_exit_status.pl '
                         '-s /etc/init.d/%s status > '
                         '/var/lib/nagios/service-check-%s.txt\n' % (svc,
                                                                     svc)
                         )
            # Context manager guarantees the file handle is closed even if
            # the write fails (the original open/write/close leaked it).
            with open(cronpath, 'w') as f:
                f.write(cron_file)
            nrpe.add_check(
                shortname=svc,
                description='process check {%s}' % unit_name,
                check_cmd='check_status_file.py -f '
                          '/var/lib/nagios/service-check-%s.txt' % svc,
            )
|
||||||
|
|
||||||
|
|
||||||
|
def copy_nrpe_checks():
    """
    Copy the nrpe checks into place

    """
    plugins_dir = '/usr/local/lib/nagios/plugins'
    source_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
                              'charmhelpers', 'contrib', 'openstack',
                              'files')

    if not os.path.exists(plugins_dir):
        os.makedirs(plugins_dir)
    # Install every check_* plugin shipped with the charm, preserving
    # metadata (copy2).
    for src in glob.glob(os.path.join(source_dir, "check_*")):
        if not os.path.isfile(src):
            continue
        dest = os.path.join(plugins_dir, os.path.basename(src))
        shutil.copy2(src, dest)
|
||||||
|
|
||||||
|
|
||||||
|
def add_haproxy_checks(nrpe, unit_name):
    """
    Add checks for each service in list

    :param NRPE nrpe: NRPE object to add check to
    :param str unit_name: Unit name to use in check description
    """
    # (shortname, description format, plugin) for each HAProxy check.
    haproxy_checks = (
        ('haproxy_servers', 'Check HAProxy {%s}', 'check_haproxy.sh'),
        ('haproxy_queue', 'Check HAProxy queue depth {%s}',
         'check_haproxy_queue_depth.sh'),
    )
    for name, desc_fmt, plugin in haproxy_checks:
        nrpe.add_check(
            shortname=name,
            description=desc_fmt % unit_name,
            check_cmd=plugin)
|
175
hooks/charmhelpers/contrib/charmsupport/volumes.py
Normal file
175
hooks/charmhelpers/contrib/charmsupport/volumes.py
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
'''
|
||||||
|
Functions for managing volumes in juju units. One volume is supported per unit.
|
||||||
|
Subordinates may have their own storage, provided it is on its own partition.
|
||||||
|
|
||||||
|
Configuration stanzas::
|
||||||
|
|
||||||
|
volume-ephemeral:
|
||||||
|
type: boolean
|
||||||
|
default: true
|
||||||
|
description: >
|
||||||
|
If false, a volume is mounted as specified in "volume-map"
|
||||||
|
If true, ephemeral storage will be used, meaning that log data
|
||||||
|
will only exist as long as the machine. YOU HAVE BEEN WARNED.
|
||||||
|
volume-map:
|
||||||
|
type: string
|
||||||
|
default: {}
|
||||||
|
description: >
|
||||||
|
YAML map of units to device names, e.g:
|
||||||
|
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
|
||||||
|
Service units will raise a configure-error if volume-ephemeral
|
||||||
|
is 'true' and no volume-map value is set. Use 'juju set' to set a
|
||||||
|
value and 'juju resolved' to complete configuration.
|
||||||
|
|
||||||
|
Usage::
|
||||||
|
|
||||||
|
from charmsupport.volumes import configure_volume, VolumeConfigurationError
|
||||||
|
from charmsupport.hookenv import log, ERROR
|
||||||
|
def post_mount_hook():
|
||||||
|
stop_service('myservice')
|
||||||
|
def post_mount_hook():
|
||||||
|
start_service('myservice')
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
try:
|
||||||
|
configure_volume(before_change=pre_mount_hook,
|
||||||
|
after_change=post_mount_hook)
|
||||||
|
except VolumeConfigurationError:
|
||||||
|
log('Storage could not be configured', ERROR)
|
||||||
|
|
||||||
|
'''
|
||||||
|
|
||||||
|
# XXX: Known limitations
|
||||||
|
# - fstab is neither consulted nor updated
|
||||||
|
|
||||||
|
import os
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core import host
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
|
||||||
|
MOUNT_BASE = '/srv/juju/volumes'
|
||||||
|
|
||||||
|
|
||||||
|
class VolumeConfigurationError(Exception):
    '''Volume configuration data is missing or invalid'''
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def get_config():
    '''Gather and sanity-check volume configuration data.

    Returns a dict with 'ephemeral', 'device' and 'mountpoint' keys, or
    None when the configuration is inconsistent (every problem is logged
    at ERROR level before returning).
    '''
    volume_config = {}
    config = hookenv.config()

    errors = False

    if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
        volume_config['ephemeral'] = True
    else:
        volume_config['ephemeral'] = False

    # Initialise before parsing: previously, a YAMLError left volume_map
    # unbound and the subsequent `volume_map is None` test raised NameError.
    volume_map = {}
    try:
        volume_map = yaml.safe_load(config.get('volume-map', '{}'))
    except yaml.YAMLError as e:
        hookenv.log("Error parsing YAML volume-map: {}".format(e),
                    hookenv.ERROR)
        errors = True
    if volume_map is None:
        # probably an empty string
        volume_map = {}
    elif not isinstance(volume_map, dict):
        hookenv.log("Volume-map should be a dictionary, not {}".format(
            type(volume_map)))
        errors = True
        # Fall back to an empty map so the .get() below cannot raise
        # AttributeError; errors is already set, so we still return None.
        volume_map = {}

    volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
    if volume_config['device'] and volume_config['ephemeral']:
        # asked for ephemeral storage but also defined a volume ID
        hookenv.log('A volume is defined for this unit, but ephemeral '
                    'storage was requested', hookenv.ERROR)
        errors = True
    elif not volume_config['device'] and not volume_config['ephemeral']:
        # asked for persistent storage but did not define volume ID
        # (the original message claimed the opposite configuration).
        hookenv.log('Persistent storage was requested, but there is no '
                    'volume defined for this unit.', hookenv.ERROR)
        errors = True

    unit_mount_name = hookenv.local_unit().replace('/', '-')
    volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)

    if errors:
        return None
    return volume_config
|
||||||
|
|
||||||
|
|
||||||
|
def mount_volume(config):
    '''Mount config['device'] at config['mountpoint'], creating the
    mountpoint directory if needed and remounting if something else is
    already mounted there.

    :raises VolumeConfigurationError: if the mountpoint is not a directory
        or the mount itself fails
    '''
    mountpoint = config['mountpoint']
    if os.path.exists(mountpoint):
        if not os.path.isdir(mountpoint):
            hookenv.log('Not a directory: {}'.format(mountpoint))
            raise VolumeConfigurationError()
    else:
        host.mkdir(mountpoint)
    # Clear any existing mount before attaching the configured device.
    if os.path.ismount(mountpoint):
        unmount_volume(config)
    mounted = host.mount(config['device'], mountpoint, persist=True)
    if not mounted:
        raise VolumeConfigurationError()
|
||||||
|
|
||||||
|
|
||||||
|
def unmount_volume(config):
    '''Unmount config['mountpoint'] if mounted; no-op otherwise.

    :raises VolumeConfigurationError: if the unmount fails
    '''
    mountpoint = config['mountpoint']
    if not os.path.ismount(mountpoint):
        return
    if not host.umount(mountpoint, persist=True):
        raise VolumeConfigurationError()
|
||||||
|
|
||||||
|
|
||||||
|
def managed_mounts():
    '''List of all mounted managed volumes'''
    # NOTE(review): on Python 3 `filter` is lazy, so despite the docstring
    # this returns an iterator there; callers needing a real list must wrap
    # it (configure_volume does, via dict()).
    return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
|
||||||
|
|
||||||
|
|
||||||
|
def configure_volume(before_change=lambda: None, after_change=lambda: None):
    '''Set up storage (or don't) according to the charm's volume configuration.
    Returns the mount point or "ephemeral". before_change and after_change
    are optional functions to be called if the volume configuration changes.

    :raises VolumeConfigurationError: if get_config() reports an invalid
        configuration, or a required (un)mount operation fails
    '''

    config = get_config()
    if not config:
        hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
        raise VolumeConfigurationError()

    if config['ephemeral']:
        # Ephemeral storage requested: tear down any managed mount that is
        # still attached, notifying callers around the change.
        if os.path.ismount(config['mountpoint']):
            before_change()
            unmount_volume(config)
            after_change()
        return 'ephemeral'
    else:
        # persistent storage
        if os.path.ismount(config['mountpoint']):
            mounts = dict(managed_mounts())
            # Remount only when the device under the mountpoint differs
            # from the configured one.
            if mounts.get(config['mountpoint']) != config['device']:
                before_change()
                unmount_volume(config)
                mount_volume(config)
                after_change()
        else:
            before_change()
            mount_volume(config)
            after_change()
        return config['mountpoint']
|
0
hooks/charmhelpers/contrib/database/__init__.py
Normal file
0
hooks/charmhelpers/contrib/database/__init__.py
Normal file
412
hooks/charmhelpers/contrib/database/mysql.py
Normal file
412
hooks/charmhelpers/contrib/database/mysql.py
Normal file
@ -0,0 +1,412 @@
|
|||||||
|
"""Helper for working with a MySQL database"""
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import platform
|
||||||
|
import os
|
||||||
|
import glob
|
||||||
|
|
||||||
|
# from string import upper
|
||||||
|
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
mkdir,
|
||||||
|
pwgen,
|
||||||
|
write_file
|
||||||
|
)
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config as config_get,
|
||||||
|
relation_get,
|
||||||
|
related_units,
|
||||||
|
unit_get,
|
||||||
|
log,
|
||||||
|
DEBUG,
|
||||||
|
INFO,
|
||||||
|
WARNING,
|
||||||
|
)
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
apt_update,
|
||||||
|
filter_installed_packages,
|
||||||
|
)
|
||||||
|
from charmhelpers.contrib.peerstorage import (
|
||||||
|
peer_store,
|
||||||
|
peer_retrieve,
|
||||||
|
)
|
||||||
|
from charmhelpers.contrib.network.ip import get_host_ip
|
||||||
|
|
||||||
|
try:
|
||||||
|
import MySQLdb
|
||||||
|
except ImportError:
|
||||||
|
apt_update(fatal=True)
|
||||||
|
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
|
||||||
|
import MySQLdb
|
||||||
|
|
||||||
|
|
||||||
|
class MySQLHelper(object):
    """Helper for managing MySQL databases, users and access grants on
    behalf of charms, with password storage on disk and/or the cluster
    peer relation."""

    def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
                 migrate_passwd_to_peer_relation=True,
                 delete_ondisk_passwd_file=True):
        """
        :param str rpasswdf_template: path template for the root password file
        :param str upasswdf_template: path template for per-user password files
        :param str host: MySQL server to connect to
        :param bool migrate_passwd_to_peer_relation: push on-disk passwords
            to the peer relation when possible
        :param bool delete_ondisk_passwd_file: remove the local copy after a
            successful migration
        """
        self.host = host
        # Password file path templates
        self.root_passwd_file_template = rpasswdf_template
        self.user_passwd_file_template = upasswdf_template

        self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
        # If we migrate we have the option to delete local copy of root passwd
        self.delete_ondisk_passwd_file = delete_ondisk_passwd_file

    def connect(self, user='root', password=None):
        """Open a connection to the server as `user`, stored on self."""
        log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
        self.connection = MySQLdb.connect(user=user, host=self.host,
                                          passwd=password)

    def database_exists(self, db_name):
        """Return True if database `db_name` exists."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("SHOW DATABASES")
            databases = [i[0] for i in cursor.fetchall()]
        finally:
            cursor.close()

        return db_name in databases

    def create_database(self, db_name):
        """Create database `db_name` with the UTF8 character set."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
                           .format(db_name))
        finally:
            cursor.close()

    def grant_exists(self, db_name, db_user, remote_ip):
        """Return True if an all-privileges grant on `db_name` already
        exists for `db_user`@`remote_ip`."""
        cursor = self.connection.cursor()
        priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
                      "TO '{}'@'{}'".format(db_name, db_user, remote_ip)
        try:
            cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
                                                              remote_ip))
            grants = [i[0] for i in cursor.fetchall()]
        except MySQLdb.OperationalError:
            # Unknown user/host - no grants.
            return False
        finally:
            cursor.close()

        # TODO: review for different grants
        return priv_string in grants

    def create_grant(self, db_name, db_user, remote_ip, password):
        """Grant all privileges on `db_name` to `db_user`@`remote_ip`."""
        cursor = self.connection.cursor()
        try:
            # TODO: review for different grants
            cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_name,
                                                       db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()

    def create_admin_grant(self, db_user, remote_ip, password):
        """Grant all privileges on every database to `db_user`@`remote_ip`."""
        cursor = self.connection.cursor()
        try:
            cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
                           "IDENTIFIED BY '{}'".format(db_user,
                                                       remote_ip,
                                                       password))
        finally:
            cursor.close()

    def cleanup_grant(self, db_user, remote_ip):
        """Remove the mysql.user entry for `db_user`@`remote_ip`."""
        cursor = self.connection.cursor()
        try:
            # BUGFIX: the original issued "DROP FROM mysql.user ...", which
            # is not valid SQL and always failed; DELETE is the correct verb.
            cursor.execute("DELETE FROM mysql.user WHERE user='{}' "
                           "AND HOST='{}'".format(db_user,
                                                  remote_ip))
        finally:
            cursor.close()

    def execute(self, sql):
        """Execute arbitrary SQL against the database."""
        cursor = self.connection.cursor()
        try:
            cursor.execute(sql)
        finally:
            cursor.close()

    def migrate_passwords_to_peer_relation(self, excludes=None):
        """Migrate any passwords stored on disk to the cluster peer relation.

        :param list excludes: password file paths to skip
        """
        dirname = os.path.dirname(self.root_passwd_file_template)
        path = os.path.join(dirname, '*.passwd')
        for f in glob.glob(path):
            if excludes and f in excludes:
                log("Excluding %s from peer migration" % (f), level=DEBUG)
                continue

            key = os.path.basename(f)
            with open(f, 'r') as passwd:
                _value = passwd.read().strip()

            try:
                peer_store(key, _value)

                if self.delete_ondisk_passwd_file:
                    os.unlink(f)
            except ValueError:
                # NOTE cluster relation not yet ready - skip for now
                pass

    def get_mysql_password_on_disk(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username on disk."""
        if username:
            template = self.user_passwd_file_template
            passwd_file = template.format(username)
        else:
            passwd_file = self.root_passwd_file_template

        _password = None
        if os.path.exists(passwd_file):
            log("Using existing password file '%s'" % passwd_file, level=DEBUG)
            with open(passwd_file, 'r') as passwd:
                _password = passwd.read().strip()
        else:
            log("Generating new password file '%s'" % passwd_file, level=DEBUG)
            if not os.path.isdir(os.path.dirname(passwd_file)):
                # NOTE: need to ensure this is not mysql root dir (which needs
                # to be mysql readable)
                mkdir(os.path.dirname(passwd_file), owner='root', group='root',
                      perms=0o770)
                # Force permissions - for some reason the chmod in makedirs
                # fails
                os.chmod(os.path.dirname(passwd_file), 0o770)

            _password = password or pwgen(length=32)
            write_file(passwd_file, _password, owner='root', group='root',
                       perms=0o660)

        return _password

    def passwd_keys(self, username):
        """Generator to return keys used to store passwords in peer store.

        NOTE: we support both legacy and new format to support mysql
        charm prior to refactor. This is necessary to avoid LP 1451890.
        """
        keys = []
        if username == 'mysql':
            log("Bad username '%s'" % (username), level=WARNING)

        if username:
            # IMPORTANT: *newer* format must be returned first
            keys.append('mysql-%s.passwd' % (username))
            keys.append('%s.passwd' % (username))
        else:
            keys.append('mysql.passwd')

        for key in keys:
            yield key

    def get_mysql_password(self, username=None, password=None):
        """Retrieve, generate or store a mysql password for the provided
        username using peer relation cluster."""
        excludes = []

        # First check peer relation.
        try:
            for key in self.passwd_keys(username):
                _password = peer_retrieve(key)
                if _password:
                    break

            # If root password available don't update peer relation from local
            if _password and not username:
                excludes.append(self.root_passwd_file_template)

        except ValueError:
            # cluster relation is not yet started; use on-disk
            _password = None

        # If none available, generate new one
        if not _password:
            _password = self.get_mysql_password_on_disk(username, password)

        # Put on wire if required
        if self.migrate_passwd_to_peer_relation:
            self.migrate_passwords_to_peer_relation(excludes=excludes)

        return _password

    def get_mysql_root_password(self, password=None):
        """Retrieve or generate mysql root password for service units."""
        return self.get_mysql_password(username=None, password=password)

    def normalize_address(self, hostname):
        """Ensure that address returned is an IP address (i.e. not fqdn)"""
        if config_get('prefer-ipv6'):
            # TODO: add support for ipv6 dns
            return hostname

        if hostname != unit_get('private-address'):
            return get_host_ip(hostname, fallback=hostname)

        # Otherwise assume localhost
        return '127.0.0.1'

    def get_allowed_units(self, database, username, relation_id=None):
        """Get list of units with access grants for database with username.

        This is typically used to provide shared-db relations with a list of
        which units have been granted access to the given database.
        """
        self.connect(password=self.get_mysql_root_password())
        allowed_units = set()
        for unit in related_units(relation_id):
            settings = relation_get(rid=relation_id, unit=unit)
            # First check for setting with prefix, then without
            for attr in ["%s_hostname" % (database), 'hostname']:
                hosts = settings.get(attr, None)
                if hosts:
                    break

            if hosts:
                # hostname can be json-encoded list of hostnames
                try:
                    hosts = json.loads(hosts)
                except ValueError:
                    hosts = [hosts]
            else:
                hosts = [settings['private-address']]

            if hosts:
                for host in hosts:
                    host = self.normalize_address(host)
                    if self.grant_exists(database, username, host):
                        log("Grant exists for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
                        if unit not in allowed_units:
                            allowed_units.add(unit)
                    else:
                        log("Grant does NOT exist for host '%s' on db '%s'" %
                            (host, database), level=DEBUG)
            else:
                log("No hosts found for grant check", level=INFO)

        return allowed_units

    def configure_db(self, hostname, database, username, admin=False):
        """Configure access to database for username from hostname.

        Creates the database if missing and installs the appropriate grant;
        returns the password for `username`.
        """
        self.connect(password=self.get_mysql_root_password())
        if not self.database_exists(database):
            self.create_database(database)

        remote_ip = self.normalize_address(hostname)
        password = self.get_mysql_password(username)
        if not self.grant_exists(database, username, remote_ip):
            if not admin:
                self.create_grant(database, username, remote_ip, password)
            else:
                self.create_admin_grant(username, remote_ip, password)

        return password
|
||||||
|
|
||||||
|
|
||||||
|
class PerconaClusterHelper(object):
    """Derives MySQL/Percona tuning values from charm configuration and
    the unit's available memory."""

    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB

    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50

    def human_to_bytes(self, human):
        """Convert human readable configuration options to bytes.

        Accepts plain integers, K/M/G/T suffixes, or a percentage of total
        system memory (capped on 32-bit systems).

        :param str human: e.g. "512", "32M", "50%"
        :returns int: the value in bytes
        :raises ValueError: for an unrecognised suffix
        """
        num_re = re.compile('^[0-9]+$')
        if num_re.match(human):
            # BUGFIX: return an int rather than the raw string so callers
            # can compare and do arithmetic with the result uniformly; the
            # original returned str here but int for suffixed values, which
            # breaks the `> total_memory` comparison in parse_config on
            # Python 3.
            return int(human)

        factors = {
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }
        modifier = human[-1]
        if modifier in factors:
            return int(human[:-1]) * factors[modifier]

        if modifier == '%':
            total_ram = self.human_to_bytes(self.get_mem_total())
            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
                total_ram = self.sys_mem_limit()
            factor = int(human[:-1]) * 0.01
            pctram = total_ram * factor
            # Round down to a whole number of InnoDB pages.
            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))

        raise ValueError("Can only convert K,M,G, or T")

    def is_32bit_system(self):
        """Determine whether system is 32 or 64 bit."""
        try:
            return sys.maxsize < 2 ** 32
        except OverflowError:
            return False

    def sys_mem_limit(self):
        """Determine the default memory limit for the current service unit."""
        if platform.machine() in ['armv7l']:
            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
        else:
            # Limit for x86 based 32bit systems
            _mem_limit = self.human_to_bytes('4G')

        return _mem_limit

    def get_mem_total(self):
        """Calculate the total memory in the current service unit.

        Returns a human-readable string such as '16384K' suitable for
        human_to_bytes().
        """
        with open('/proc/meminfo') as meminfo_file:
            for line in meminfo_file:
                key, mem = line.split(':', 2)
                if key == 'MemTotal':
                    mtot, modifier = mem.strip().split(' ')
                    return '%s%s' % (mtot, modifier[0].upper())

    def parse_config(self):
        """Parse charm configuration and calculate values for config files.

        :returns dict: mysql option name -> value
        """
        config = config_get()
        mysql_config = {}
        if 'max-connections' in config:
            mysql_config['max_connections'] = config['max-connections']

        if 'wait-timeout' in config:
            mysql_config['wait_timeout'] = config['wait-timeout']

        if 'innodb-flush-log-at-trx-commit' in config:
            mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']

        # Set a sane default key_buffer size
        mysql_config['key_buffer'] = self.human_to_bytes('32M')
        total_memory = self.human_to_bytes(self.get_mem_total())

        dataset_bytes = config.get('dataset-size', None)
        innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

        if innodb_buffer_pool_size:
            innodb_buffer_pool_size = self.human_to_bytes(
                innodb_buffer_pool_size)
        elif dataset_bytes:
            log("Option 'dataset-size' has been deprecated, please use"
                "innodb_buffer_pool_size option instead", level="WARN")
            innodb_buffer_pool_size = self.human_to_bytes(
                dataset_bytes)
        else:
            # Default: a fixed fraction of total memory.
            innodb_buffer_pool_size = int(
                total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')

        mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
        return mysql_config
|
@ -44,6 +44,7 @@ from charmhelpers.core.hookenv import (
|
|||||||
ERROR,
|
ERROR,
|
||||||
WARNING,
|
WARNING,
|
||||||
unit_get,
|
unit_get,
|
||||||
|
is_leader as juju_is_leader
|
||||||
)
|
)
|
||||||
from charmhelpers.core.decorators import (
|
from charmhelpers.core.decorators import (
|
||||||
retry_on_exception,
|
retry_on_exception,
|
||||||
@ -63,17 +64,30 @@ class CRMResourceNotFound(Exception):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class CRMDCNotFound(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
def is_elected_leader(resource):
|
def is_elected_leader(resource):
|
||||||
"""
|
"""
|
||||||
Returns True if the charm executing this is the elected cluster leader.
|
Returns True if the charm executing this is the elected cluster leader.
|
||||||
|
|
||||||
It relies on two mechanisms to determine leadership:
|
It relies on two mechanisms to determine leadership:
|
||||||
1. If the charm is part of a corosync cluster, call corosync to
|
1. If juju is sufficiently new and leadership election is supported,
|
||||||
|
the is_leader command will be used.
|
||||||
|
2. If the charm is part of a corosync cluster, call corosync to
|
||||||
determine leadership.
|
determine leadership.
|
||||||
2. If the charm is not part of a corosync cluster, the leader is
|
3. If the charm is not part of a corosync cluster, the leader is
|
||||||
determined as being "the alive unit with the lowest unit numer". In
|
determined as being "the alive unit with the lowest unit numer". In
|
||||||
other words, the oldest surviving unit.
|
other words, the oldest surviving unit.
|
||||||
"""
|
"""
|
||||||
|
try:
|
||||||
|
return juju_is_leader()
|
||||||
|
except NotImplementedError:
|
||||||
|
log('Juju leadership election feature not enabled'
|
||||||
|
', using fallback support',
|
||||||
|
level=WARNING)
|
||||||
|
|
||||||
if is_clustered():
|
if is_clustered():
|
||||||
if not is_crm_leader(resource):
|
if not is_crm_leader(resource):
|
||||||
log('Deferring action to CRM leader.', level=INFO)
|
log('Deferring action to CRM leader.', level=INFO)
|
||||||
@ -106,8 +120,9 @@ def is_crm_dc():
|
|||||||
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
if not isinstance(status, six.text_type):
|
if not isinstance(status, six.text_type):
|
||||||
status = six.text_type(status, "utf-8")
|
status = six.text_type(status, "utf-8")
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError as ex:
|
||||||
return False
|
raise CRMDCNotFound(str(ex))
|
||||||
|
|
||||||
current_dc = ''
|
current_dc = ''
|
||||||
for line in status.split('\n'):
|
for line in status.split('\n'):
|
||||||
if line.startswith('Current DC'):
|
if line.startswith('Current DC'):
|
||||||
@ -115,10 +130,14 @@ def is_crm_dc():
|
|||||||
current_dc = line.split(':')[1].split()[0]
|
current_dc = line.split(':')[1].split()[0]
|
||||||
if current_dc == get_unit_hostname():
|
if current_dc == get_unit_hostname():
|
||||||
return True
|
return True
|
||||||
|
elif current_dc == 'NONE':
|
||||||
|
raise CRMDCNotFound('Current DC: NONE')
|
||||||
|
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
|
@retry_on_exception(5, base_delay=2,
|
||||||
|
exc_type=(CRMResourceNotFound, CRMDCNotFound))
|
||||||
def is_crm_leader(resource, retry=False):
|
def is_crm_leader(resource, retry=False):
|
||||||
"""
|
"""
|
||||||
Returns True if the charm calling this is the elected corosync leader,
|
Returns True if the charm calling this is the elected corosync leader,
|
||||||
|
319
hooks/charmhelpers/contrib/network/ufw.py
Normal file
319
hooks/charmhelpers/contrib/network/ufw.py
Normal file
@ -0,0 +1,319 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""
|
||||||
|
This module contains helpers to add and remove ufw rules.
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
- open SSH port for subnet 10.0.3.0/24:
|
||||||
|
|
||||||
|
>>> from charmhelpers.contrib.network import ufw
|
||||||
|
>>> ufw.enable()
|
||||||
|
>>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
|
||||||
|
|
||||||
|
- open service by name as defined in /etc/services:
|
||||||
|
|
||||||
|
>>> from charmhelpers.contrib.network import ufw
|
||||||
|
>>> ufw.enable()
|
||||||
|
>>> ufw.service('ssh', 'open')
|
||||||
|
|
||||||
|
- close service by port number:
|
||||||
|
|
||||||
|
>>> from charmhelpers.contrib.network import ufw
|
||||||
|
>>> ufw.enable()
|
||||||
|
>>> ufw.service('4949', 'close') # munin
|
||||||
|
"""
|
||||||
|
import re
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
|
||||||
|
|
||||||
|
|
||||||
|
class UFWError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class UFWIPv6Error(UFWError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def is_enabled():
|
||||||
|
"""
|
||||||
|
Check if `ufw` is enabled
|
||||||
|
|
||||||
|
:returns: True if ufw is enabled
|
||||||
|
"""
|
||||||
|
output = subprocess.check_output(['ufw', 'status'],
|
||||||
|
universal_newlines=True,
|
||||||
|
env={'LANG': 'en_US',
|
||||||
|
'PATH': os.environ['PATH']})
|
||||||
|
|
||||||
|
m = re.findall(r'^Status: active\n', output, re.M)
|
||||||
|
|
||||||
|
return len(m) >= 1
|
||||||
|
|
||||||
|
|
||||||
|
def is_ipv6_ok(soft_fail=False):
|
||||||
|
"""
|
||||||
|
Check if IPv6 support is present and ip6tables functional
|
||||||
|
|
||||||
|
:param soft_fail: If set to True and IPv6 support is broken, then reports
|
||||||
|
that the host doesn't have IPv6 support, otherwise a
|
||||||
|
UFWIPv6Error exception is raised.
|
||||||
|
:returns: True if IPv6 is working, False otherwise
|
||||||
|
"""
|
||||||
|
|
||||||
|
# do we have IPv6 in the machine?
|
||||||
|
if os.path.isdir('/proc/sys/net/ipv6'):
|
||||||
|
# is ip6tables kernel module loaded?
|
||||||
|
lsmod = subprocess.check_output(['lsmod'], universal_newlines=True)
|
||||||
|
matches = re.findall('^ip6_tables[ ]+', lsmod, re.M)
|
||||||
|
if len(matches) == 0:
|
||||||
|
# ip6tables support isn't complete, let's try to load it
|
||||||
|
try:
|
||||||
|
subprocess.check_output(['modprobe', 'ip6_tables'],
|
||||||
|
universal_newlines=True)
|
||||||
|
# great, we could load the module
|
||||||
|
return True
|
||||||
|
except subprocess.CalledProcessError as ex:
|
||||||
|
hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
|
||||||
|
level="WARN")
|
||||||
|
# we are in a world where ip6tables isn't working
|
||||||
|
if soft_fail:
|
||||||
|
# so we inform that the machine doesn't have IPv6
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
raise UFWIPv6Error("IPv6 firewall support broken")
|
||||||
|
else:
|
||||||
|
# the module is present :)
|
||||||
|
return True
|
||||||
|
|
||||||
|
else:
|
||||||
|
# the system doesn't have IPv6
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def disable_ipv6():
|
||||||
|
"""
|
||||||
|
Disable ufw IPv6 support in /etc/default/ufw
|
||||||
|
"""
|
||||||
|
exit_code = subprocess.call(['sed', '-i', 's/IPV6=.*/IPV6=no/g',
|
||||||
|
'/etc/default/ufw'])
|
||||||
|
if exit_code == 0:
|
||||||
|
hookenv.log('IPv6 support in ufw disabled', level='INFO')
|
||||||
|
else:
|
||||||
|
hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
|
||||||
|
raise UFWError("Couldn't disable IPv6 support in ufw")
|
||||||
|
|
||||||
|
|
||||||
|
def enable(soft_fail=False):
|
||||||
|
"""
|
||||||
|
Enable ufw
|
||||||
|
|
||||||
|
:param soft_fail: If set to True silently disables IPv6 support in ufw,
|
||||||
|
otherwise a UFWIPv6Error exception is raised when IP6
|
||||||
|
support is broken.
|
||||||
|
:returns: True if ufw is successfully enabled
|
||||||
|
"""
|
||||||
|
if is_enabled():
|
||||||
|
return True
|
||||||
|
|
||||||
|
if not is_ipv6_ok(soft_fail):
|
||||||
|
disable_ipv6()
|
||||||
|
|
||||||
|
output = subprocess.check_output(['ufw', 'enable'],
|
||||||
|
universal_newlines=True,
|
||||||
|
env={'LANG': 'en_US',
|
||||||
|
'PATH': os.environ['PATH']})
|
||||||
|
|
||||||
|
m = re.findall('^Firewall is active and enabled on system startup\n',
|
||||||
|
output, re.M)
|
||||||
|
hookenv.log(output, level='DEBUG')
|
||||||
|
|
||||||
|
if len(m) == 0:
|
||||||
|
hookenv.log("ufw couldn't be enabled", level='WARN')
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
hookenv.log("ufw enabled", level='INFO')
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def disable():
|
||||||
|
"""
|
||||||
|
Disable ufw
|
||||||
|
|
||||||
|
:returns: True if ufw is successfully disabled
|
||||||
|
"""
|
||||||
|
if not is_enabled():
|
||||||
|
return True
|
||||||
|
|
||||||
|
output = subprocess.check_output(['ufw', 'disable'],
|
||||||
|
universal_newlines=True,
|
||||||
|
env={'LANG': 'en_US',
|
||||||
|
'PATH': os.environ['PATH']})
|
||||||
|
|
||||||
|
m = re.findall(r'^Firewall stopped and disabled on system startup\n',
|
||||||
|
output, re.M)
|
||||||
|
hookenv.log(output, level='DEBUG')
|
||||||
|
|
||||||
|
if len(m) == 0:
|
||||||
|
hookenv.log("ufw couldn't be disabled", level='WARN')
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
hookenv.log("ufw disabled", level='INFO')
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def default_policy(policy='deny', direction='incoming'):
|
||||||
|
"""
|
||||||
|
Changes the default policy for traffic `direction`
|
||||||
|
|
||||||
|
:param policy: allow, deny or reject
|
||||||
|
:param direction: traffic direction, possible values: incoming, outgoing,
|
||||||
|
routed
|
||||||
|
"""
|
||||||
|
if policy not in ['allow', 'deny', 'reject']:
|
||||||
|
raise UFWError(('Unknown policy %s, valid values: '
|
||||||
|
'allow, deny, reject') % policy)
|
||||||
|
|
||||||
|
if direction not in ['incoming', 'outgoing', 'routed']:
|
||||||
|
raise UFWError(('Unknown direction %s, valid values: '
|
||||||
|
'incoming, outgoing, routed') % direction)
|
||||||
|
|
||||||
|
output = subprocess.check_output(['ufw', 'default', policy, direction],
|
||||||
|
universal_newlines=True,
|
||||||
|
env={'LANG': 'en_US',
|
||||||
|
'PATH': os.environ['PATH']})
|
||||||
|
hookenv.log(output, level='DEBUG')
|
||||||
|
|
||||||
|
m = re.findall("^Default %s policy changed to '%s'\n" % (direction,
|
||||||
|
policy),
|
||||||
|
output, re.M)
|
||||||
|
if len(m) == 0:
|
||||||
|
hookenv.log("ufw couldn't change the default policy to %s for %s"
|
||||||
|
% (policy, direction), level='WARN')
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
hookenv.log("ufw default policy for %s changed to %s"
|
||||||
|
% (direction, policy), level='INFO')
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def modify_access(src, dst='any', port=None, proto=None, action='allow',
|
||||||
|
index=None):
|
||||||
|
"""
|
||||||
|
Grant access to an address or subnet
|
||||||
|
|
||||||
|
:param src: address (e.g. 192.168.1.234) or subnet
|
||||||
|
(e.g. 192.168.1.0/24).
|
||||||
|
:param dst: destiny of the connection, if the machine has multiple IPs and
|
||||||
|
connections to only one of those have to accepted this is the
|
||||||
|
field has to be set.
|
||||||
|
:param port: destiny port
|
||||||
|
:param proto: protocol (tcp or udp)
|
||||||
|
:param action: `allow` or `delete`
|
||||||
|
:param index: if different from None the rule is inserted at the given
|
||||||
|
`index`.
|
||||||
|
"""
|
||||||
|
if not is_enabled():
|
||||||
|
hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
|
||||||
|
return
|
||||||
|
|
||||||
|
if action == 'delete':
|
||||||
|
cmd = ['ufw', 'delete', 'allow']
|
||||||
|
elif index is not None:
|
||||||
|
cmd = ['ufw', 'insert', str(index), action]
|
||||||
|
else:
|
||||||
|
cmd = ['ufw', action]
|
||||||
|
|
||||||
|
if src is not None:
|
||||||
|
cmd += ['from', src]
|
||||||
|
|
||||||
|
if dst is not None:
|
||||||
|
cmd += ['to', dst]
|
||||||
|
|
||||||
|
if port is not None:
|
||||||
|
cmd += ['port', str(port)]
|
||||||
|
|
||||||
|
if proto is not None:
|
||||||
|
cmd += ['proto', proto]
|
||||||
|
|
||||||
|
hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
|
||||||
|
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
|
||||||
|
(stdout, stderr) = p.communicate()
|
||||||
|
|
||||||
|
hookenv.log(stdout, level='INFO')
|
||||||
|
|
||||||
|
if p.returncode != 0:
|
||||||
|
hookenv.log(stderr, level='ERROR')
|
||||||
|
hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
|
||||||
|
p.returncode),
|
||||||
|
level='ERROR')
|
||||||
|
|
||||||
|
|
||||||
|
def grant_access(src, dst='any', port=None, proto=None, index=None):
|
||||||
|
"""
|
||||||
|
Grant access to an address or subnet
|
||||||
|
|
||||||
|
:param src: address (e.g. 192.168.1.234) or subnet
|
||||||
|
(e.g. 192.168.1.0/24).
|
||||||
|
:param dst: destiny of the connection, if the machine has multiple IPs and
|
||||||
|
connections to only one of those have to accepted this is the
|
||||||
|
field has to be set.
|
||||||
|
:param port: destiny port
|
||||||
|
:param proto: protocol (tcp or udp)
|
||||||
|
:param index: if different from None the rule is inserted at the given
|
||||||
|
`index`.
|
||||||
|
"""
|
||||||
|
return modify_access(src, dst=dst, port=port, proto=proto, action='allow',
|
||||||
|
index=index)
|
||||||
|
|
||||||
|
|
||||||
|
def revoke_access(src, dst='any', port=None, proto=None):
|
||||||
|
"""
|
||||||
|
Revoke access to an address or subnet
|
||||||
|
|
||||||
|
:param src: address (e.g. 192.168.1.234) or subnet
|
||||||
|
(e.g. 192.168.1.0/24).
|
||||||
|
:param dst: destiny of the connection, if the machine has multiple IPs and
|
||||||
|
connections to only one of those have to accepted this is the
|
||||||
|
field has to be set.
|
||||||
|
:param port: destiny port
|
||||||
|
:param proto: protocol (tcp or udp)
|
||||||
|
"""
|
||||||
|
return modify_access(src, dst=dst, port=port, proto=proto, action='delete')
|
||||||
|
|
||||||
|
|
||||||
|
def service(name, action):
|
||||||
|
"""
|
||||||
|
Open/close access to a service
|
||||||
|
|
||||||
|
:param name: could be a service name defined in `/etc/services` or a port
|
||||||
|
number.
|
||||||
|
:param action: `open` or `close`
|
||||||
|
"""
|
||||||
|
if action == 'open':
|
||||||
|
subprocess.check_output(['ufw', 'allow', str(name)],
|
||||||
|
universal_newlines=True)
|
||||||
|
elif action == 'close':
|
||||||
|
subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
|
||||||
|
universal_newlines=True)
|
||||||
|
else:
|
||||||
|
raise UFWError(("'{}' not supported, use 'allow' "
|
||||||
|
"or 'delete'").format(action))
|
@ -79,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
services.append(this_service)
|
services.append(this_service)
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
||||||
'ceph-osd', 'ceph-radosgw']
|
'ceph-osd', 'ceph-radosgw']
|
||||||
# Openstack subordinate charms do not expose an origin option as that
|
# Most OpenStack subordinate charms do not expose an origin option
|
||||||
# is controlled by the principle
|
# as that is controlled by the principle.
|
||||||
ignore = ['neutron-openvswitch']
|
ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
|
||||||
|
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
for svc in services:
|
for svc in services:
|
||||||
@ -110,7 +110,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
||||||
self.precise_havana, self.precise_icehouse,
|
self.precise_havana, self.precise_icehouse,
|
||||||
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
||||||
self.trusty_kilo, self.vivid_kilo) = range(10)
|
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
||||||
|
self.wily_liberty) = range(12)
|
||||||
|
|
||||||
releases = {
|
releases = {
|
||||||
('precise', None): self.precise_essex,
|
('precise', None): self.precise_essex,
|
||||||
@ -121,8 +122,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
('trusty', None): self.trusty_icehouse,
|
('trusty', None): self.trusty_icehouse,
|
||||||
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
||||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
||||||
|
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
||||||
('utopic', None): self.utopic_juno,
|
('utopic', None): self.utopic_juno,
|
||||||
('vivid', None): self.vivid_kilo}
|
('vivid', None): self.vivid_kilo,
|
||||||
|
('wily', None): self.wily_liberty}
|
||||||
return releases[(self.series, self.openstack)]
|
return releases[(self.series, self.openstack)]
|
||||||
|
|
||||||
def _get_openstack_release_string(self):
|
def _get_openstack_release_string(self):
|
||||||
@ -138,9 +141,43 @@ class OpenStackAmuletDeployment(AmuletDeployment):
|
|||||||
('trusty', 'icehouse'),
|
('trusty', 'icehouse'),
|
||||||
('utopic', 'juno'),
|
('utopic', 'juno'),
|
||||||
('vivid', 'kilo'),
|
('vivid', 'kilo'),
|
||||||
|
('wily', 'liberty'),
|
||||||
])
|
])
|
||||||
if self.openstack:
|
if self.openstack:
|
||||||
os_origin = self.openstack.split(':')[1]
|
os_origin = self.openstack.split(':')[1]
|
||||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
||||||
else:
|
else:
|
||||||
return releases[self.series]
|
return releases[self.series]
|
||||||
|
|
||||||
|
def get_ceph_expected_pools(self, radosgw=False):
|
||||||
|
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
||||||
|
test scenario, based on OpenStack release and whether ceph radosgw
|
||||||
|
is flagged as present or not."""
|
||||||
|
|
||||||
|
if self._get_openstack_release() >= self.trusty_kilo:
|
||||||
|
# Kilo or later
|
||||||
|
pools = [
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
else:
|
||||||
|
# Juno or earlier
|
||||||
|
pools = [
|
||||||
|
'data',
|
||||||
|
'metadata',
|
||||||
|
'rbd',
|
||||||
|
'cinder',
|
||||||
|
'glance'
|
||||||
|
]
|
||||||
|
|
||||||
|
if radosgw:
|
||||||
|
pools.extend([
|
||||||
|
'.rgw.root',
|
||||||
|
'.rgw.control',
|
||||||
|
'.rgw',
|
||||||
|
'.rgw.gc',
|
||||||
|
'.users.uid'
|
||||||
|
])
|
||||||
|
|
||||||
|
return pools
|
||||||
|
@ -14,16 +14,20 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import amulet
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
|
||||||
|
import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
|
import heatclient.v1.client as heat_client
|
||||||
import keystoneclient.v2_0 as keystone_client
|
import keystoneclient.v2_0 as keystone_client
|
||||||
import novaclient.v1_1.client as nova_client
|
import novaclient.v1_1.client as nova_client
|
||||||
|
import swiftclient
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
AmuletUtils
|
AmuletUtils
|
||||||
@ -37,7 +41,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
"""OpenStack amulet utilities.
|
"""OpenStack amulet utilities.
|
||||||
|
|
||||||
This class inherits from AmuletUtils and has additional support
|
This class inherits from AmuletUtils and has additional support
|
||||||
that is specifically for use by OpenStack charms.
|
that is specifically for use by OpenStack charm tests.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, log_level=ERROR):
|
def __init__(self, log_level=ERROR):
|
||||||
@ -51,6 +55,8 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
Validate actual endpoint data vs expected endpoint data. The ports
|
Validate actual endpoint data vs expected endpoint data. The ports
|
||||||
are used to find the matching endpoint.
|
are used to find the matching endpoint.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating endpoint data...')
|
||||||
|
self.log.debug('actual: {}'.format(repr(endpoints)))
|
||||||
found = False
|
found = False
|
||||||
for ep in endpoints:
|
for ep in endpoints:
|
||||||
self.log.debug('endpoint: {}'.format(repr(ep)))
|
self.log.debug('endpoint: {}'.format(repr(ep)))
|
||||||
@ -77,6 +83,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
Validate a list of actual service catalog endpoints vs a list of
|
Validate a list of actual service catalog endpoints vs a list of
|
||||||
expected service catalog endpoints.
|
expected service catalog endpoints.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating service catalog endpoint data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for k, v in six.iteritems(expected):
|
for k, v in six.iteritems(expected):
|
||||||
if k in actual:
|
if k in actual:
|
||||||
@ -93,6 +100,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
Validate a list of actual tenant data vs list of expected tenant
|
Validate a list of actual tenant data vs list of expected tenant
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating tenant data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
@ -114,6 +122,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
Validate a list of actual role data vs a list of expected role
|
Validate a list of actual role data vs a list of expected role
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating role data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
@ -134,6 +143,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
Validate a list of actual user data vs a list of expected user
|
Validate a list of actual user data vs a list of expected user
|
||||||
data.
|
data.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating user data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
for e in expected:
|
for e in expected:
|
||||||
found = False
|
found = False
|
||||||
@ -155,17 +165,30 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
|
|
||||||
Validate a list of actual flavors vs a list of expected flavors.
|
Validate a list of actual flavors vs a list of expected flavors.
|
||||||
"""
|
"""
|
||||||
|
self.log.debug('Validating flavor data...')
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
self.log.debug('actual: {}'.format(repr(actual)))
|
||||||
act = [a.name for a in actual]
|
act = [a.name for a in actual]
|
||||||
return self._validate_list_data(expected, act)
|
return self._validate_list_data(expected, act)
|
||||||
|
|
||||||
def tenant_exists(self, keystone, tenant):
|
def tenant_exists(self, keystone, tenant):
|
||||||
"""Return True if tenant exists."""
|
"""Return True if tenant exists."""
|
||||||
|
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
return tenant in [t.name for t in keystone.tenants.list()]
|
||||||
|
|
||||||
|
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||||
|
password, tenant):
|
||||||
|
"""Authenticates admin user with cinder."""
|
||||||
|
# NOTE(beisner): cinder python client doesn't accept tokens.
|
||||||
|
service_ip = \
|
||||||
|
keystone_sentry.relation('shared-db',
|
||||||
|
'mysql:shared-db')['private-address']
|
||||||
|
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
|
||||||
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
tenant):
|
tenant):
|
||||||
"""Authenticates admin user with the keystone admin endpoint."""
|
"""Authenticates admin user with the keystone admin endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone admin...')
|
||||||
unit = keystone_sentry
|
unit = keystone_sentry
|
||||||
service_ip = unit.relation('shared-db',
|
service_ip = unit.relation('shared-db',
|
||||||
'mysql:shared-db')['private-address']
|
'mysql:shared-db')['private-address']
|
||||||
@ -175,6 +198,7 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
|
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return keystone_client.Client(username=user, password=password,
|
return keystone_client.Client(username=user, password=password,
|
||||||
@ -182,19 +206,49 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
def authenticate_glance_admin(self, keystone):
|
||||||
"""Authenticates admin user with glance."""
|
"""Authenticates admin user with glance."""
|
||||||
|
self.log.debug('Authenticating glance admin...')
|
||||||
ep = keystone.service_catalog.url_for(service_type='image',
|
ep = keystone.service_catalog.url_for(service_type='image',
|
||||||
endpoint_type='adminURL')
|
endpoint_type='adminURL')
|
||||||
return glance_client.Client(ep, token=keystone.auth_token)
|
return glance_client.Client(ep, token=keystone.auth_token)
|
||||||
|
|
||||||
|
def authenticate_heat_admin(self, keystone):
|
||||||
|
"""Authenticates the admin user with heat."""
|
||||||
|
self.log.debug('Authenticating heat admin...')
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='orchestration',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
|
||||||
|
|
||||||
def authenticate_nova_user(self, keystone, user, password, tenant):
|
def authenticate_nova_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with nova-api."""
|
"""Authenticates a regular user with nova-api."""
|
||||||
|
self.log.debug('Authenticating nova user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return nova_client.Client(username=user, api_key=password,
|
return nova_client.Client(username=user, api_key=password,
|
||||||
project_id=tenant, auth_url=ep)
|
project_id=tenant, auth_url=ep)
|
||||||
|
|
||||||
|
def authenticate_swift_user(self, keystone, user, password, tenant):
|
||||||
|
"""Authenticates a regular user with swift api."""
|
||||||
|
self.log.debug('Authenticating swift user ({})...'.format(user))
|
||||||
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
|
endpoint_type='publicURL')
|
||||||
|
return swiftclient.Connection(authurl=ep,
|
||||||
|
user=user,
|
||||||
|
key=password,
|
||||||
|
tenant_name=tenant,
|
||||||
|
auth_version='2.0')
|
||||||
|
|
||||||
def create_cirros_image(self, glance, image_name):
|
def create_cirros_image(self, glance, image_name):
|
||||||
"""Download the latest cirros image and upload it to glance."""
|
"""Download the latest cirros image and upload it to glance,
|
||||||
|
validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param glance: pointer to authenticated glance connection
|
||||||
|
:param image_name: display name for new image
|
||||||
|
:returns: glance image pointer
|
||||||
|
"""
|
||||||
|
self.log.debug('Creating glance cirros image '
|
||||||
|
'({})...'.format(image_name))
|
||||||
|
|
||||||
|
# Download cirros image
|
||||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
||||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
||||||
if http_proxy:
|
if http_proxy:
|
||||||
@ -203,57 +257,67 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
else:
|
else:
|
||||||
opener = urllib.FancyURLopener()
|
opener = urllib.FancyURLopener()
|
||||||
|
|
||||||
f = opener.open("http://download.cirros-cloud.net/version/released")
|
f = opener.open('http://download.cirros-cloud.net/version/released')
|
||||||
version = f.read().strip()
|
version = f.read().strip()
|
||||||
cirros_img = "cirros-{}-x86_64-disk.img".format(version)
|
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
||||||
local_path = os.path.join('tests', cirros_img)
|
local_path = os.path.join('tests', cirros_img)
|
||||||
|
|
||||||
if not os.path.exists(local_path):
|
if not os.path.exists(local_path):
|
||||||
cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
|
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
||||||
version, cirros_img)
|
version, cirros_img)
|
||||||
opener.retrieve(cirros_url, local_path)
|
opener.retrieve(cirros_url, local_path)
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
|
# Create glance image
|
||||||
with open(local_path) as f:
|
with open(local_path) as f:
|
||||||
image = glance.images.create(name=image_name, is_public=True,
|
image = glance.images.create(name=image_name, is_public=True,
|
||||||
disk_format='qcow2',
|
disk_format='qcow2',
|
||||||
container_format='bare', data=f)
|
container_format='bare', data=f)
|
||||||
count = 1
|
|
||||||
status = image.status
|
|
||||||
while status != 'active' and count < 10:
|
|
||||||
time.sleep(3)
|
|
||||||
image = glance.images.get(image.id)
|
|
||||||
status = image.status
|
|
||||||
self.log.debug('image status: {}'.format(status))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if status != 'active':
|
# Wait for image to reach active status
|
||||||
self.log.error('image creation timed out')
|
img_id = image.id
|
||||||
return None
|
ret = self.resource_reaches_status(glance.images, img_id,
|
||||||
|
expected_stat='active',
|
||||||
|
msg='Image status wait')
|
||||||
|
if not ret:
|
||||||
|
msg = 'Glance image failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new image
|
||||||
|
self.log.debug('Validating image attributes...')
|
||||||
|
val_img_name = glance.images.get(img_id).name
|
||||||
|
val_img_stat = glance.images.get(img_id).status
|
||||||
|
val_img_pub = glance.images.get(img_id).is_public
|
||||||
|
val_img_cfmt = glance.images.get(img_id).container_format
|
||||||
|
val_img_dfmt = glance.images.get(img_id).disk_format
|
||||||
|
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
|
||||||
|
'container fmt:{} disk fmt:{}'.format(
|
||||||
|
val_img_name, val_img_pub, img_id,
|
||||||
|
val_img_stat, val_img_cfmt, val_img_dfmt))
|
||||||
|
|
||||||
|
if val_img_name == image_name and val_img_stat == 'active' \
|
||||||
|
and val_img_pub is True and val_img_cfmt == 'bare' \
|
||||||
|
and val_img_dfmt == 'qcow2':
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
return image
|
return image
|
||||||
|
|
||||||
def delete_image(self, glance, image):
|
def delete_image(self, glance, image):
|
||||||
"""Delete the specified image."""
|
"""Delete the specified image."""
|
||||||
num_before = len(list(glance.images.list()))
|
|
||||||
glance.images.delete(image)
|
|
||||||
|
|
||||||
count = 1
|
# /!\ DEPRECATION WARNING
|
||||||
num_after = len(list(glance.images.list()))
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
while num_after != (num_before - 1) and count < 10:
|
'delete_resource instead of delete_image.')
|
||||||
time.sleep(3)
|
self.log.debug('Deleting glance image ({})...'.format(image))
|
||||||
num_after = len(list(glance.images.list()))
|
return self.delete_resource(glance.images, image, msg='glance image')
|
||||||
self.log.debug('number of images: {}'.format(num_after))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
|
||||||
self.log.error('image deletion timed out')
|
|
||||||
return False
|
|
||||||
|
|
||||||
return True
|
|
||||||
|
|
||||||
def create_instance(self, nova, image_name, instance_name, flavor):
|
def create_instance(self, nova, image_name, instance_name, flavor):
|
||||||
"""Create the specified instance."""
|
"""Create the specified instance."""
|
||||||
|
self.log.debug('Creating instance '
|
||||||
|
'({}|{}|{})'.format(instance_name, image_name, flavor))
|
||||||
image = nova.images.find(name=image_name)
|
image = nova.images.find(name=image_name)
|
||||||
flavor = nova.flavors.find(name=flavor)
|
flavor = nova.flavors.find(name=flavor)
|
||||||
instance = nova.servers.create(name=instance_name, image=image,
|
instance = nova.servers.create(name=instance_name, image=image,
|
||||||
@ -276,19 +340,265 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
|
|
||||||
def delete_instance(self, nova, instance):
|
def delete_instance(self, nova, instance):
|
||||||
"""Delete the specified instance."""
|
"""Delete the specified instance."""
|
||||||
num_before = len(list(nova.servers.list()))
|
|
||||||
nova.servers.delete(instance)
|
|
||||||
|
|
||||||
count = 1
|
# /!\ DEPRECATION WARNING
|
||||||
num_after = len(list(nova.servers.list()))
|
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||||
while num_after != (num_before - 1) and count < 10:
|
'delete_resource instead of delete_instance.')
|
||||||
time.sleep(3)
|
self.log.debug('Deleting instance ({})...'.format(instance))
|
||||||
num_after = len(list(nova.servers.list()))
|
return self.delete_resource(nova.servers, instance,
|
||||||
self.log.debug('number of instances: {}'.format(num_after))
|
msg='nova instance')
|
||||||
count += 1
|
|
||||||
|
|
||||||
if num_after != (num_before - 1):
|
def create_or_get_keypair(self, nova, keypair_name="testkey"):
|
||||||
self.log.error('instance deletion timed out')
|
"""Create a new keypair, or return pointer if it already exists."""
|
||||||
|
try:
|
||||||
|
_keypair = nova.keypairs.get(keypair_name)
|
||||||
|
self.log.debug('Keypair ({}) already exists, '
|
||||||
|
'using it.'.format(keypair_name))
|
||||||
|
return _keypair
|
||||||
|
except:
|
||||||
|
self.log.debug('Keypair ({}) does not exist, '
|
||||||
|
'creating it.'.format(keypair_name))
|
||||||
|
|
||||||
|
_keypair = nova.keypairs.create(name=keypair_name)
|
||||||
|
return _keypair
|
||||||
|
|
||||||
|
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
|
||||||
|
img_id=None, src_vol_id=None, snap_id=None):
|
||||||
|
"""Create cinder volume, optionally from a glance image, OR
|
||||||
|
optionally as a clone of an existing volume, OR optionally
|
||||||
|
from a snapshot. Wait for the new volume status to reach
|
||||||
|
the expected status, validate and return a resource pointer.
|
||||||
|
|
||||||
|
:param vol_name: cinder volume display name
|
||||||
|
:param vol_size: size in gigabytes
|
||||||
|
:param img_id: optional glance image id
|
||||||
|
:param src_vol_id: optional source volume id to clone
|
||||||
|
:param snap_id: optional snapshot id to use
|
||||||
|
:returns: cinder volume pointer
|
||||||
|
"""
|
||||||
|
# Handle parameter input and avoid impossible combinations
|
||||||
|
if img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume from image
|
||||||
|
self.log.debug('Creating cinder volume from glance image...')
|
||||||
|
bootable = 'true'
|
||||||
|
elif src_vol_id and not img_id and not snap_id:
|
||||||
|
# Clone an existing volume
|
||||||
|
self.log.debug('Cloning cinder volume...')
|
||||||
|
bootable = cinder.volumes.get(src_vol_id).bootable
|
||||||
|
elif snap_id and not src_vol_id and not img_id:
|
||||||
|
# Create volume from snapshot
|
||||||
|
self.log.debug('Creating cinder volume from snapshot...')
|
||||||
|
snap = cinder.volume_snapshots.find(id=snap_id)
|
||||||
|
vol_size = snap.size
|
||||||
|
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
|
||||||
|
bootable = cinder.volumes.get(snap_vol_id).bootable
|
||||||
|
elif not img_id and not src_vol_id and not snap_id:
|
||||||
|
# Create volume
|
||||||
|
self.log.debug('Creating cinder volume...')
|
||||||
|
bootable = 'false'
|
||||||
|
else:
|
||||||
|
# Impossible combination of parameters
|
||||||
|
msg = ('Invalid method use - name:{} size:{} img_id:{} '
|
||||||
|
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
|
||||||
|
img_id, src_vol_id,
|
||||||
|
snap_id))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Create new volume
|
||||||
|
try:
|
||||||
|
vol_new = cinder.volumes.create(display_name=vol_name,
|
||||||
|
imageRef=img_id,
|
||||||
|
size=vol_size,
|
||||||
|
source_volid=src_vol_id,
|
||||||
|
snapshot_id=snap_id)
|
||||||
|
vol_id = vol_new.id
|
||||||
|
except Exception as e:
|
||||||
|
msg = 'Failed to create volume: {}'.format(e)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Wait for volume to reach available status
|
||||||
|
ret = self.resource_reaches_status(cinder.volumes, vol_id,
|
||||||
|
expected_stat="available",
|
||||||
|
msg="Volume status wait")
|
||||||
|
if not ret:
|
||||||
|
msg = 'Cinder volume failed to reach expected state.'
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Re-validate new volume
|
||||||
|
self.log.debug('Validating volume attributes...')
|
||||||
|
val_vol_name = cinder.volumes.get(vol_id).display_name
|
||||||
|
val_vol_boot = cinder.volumes.get(vol_id).bootable
|
||||||
|
val_vol_stat = cinder.volumes.get(vol_id).status
|
||||||
|
val_vol_size = cinder.volumes.get(vol_id).size
|
||||||
|
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
|
||||||
|
'{} size:{}'.format(val_vol_name, vol_id,
|
||||||
|
val_vol_stat, val_vol_boot,
|
||||||
|
val_vol_size))
|
||||||
|
|
||||||
|
if val_vol_boot == bootable and val_vol_stat == 'available' \
|
||||||
|
and val_vol_name == vol_name and val_vol_size == vol_size:
|
||||||
|
self.log.debug(msg_attr)
|
||||||
|
else:
|
||||||
|
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
return vol_new
|
||||||
|
|
||||||
|
def delete_resource(self, resource, resource_id,
|
||||||
|
msg="resource", max_wait=120):
|
||||||
|
"""Delete one openstack resource, such as one instance, keypair,
|
||||||
|
image, volume, stack, etc., and confirm deletion within max wait time.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex:glance_client.images
|
||||||
|
:param resource_id: unique name or id for the openstack resource
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, otherwise False
|
||||||
|
"""
|
||||||
|
self.log.debug('Deleting OpenStack resource '
|
||||||
|
'{} ({})'.format(resource_id, msg))
|
||||||
|
num_before = len(list(resource.list()))
|
||||||
|
resource.delete(resource_id)
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
while num_after != (num_before - 1) and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} delete check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
num_before,
|
||||||
|
num_after,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
num_after = len(list(resource.list()))
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual count = {}, '
|
||||||
|
'{}'.format(msg, num_before - 1, num_after))
|
||||||
|
|
||||||
|
if num_after == (num_before - 1):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
self.log.error('{} delete timed out'.format(msg))
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
def resource_reaches_status(self, resource, resource_id,
|
||||||
|
expected_stat='available',
|
||||||
|
msg='resource', max_wait=120):
|
||||||
|
"""Wait for an openstack resources status to reach an
|
||||||
|
expected status within a specified time. Useful to confirm that
|
||||||
|
nova instances, cinder vols, snapshots, glance images, heat stacks
|
||||||
|
and other resources eventually reach the expected status.
|
||||||
|
|
||||||
|
:param resource: pointer to os resource type, ex: heat_client.stacks
|
||||||
|
:param resource_id: unique id for the openstack resource
|
||||||
|
:param expected_stat: status to expect resource to reach
|
||||||
|
:param msg: text to identify purpose in logging
|
||||||
|
:param max_wait: maximum wait time in seconds
|
||||||
|
:returns: True if successful, False if status is not reached
|
||||||
|
"""
|
||||||
|
|
||||||
|
tries = 0
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
while resource_stat != expected_stat and tries < (max_wait / 4):
|
||||||
|
self.log.debug('{} status check: '
|
||||||
|
'{} [{}:{}] {}'.format(msg, tries,
|
||||||
|
resource_stat,
|
||||||
|
expected_stat,
|
||||||
|
resource_id))
|
||||||
|
time.sleep(4)
|
||||||
|
resource_stat = resource.get(resource_id).status
|
||||||
|
tries += 1
|
||||||
|
|
||||||
|
self.log.debug('{}: expected, actual status = {}, '
|
||||||
|
'{}'.format(msg, resource_stat, expected_stat))
|
||||||
|
|
||||||
|
if resource_stat == expected_stat:
|
||||||
return True
|
return True
|
||||||
|
else:
|
||||||
|
self.log.debug('{} never reached expected status: '
|
||||||
|
'{}'.format(resource_id, expected_stat))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def get_ceph_osd_id_cmd(self, index):
|
||||||
|
"""Produce a shell command that will return a ceph-osd id."""
|
||||||
|
return ("`initctl list | grep 'ceph-osd ' | "
|
||||||
|
"awk 'NR=={} {{ print $2 }}' | "
|
||||||
|
"grep -o '[0-9]*'`".format(index + 1))
|
||||||
|
|
||||||
|
def get_ceph_pools(self, sentry_unit):
|
||||||
|
"""Return a dict of ceph pools from a single ceph unit, with
|
||||||
|
pool name as keys, pool id as vals."""
|
||||||
|
pools = {}
|
||||||
|
cmd = 'sudo ceph osd lspools'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
|
||||||
|
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
|
||||||
|
for pool in str(output).split(','):
|
||||||
|
pool_id_name = pool.split(' ')
|
||||||
|
if len(pool_id_name) == 2:
|
||||||
|
pool_id = pool_id_name[0]
|
||||||
|
pool_name = pool_id_name[1]
|
||||||
|
pools[pool_name] = int(pool_id)
|
||||||
|
|
||||||
|
self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
|
||||||
|
pools))
|
||||||
|
return pools
|
||||||
|
|
||||||
|
def get_ceph_df(self, sentry_unit):
|
||||||
|
"""Return dict of ceph df json output, including ceph pool state.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:returns: Dict of ceph df output
|
||||||
|
"""
|
||||||
|
cmd = 'sudo ceph df --format=json'
|
||||||
|
output, code = sentry_unit.run(cmd)
|
||||||
|
if code != 0:
|
||||||
|
msg = ('{} `{}` returned {} '
|
||||||
|
'{}'.format(sentry_unit.info['unit_name'],
|
||||||
|
cmd, code, output))
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
|
return json.loads(output)
|
||||||
|
|
||||||
|
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
|
||||||
|
"""Take a sample of attributes of a ceph pool, returning ceph
|
||||||
|
pool name, object count and disk space used for the specified
|
||||||
|
pool ID number.
|
||||||
|
|
||||||
|
:param sentry_unit: Pointer to amulet sentry instance (juju unit)
|
||||||
|
:param pool_id: Ceph pool ID
|
||||||
|
:returns: List of pool name, object count, kb disk space used
|
||||||
|
"""
|
||||||
|
df = self.get_ceph_df(sentry_unit)
|
||||||
|
pool_name = df['pools'][pool_id]['name']
|
||||||
|
obj_count = df['pools'][pool_id]['stats']['objects']
|
||||||
|
kb_used = df['pools'][pool_id]['stats']['kb_used']
|
||||||
|
self.log.debug('Ceph {} pool (ID {}): {} objects, '
|
||||||
|
'{} kb used'.format(pool_name, pool_id,
|
||||||
|
obj_count, kb_used))
|
||||||
|
return pool_name, obj_count, kb_used
|
||||||
|
|
||||||
|
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
|
||||||
|
"""Validate ceph pool samples taken over time, such as pool
|
||||||
|
object counts or pool kb used, before adding, after adding, and
|
||||||
|
after deleting items which affect those pool attributes. The
|
||||||
|
2nd element is expected to be greater than the 1st; 3rd is expected
|
||||||
|
to be less than the 2nd.
|
||||||
|
|
||||||
|
:param samples: List containing 3 data samples
|
||||||
|
:param sample_type: String for logging and usage context
|
||||||
|
:returns: None if successful, Failure message otherwise
|
||||||
|
"""
|
||||||
|
original, created, deleted = range(3)
|
||||||
|
if samples[created] <= samples[original] or \
|
||||||
|
samples[deleted] >= samples[created]:
|
||||||
|
return ('Ceph {} samples ({}) '
|
||||||
|
'unexpected.'.format(sample_type, samples))
|
||||||
|
else:
|
||||||
|
self.log.debug('Ceph {} samples (OK): '
|
||||||
|
'{}'.format(sample_type, samples))
|
||||||
|
return None
|
||||||
|
@ -122,12 +122,14 @@ def config_flags_parser(config_flags):
|
|||||||
of specifying multiple key value pairs within the same string. For
|
of specifying multiple key value pairs within the same string. For
|
||||||
example, a string in the format of 'key1=value1, key2=value2' will
|
example, a string in the format of 'key1=value1, key2=value2' will
|
||||||
return a dict of:
|
return a dict of:
|
||||||
|
|
||||||
{'key1': 'value1',
|
{'key1': 'value1',
|
||||||
'key2': 'value2'}.
|
'key2': 'value2'}.
|
||||||
|
|
||||||
2. A string in the above format, but supporting a comma-delimited list
|
2. A string in the above format, but supporting a comma-delimited list
|
||||||
of values for the same key. For example, a string in the format of
|
of values for the same key. For example, a string in the format of
|
||||||
'key1=value1, key2=value3,value4,value5' will return a dict of:
|
'key1=value1, key2=value3,value4,value5' will return a dict of:
|
||||||
|
|
||||||
{'key1', 'value1',
|
{'key1', 'value1',
|
||||||
'key2', 'value2,value3,value4'}
|
'key2', 'value2,value3,value4'}
|
||||||
|
|
||||||
@ -136,6 +138,7 @@ def config_flags_parser(config_flags):
|
|||||||
used to specify more complex key value pairs. For example,
|
used to specify more complex key value pairs. For example,
|
||||||
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
|
a string in the format of 'key1: subkey1=value1, subkey2=value2' will
|
||||||
return a dict of:
|
return a dict of:
|
||||||
|
|
||||||
{'key1', 'subkey1=value1, subkey2=value2'}
|
{'key1', 'subkey1=value1, subkey2=value2'}
|
||||||
|
|
||||||
The provided config_flags string may be a list of comma-separated values
|
The provided config_flags string may be a list of comma-separated values
|
||||||
@ -240,7 +243,7 @@ class SharedDBContext(OSContextGenerator):
|
|||||||
if self.relation_prefix:
|
if self.relation_prefix:
|
||||||
password_setting = self.relation_prefix + '_password'
|
password_setting = self.relation_prefix + '_password'
|
||||||
|
|
||||||
for rid in relation_ids('shared-db'):
|
for rid in relation_ids(self.interfaces[0]):
|
||||||
for unit in related_units(rid):
|
for unit in related_units(rid):
|
||||||
rdata = relation_get(rid=rid, unit=unit)
|
rdata = relation_get(rid=rid, unit=unit)
|
||||||
host = rdata.get('db_host')
|
host = rdata.get('db_host')
|
||||||
@ -880,13 +883,13 @@ class NeutronContext(OSContextGenerator):
|
|||||||
self.network_manager)
|
self.network_manager)
|
||||||
config = neutron_plugin_attribute(self.plugin, 'config',
|
config = neutron_plugin_attribute(self.plugin, 'config',
|
||||||
self.network_manager)
|
self.network_manager)
|
||||||
ovs_ctxt = {'core_plugin': driver,
|
pg_ctxt = {'core_plugin': driver,
|
||||||
'neutron_plugin': 'plumgrid',
|
'neutron_plugin': 'plumgrid',
|
||||||
'neutron_security_groups': self.neutron_security_groups,
|
'neutron_security_groups': self.neutron_security_groups,
|
||||||
'local_ip': unit_private_ip(),
|
'local_ip': unit_private_ip(),
|
||||||
'config': config}
|
'config': config}
|
||||||
|
|
||||||
return ovs_ctxt
|
return pg_ctxt
|
||||||
|
|
||||||
def neutron_ctxt(self):
|
def neutron_ctxt(self):
|
||||||
if https():
|
if https():
|
||||||
@ -904,8 +907,6 @@ class NeutronContext(OSContextGenerator):
|
|||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
def __call__(self):
|
def __call__(self):
|
||||||
self._ensure_packages()
|
|
||||||
|
|
||||||
if self.network_manager not in ['quantum', 'neutron']:
|
if self.network_manager not in ['quantum', 'neutron']:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
|
@ -1,32 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#--------------------------------------------
|
|
||||||
# This file is managed by Juju
|
|
||||||
#--------------------------------------------
|
|
||||||
#
|
|
||||||
# Copyright 2009,2012 Canonical Ltd.
|
|
||||||
# Author: Tom Haddon
|
|
||||||
|
|
||||||
CRITICAL=0
|
|
||||||
NOTACTIVE=''
|
|
||||||
LOGFILE=/var/log/nagios/check_haproxy.log
|
|
||||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
|
|
||||||
|
|
||||||
for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
|
|
||||||
do
|
|
||||||
output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
|
|
||||||
if [ $? != 0 ]; then
|
|
||||||
date >> $LOGFILE
|
|
||||||
echo $output >> $LOGFILE
|
|
||||||
/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
|
|
||||||
CRITICAL=1
|
|
||||||
NOTACTIVE="${NOTACTIVE} $appserver"
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
if [ $CRITICAL = 1 ]; then
|
|
||||||
echo "CRITICAL:${NOTACTIVE}"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "OK: All haproxy instances looking good"
|
|
||||||
exit 0
|
|
@ -1,30 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
#--------------------------------------------
|
|
||||||
# This file is managed by Juju
|
|
||||||
#--------------------------------------------
|
|
||||||
#
|
|
||||||
# Copyright 2009,2012 Canonical Ltd.
|
|
||||||
# Author: Tom Haddon
|
|
||||||
|
|
||||||
# These should be config options at some stage
|
|
||||||
CURRQthrsh=0
|
|
||||||
MAXQthrsh=100
|
|
||||||
|
|
||||||
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
|
|
||||||
|
|
||||||
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
|
|
||||||
|
|
||||||
for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
|
|
||||||
do
|
|
||||||
CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
|
|
||||||
MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)
|
|
||||||
|
|
||||||
if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
|
|
||||||
echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
|
|
||||||
exit 2
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo "OK: All haproxy queue depths looking good"
|
|
||||||
exit 0
|
|
||||||
|
|
@ -17,6 +17,7 @@
|
|||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
config,
|
config,
|
||||||
unit_get,
|
unit_get,
|
||||||
|
service_name,
|
||||||
)
|
)
|
||||||
from charmhelpers.contrib.network.ip import (
|
from charmhelpers.contrib.network.ip import (
|
||||||
get_address_in_network,
|
get_address_in_network,
|
||||||
@ -26,8 +27,6 @@ from charmhelpers.contrib.network.ip import (
|
|||||||
)
|
)
|
||||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
||||||
|
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
PUBLIC = 'public'
|
PUBLIC = 'public'
|
||||||
INTERNAL = 'int'
|
INTERNAL = 'int'
|
||||||
ADMIN = 'admin'
|
ADMIN = 'admin'
|
||||||
@ -35,15 +34,18 @@ ADMIN = 'admin'
|
|||||||
ADDRESS_MAP = {
|
ADDRESS_MAP = {
|
||||||
PUBLIC: {
|
PUBLIC: {
|
||||||
'config': 'os-public-network',
|
'config': 'os-public-network',
|
||||||
'fallback': 'public-address'
|
'fallback': 'public-address',
|
||||||
|
'override': 'os-public-hostname',
|
||||||
},
|
},
|
||||||
INTERNAL: {
|
INTERNAL: {
|
||||||
'config': 'os-internal-network',
|
'config': 'os-internal-network',
|
||||||
'fallback': 'private-address'
|
'fallback': 'private-address',
|
||||||
|
'override': 'os-internal-hostname',
|
||||||
},
|
},
|
||||||
ADMIN: {
|
ADMIN: {
|
||||||
'config': 'os-admin-network',
|
'config': 'os-admin-network',
|
||||||
'fallback': 'private-address'
|
'fallback': 'private-address',
|
||||||
|
'override': 'os-admin-hostname',
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -57,15 +59,50 @@ def canonical_url(configs, endpoint_type=PUBLIC):
|
|||||||
:param endpoint_type: str endpoint type to resolve.
|
:param endpoint_type: str endpoint type to resolve.
|
||||||
:param returns: str base URL for services on the current service unit.
|
:param returns: str base URL for services on the current service unit.
|
||||||
"""
|
"""
|
||||||
scheme = 'http'
|
scheme = _get_scheme(configs)
|
||||||
if 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
address = resolve_address(endpoint_type)
|
address = resolve_address(endpoint_type)
|
||||||
if is_ipv6(address):
|
if is_ipv6(address):
|
||||||
address = "[{}]".format(address)
|
address = "[{}]".format(address)
|
||||||
|
|
||||||
return '%s://%s' % (scheme, address)
|
return '%s://%s' % (scheme, address)
|
||||||
|
|
||||||
|
|
||||||
|
def _get_scheme(configs):
|
||||||
|
"""Returns the scheme to use for the url (either http or https)
|
||||||
|
depending upon whether https is in the configs value.
|
||||||
|
|
||||||
|
:param configs: OSTemplateRenderer config templating object to inspect
|
||||||
|
for a complete https context.
|
||||||
|
:returns: either 'http' or 'https' depending on whether https is
|
||||||
|
configured within the configs context.
|
||||||
|
"""
|
||||||
|
scheme = 'http'
|
||||||
|
if configs and 'https' in configs.complete_contexts():
|
||||||
|
scheme = 'https'
|
||||||
|
return scheme
|
||||||
|
|
||||||
|
|
||||||
|
def _get_address_override(endpoint_type=PUBLIC):
|
||||||
|
"""Returns any address overrides that the user has defined based on the
|
||||||
|
endpoint type.
|
||||||
|
|
||||||
|
Note: this function allows for the service name to be inserted into the
|
||||||
|
address if the user specifies {service_name}.somehost.org.
|
||||||
|
|
||||||
|
:param endpoint_type: the type of endpoint to retrieve the override
|
||||||
|
value for.
|
||||||
|
:returns: any endpoint address or hostname that the user has overridden
|
||||||
|
or None if an override is not present.
|
||||||
|
"""
|
||||||
|
override_key = ADDRESS_MAP[endpoint_type]['override']
|
||||||
|
addr_override = config(override_key)
|
||||||
|
if not addr_override:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return addr_override.format(service_name=service_name())
|
||||||
|
|
||||||
|
|
||||||
def resolve_address(endpoint_type=PUBLIC):
|
def resolve_address(endpoint_type=PUBLIC):
|
||||||
"""Return unit address depending on net config.
|
"""Return unit address depending on net config.
|
||||||
|
|
||||||
@ -77,7 +114,10 @@ def resolve_address(endpoint_type=PUBLIC):
|
|||||||
|
|
||||||
:param endpoint_type: Network endpoing type
|
:param endpoint_type: Network endpoing type
|
||||||
"""
|
"""
|
||||||
resolved_address = None
|
resolved_address = _get_address_override(endpoint_type)
|
||||||
|
if resolved_address:
|
||||||
|
return resolved_address
|
||||||
|
|
||||||
vips = config('vip')
|
vips = config('vip')
|
||||||
if vips:
|
if vips:
|
||||||
vips = vips.split()
|
vips = vips.split()
|
||||||
@ -109,38 +149,3 @@ def resolve_address(endpoint_type=PUBLIC):
|
|||||||
"clustered=%s)" % (net_type, clustered))
|
"clustered=%s)" % (net_type, clustered))
|
||||||
|
|
||||||
return resolved_address
|
return resolved_address
|
||||||
|
|
||||||
|
|
||||||
def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
|
|
||||||
override=None):
|
|
||||||
"""Returns the correct endpoint URL to advertise to Keystone.
|
|
||||||
|
|
||||||
This method provides the correct endpoint URL which should be advertised to
|
|
||||||
the keystone charm for endpoint creation. This method allows for the url to
|
|
||||||
be overridden to force a keystone endpoint to have specific URL for any of
|
|
||||||
the defined scopes (admin, internal, public).
|
|
||||||
|
|
||||||
:param configs: OSTemplateRenderer config templating object to inspect
|
|
||||||
for a complete https context.
|
|
||||||
:param url_template: str format string for creating the url template. Only
|
|
||||||
two values will be passed - the scheme+hostname
|
|
||||||
returned by the canonical_url and the port.
|
|
||||||
:param endpoint_type: str endpoint type to resolve.
|
|
||||||
:param override: str the name of the config option which overrides the
|
|
||||||
endpoint URL defined by the charm itself. None will
|
|
||||||
disable any overrides (default).
|
|
||||||
"""
|
|
||||||
if override:
|
|
||||||
# Return any user-defined overrides for the keystone endpoint URL.
|
|
||||||
user_value = config(override)
|
|
||||||
if user_value:
|
|
||||||
return user_value.strip()
|
|
||||||
|
|
||||||
return url_template % (canonical_url(configs, endpoint_type), port)
|
|
||||||
|
|
||||||
|
|
||||||
public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)
|
|
||||||
|
|
||||||
internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)
|
|
||||||
|
|
||||||
admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
|
|
||||||
|
@ -172,14 +172,16 @@ def neutron_plugins():
|
|||||||
'services': ['calico-felix',
|
'services': ['calico-felix',
|
||||||
'bird',
|
'bird',
|
||||||
'neutron-dhcp-agent',
|
'neutron-dhcp-agent',
|
||||||
'nova-api-metadata'],
|
'nova-api-metadata',
|
||||||
|
'etcd'],
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
'packages': [[headers_package()] + determine_dkms_package(),
|
||||||
['calico-compute',
|
['calico-compute',
|
||||||
'bird',
|
'bird',
|
||||||
'neutron-dhcp-agent',
|
'neutron-dhcp-agent',
|
||||||
'nova-api-metadata']],
|
'nova-api-metadata',
|
||||||
'server_packages': ['neutron-server', 'calico-control'],
|
'etcd']],
|
||||||
'server_services': ['neutron-server']
|
'server_packages': ['neutron-server', 'calico-control', 'etcd'],
|
||||||
|
'server_services': ['neutron-server', 'etcd']
|
||||||
},
|
},
|
||||||
'vsp': {
|
'vsp': {
|
||||||
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
||||||
@ -203,8 +205,7 @@ def neutron_plugins():
|
|||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
ssl_dir=NEUTRON_CONF_DIR)],
|
||||||
'services': [],
|
'services': [],
|
||||||
'packages': [['plumgrid-lxc'],
|
'packages': [['plumgrid-lxc'],
|
||||||
['iovisor-dkms'],
|
['iovisor-dkms']],
|
||||||
['plumgrid-puppet']],
|
|
||||||
'server_packages': ['neutron-server',
|
'server_packages': ['neutron-server',
|
||||||
'neutron-plugin-plumgrid'],
|
'neutron-plugin-plumgrid'],
|
||||||
'server_services': ['neutron-server']
|
'server_services': ['neutron-server']
|
||||||
@ -271,11 +272,14 @@ def network_manager():
|
|||||||
def parse_mappings(mappings):
|
def parse_mappings(mappings):
|
||||||
parsed = {}
|
parsed = {}
|
||||||
if mappings:
|
if mappings:
|
||||||
mappings = mappings.split(' ')
|
mappings = mappings.split()
|
||||||
for m in mappings:
|
for m in mappings:
|
||||||
p = m.partition(':')
|
p = m.partition(':')
|
||||||
if p[1] == ':':
|
key = p[0].strip()
|
||||||
parsed[p[0].strip()] = p[2].strip()
|
if p[1]:
|
||||||
|
parsed[key] = p[2].strip()
|
||||||
|
else:
|
||||||
|
parsed[key] = ''
|
||||||
|
|
||||||
return parsed
|
return parsed
|
||||||
|
|
||||||
@ -298,13 +302,13 @@ def parse_data_port_mappings(mappings, default_bridge='br-data'):
|
|||||||
Returns dict of the form {bridge:port}.
|
Returns dict of the form {bridge:port}.
|
||||||
"""
|
"""
|
||||||
_mappings = parse_mappings(mappings)
|
_mappings = parse_mappings(mappings)
|
||||||
if not _mappings:
|
if not _mappings or list(_mappings.values()) == ['']:
|
||||||
if not mappings:
|
if not mappings:
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
# For backwards-compatibility we need to support port-only provided in
|
# For backwards-compatibility we need to support port-only provided in
|
||||||
# config.
|
# config.
|
||||||
_mappings = {default_bridge: mappings.split(' ')[0]}
|
_mappings = {default_bridge: mappings.split()[0]}
|
||||||
|
|
||||||
bridges = _mappings.keys()
|
bridges = _mappings.keys()
|
||||||
ports = _mappings.values()
|
ports = _mappings.values()
|
||||||
@ -324,6 +328,8 @@ def parse_vlan_range_mappings(mappings):
|
|||||||
|
|
||||||
Mappings must be a space-delimited list of provider:start:end mappings.
|
Mappings must be a space-delimited list of provider:start:end mappings.
|
||||||
|
|
||||||
|
The start:end range is optional and may be omitted.
|
||||||
|
|
||||||
Returns dict of the form {provider: (start, end)}.
|
Returns dict of the form {provider: (start, end)}.
|
||||||
"""
|
"""
|
||||||
_mappings = parse_mappings(mappings)
|
_mappings = parse_mappings(mappings)
|
||||||
|
@ -1,15 +0,0 @@
|
|||||||
###############################################################################
|
|
||||||
# [ WARNING ]
|
|
||||||
# cinder configuration file maintained by Juju
|
|
||||||
# local changes may be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
[global]
|
|
||||||
{% if auth -%}
|
|
||||||
auth_supported = {{ auth }}
|
|
||||||
keyring = /etc/ceph/$cluster.$name.keyring
|
|
||||||
mon host = {{ mon_hosts }}
|
|
||||||
{% endif -%}
|
|
||||||
log to syslog = {{ use_syslog }}
|
|
||||||
err to syslog = {{ use_syslog }}
|
|
||||||
clog to syslog = {{ use_syslog }}
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
|||||||
description "{{ service_description }}"
|
|
||||||
author "Juju {{ service_name }} Charm <juju@localhost>"
|
|
||||||
|
|
||||||
start on runlevel [2345]
|
|
||||||
stop on runlevel [!2345]
|
|
||||||
|
|
||||||
respawn
|
|
||||||
|
|
||||||
exec start-stop-daemon --start --chuid {{ user_name }} \
|
|
||||||
--chdir {{ start_dir }} --name {{ process_name }} \
|
|
||||||
--exec {{ executable_name }} -- \
|
|
||||||
{% for config_file in config_files -%}
|
|
||||||
--config-file={{ config_file }} \
|
|
||||||
{% endfor -%}
|
|
||||||
{% if log_file -%}
|
|
||||||
--log-file={{ log_file }}
|
|
||||||
{% endif -%}
|
|
@ -1,58 +0,0 @@
|
|||||||
global
|
|
||||||
log {{ local_host }} local0
|
|
||||||
log {{ local_host }} local1 notice
|
|
||||||
maxconn 20000
|
|
||||||
user haproxy
|
|
||||||
group haproxy
|
|
||||||
spread-checks 0
|
|
||||||
|
|
||||||
defaults
|
|
||||||
log global
|
|
||||||
mode tcp
|
|
||||||
option tcplog
|
|
||||||
option dontlognull
|
|
||||||
retries 3
|
|
||||||
timeout queue 1000
|
|
||||||
timeout connect 1000
|
|
||||||
{% if haproxy_client_timeout -%}
|
|
||||||
timeout client {{ haproxy_client_timeout }}
|
|
||||||
{% else -%}
|
|
||||||
timeout client 30000
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if haproxy_server_timeout -%}
|
|
||||||
timeout server {{ haproxy_server_timeout }}
|
|
||||||
{% else -%}
|
|
||||||
timeout server 30000
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
listen stats {{ stat_port }}
|
|
||||||
mode http
|
|
||||||
stats enable
|
|
||||||
stats hide-version
|
|
||||||
stats realm Haproxy\ Statistics
|
|
||||||
stats uri /
|
|
||||||
stats auth admin:password
|
|
||||||
|
|
||||||
{% if frontends -%}
|
|
||||||
{% for service, ports in service_ports.items() -%}
|
|
||||||
frontend tcp-in_{{ service }}
|
|
||||||
bind *:{{ ports[0] }}
|
|
||||||
{% if ipv6 -%}
|
|
||||||
bind :::{{ ports[0] }}
|
|
||||||
{% endif -%}
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
|
|
||||||
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
|
|
||||||
{% endfor -%}
|
|
||||||
default_backend {{ service }}_{{ default_backend }}
|
|
||||||
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
backend {{ service }}_{{ frontend }}
|
|
||||||
balance leastconn
|
|
||||||
{% for unit, address in frontends[frontend]['backends'].items() -%}
|
|
||||||
server {{ unit }} {{ address }}:{{ ports[1] }} check
|
|
||||||
{% endfor %}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endif -%}
|
|
@ -1,24 +0,0 @@
|
|||||||
{% if endpoints -%}
|
|
||||||
{% for ext_port in ext_ports -%}
|
|
||||||
Listen {{ ext_port }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% for address, endpoint, ext, int in endpoints -%}
|
|
||||||
<VirtualHost {{ address }}:{{ ext }}>
|
|
||||||
ServerName {{ endpoint }}
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
|
||||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
|
||||||
ProxyPass / http://localhost:{{ int }}/
|
|
||||||
ProxyPassReverse / http://localhost:{{ int }}/
|
|
||||||
ProxyPreserveHost on
|
|
||||||
</VirtualHost>
|
|
||||||
{% endfor -%}
|
|
||||||
<Proxy *>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
</Proxy>
|
|
||||||
<Location />
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</Location>
|
|
||||||
{% endif -%}
|
|
@ -1,24 +0,0 @@
|
|||||||
{% if endpoints -%}
|
|
||||||
{% for ext_port in ext_ports -%}
|
|
||||||
Listen {{ ext_port }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% for address, endpoint, ext, int in endpoints -%}
|
|
||||||
<VirtualHost {{ address }}:{{ ext }}>
|
|
||||||
ServerName {{ endpoint }}
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
|
||||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
|
||||||
ProxyPass / http://localhost:{{ int }}/
|
|
||||||
ProxyPassReverse / http://localhost:{{ int }}/
|
|
||||||
ProxyPreserveHost on
|
|
||||||
</VirtualHost>
|
|
||||||
{% endfor -%}
|
|
||||||
<Proxy *>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
</Proxy>
|
|
||||||
<Location />
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</Location>
|
|
||||||
{% endif -%}
|
|
@ -1,9 +0,0 @@
|
|||||||
{% if auth_host -%}
|
|
||||||
[keystone_authtoken]
|
|
||||||
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
|
|
||||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
|
|
||||||
admin_tenant_name = {{ admin_tenant_name }}
|
|
||||||
admin_user = {{ admin_user }}
|
|
||||||
admin_password = {{ admin_password }}
|
|
||||||
signing_dir = {{ signing_dir }}
|
|
||||||
{% endif -%}
|
|
@ -1,22 +0,0 @@
|
|||||||
{% if rabbitmq_host or rabbitmq_hosts -%}
|
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
rabbit_userid = {{ rabbitmq_user }}
|
|
||||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
|
||||||
rabbit_password = {{ rabbitmq_password }}
|
|
||||||
{% if rabbitmq_hosts -%}
|
|
||||||
rabbit_hosts = {{ rabbitmq_hosts }}
|
|
||||||
{% if rabbitmq_ha_queues -%}
|
|
||||||
rabbit_ha_queues = True
|
|
||||||
rabbit_durable_queues = False
|
|
||||||
{% endif -%}
|
|
||||||
{% else -%}
|
|
||||||
rabbit_host = {{ rabbitmq_host }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if rabbit_ssl_port -%}
|
|
||||||
rabbit_use_ssl = True
|
|
||||||
rabbit_port = {{ rabbit_ssl_port }}
|
|
||||||
{% if rabbit_ssl_ca -%}
|
|
||||||
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -1,14 +0,0 @@
|
|||||||
{% if zmq_host -%}
|
|
||||||
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
|
|
||||||
rpc_backend = zmq
|
|
||||||
rpc_zmq_host = {{ zmq_host }}
|
|
||||||
{% if zmq_redis_address -%}
|
|
||||||
rpc_zmq_matchmaker = redis
|
|
||||||
matchmaker_heartbeat_freq = 15
|
|
||||||
matchmaker_heartbeat_ttl = 30
|
|
||||||
[matchmaker_redis]
|
|
||||||
host = {{ zmq_redis_address }}
|
|
||||||
{% else -%}
|
|
||||||
rpc_zmq_matchmaker = ring
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -29,8 +29,8 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
|||||||
try:
|
try:
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
except ImportError:
|
except ImportError:
|
||||||
# python-jinja2 may not be installed yet, or we're running unittests.
|
apt_install('python-jinja2', fatal=True)
|
||||||
FileSystemLoader = ChoiceLoader = Environment = exceptions = None
|
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
||||||
|
|
||||||
|
|
||||||
class OSConfigException(Exception):
|
class OSConfigException(Exception):
|
||||||
|
@ -53,9 +53,13 @@ from charmhelpers.contrib.network.ip import (
|
|||||||
get_ipv6_addr
|
get_ipv6_addr
|
||||||
)
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.python.packages import (
|
||||||
|
pip_create_virtualenv,
|
||||||
|
pip_install,
|
||||||
|
)
|
||||||
|
|
||||||
from charmhelpers.core.host import lsb_release, mounts, umount
|
from charmhelpers.core.host import lsb_release, mounts, umount
|
||||||
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
||||||
from charmhelpers.contrib.python.packages import pip_install
|
|
||||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
||||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
||||||
|
|
||||||
@ -75,6 +79,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
|||||||
('trusty', 'icehouse'),
|
('trusty', 'icehouse'),
|
||||||
('utopic', 'juno'),
|
('utopic', 'juno'),
|
||||||
('vivid', 'kilo'),
|
('vivid', 'kilo'),
|
||||||
|
('wily', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
|
|
||||||
@ -87,6 +92,7 @@ OPENSTACK_CODENAMES = OrderedDict([
|
|||||||
('2014.1', 'icehouse'),
|
('2014.1', 'icehouse'),
|
||||||
('2014.2', 'juno'),
|
('2014.2', 'juno'),
|
||||||
('2015.1', 'kilo'),
|
('2015.1', 'kilo'),
|
||||||
|
('2015.2', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
# The ugly duckling
|
# The ugly duckling
|
||||||
@ -109,6 +115,7 @@ SWIFT_CODENAMES = OrderedDict([
|
|||||||
('2.2.0', 'juno'),
|
('2.2.0', 'juno'),
|
||||||
('2.2.1', 'kilo'),
|
('2.2.1', 'kilo'),
|
||||||
('2.2.2', 'kilo'),
|
('2.2.2', 'kilo'),
|
||||||
|
('2.3.0', 'liberty'),
|
||||||
])
|
])
|
||||||
|
|
||||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||||
@ -317,6 +324,9 @@ def configure_installation_source(rel):
|
|||||||
'kilo': 'trusty-updates/kilo',
|
'kilo': 'trusty-updates/kilo',
|
||||||
'kilo/updates': 'trusty-updates/kilo',
|
'kilo/updates': 'trusty-updates/kilo',
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'liberty': 'trusty-updates/liberty',
|
||||||
|
'liberty/updates': 'trusty-updates/liberty',
|
||||||
|
'liberty/proposed': 'trusty-proposed/liberty',
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -497,11 +507,22 @@ def git_install_requested():
|
|||||||
requirements_dir = None
|
requirements_dir = None
|
||||||
|
|
||||||
|
|
||||||
def git_clone_and_install(projects_yaml, core_project):
|
def _git_yaml_load(projects_yaml):
|
||||||
|
"""
|
||||||
|
Load the specified yaml into a dictionary.
|
||||||
|
"""
|
||||||
|
if not projects_yaml:
|
||||||
|
return None
|
||||||
|
|
||||||
|
return yaml.load(projects_yaml)
|
||||||
|
|
||||||
|
|
||||||
|
def git_clone_and_install(projects_yaml, core_project, depth=1):
|
||||||
"""
|
"""
|
||||||
Clone/install all specified OpenStack repositories.
|
Clone/install all specified OpenStack repositories.
|
||||||
|
|
||||||
The expected format of projects_yaml is:
|
The expected format of projects_yaml is:
|
||||||
|
|
||||||
repositories:
|
repositories:
|
||||||
- {name: keystone,
|
- {name: keystone,
|
||||||
repository: 'git://git.openstack.org/openstack/keystone.git',
|
repository: 'git://git.openstack.org/openstack/keystone.git',
|
||||||
@ -509,24 +530,25 @@ def git_clone_and_install(projects_yaml, core_project):
|
|||||||
- {name: requirements,
|
- {name: requirements,
|
||||||
repository: 'git://git.openstack.org/openstack/requirements.git',
|
repository: 'git://git.openstack.org/openstack/requirements.git',
|
||||||
branch: 'stable/icehouse'}
|
branch: 'stable/icehouse'}
|
||||||
|
|
||||||
directory: /mnt/openstack-git
|
directory: /mnt/openstack-git
|
||||||
http_proxy: http://squid.internal:3128
|
http_proxy: squid-proxy-url
|
||||||
https_proxy: https://squid.internal:3128
|
https_proxy: squid-proxy-url
|
||||||
|
|
||||||
The directory, http_proxy, and https_proxy keys are optional.
|
The directory, http_proxy, and https_proxy keys are optional.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
global requirements_dir
|
global requirements_dir
|
||||||
parent_dir = '/mnt/openstack-git'
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
http_proxy = None
|
||||||
|
|
||||||
if not projects_yaml:
|
projects = _git_yaml_load(projects_yaml)
|
||||||
return
|
|
||||||
|
|
||||||
projects = yaml.load(projects_yaml)
|
|
||||||
_git_validate_projects_yaml(projects, core_project)
|
_git_validate_projects_yaml(projects, core_project)
|
||||||
|
|
||||||
old_environ = dict(os.environ)
|
old_environ = dict(os.environ)
|
||||||
|
|
||||||
if 'http_proxy' in projects.keys():
|
if 'http_proxy' in projects.keys():
|
||||||
|
http_proxy = projects['http_proxy']
|
||||||
os.environ['http_proxy'] = projects['http_proxy']
|
os.environ['http_proxy'] = projects['http_proxy']
|
||||||
if 'https_proxy' in projects.keys():
|
if 'https_proxy' in projects.keys():
|
||||||
os.environ['https_proxy'] = projects['https_proxy']
|
os.environ['https_proxy'] = projects['https_proxy']
|
||||||
@ -534,15 +556,25 @@ def git_clone_and_install(projects_yaml, core_project):
|
|||||||
if 'directory' in projects.keys():
|
if 'directory' in projects.keys():
|
||||||
parent_dir = projects['directory']
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
|
||||||
|
|
||||||
|
# Upgrade setuptools and pip from default virtualenv versions. The default
|
||||||
|
# versions in trusty break master OpenStack branch deployments.
|
||||||
|
for p in ['pip', 'setuptools']:
|
||||||
|
pip_install(p, upgrade=True, proxy=http_proxy,
|
||||||
|
venv=os.path.join(parent_dir, 'venv'))
|
||||||
|
|
||||||
for p in projects['repositories']:
|
for p in projects['repositories']:
|
||||||
repo = p['repository']
|
repo = p['repository']
|
||||||
branch = p['branch']
|
branch = p['branch']
|
||||||
if p['name'] == 'requirements':
|
if p['name'] == 'requirements':
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||||
|
parent_dir, http_proxy,
|
||||||
update_requirements=False)
|
update_requirements=False)
|
||||||
requirements_dir = repo_dir
|
requirements_dir = repo_dir
|
||||||
else:
|
else:
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
||||||
|
parent_dir, http_proxy,
|
||||||
update_requirements=True)
|
update_requirements=True)
|
||||||
|
|
||||||
os.environ = old_environ
|
os.environ = old_environ
|
||||||
@ -574,7 +606,8 @@ def _git_ensure_key_exists(key, keys):
|
|||||||
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
||||||
|
|
||||||
|
|
||||||
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
|
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
|
||||||
|
update_requirements):
|
||||||
"""
|
"""
|
||||||
Clone and install a single git repository.
|
Clone and install a single git repository.
|
||||||
"""
|
"""
|
||||||
@ -587,23 +620,29 @@ def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements)
|
|||||||
|
|
||||||
if not os.path.exists(dest_dir):
|
if not os.path.exists(dest_dir):
|
||||||
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
||||||
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
|
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
|
||||||
|
depth=depth)
|
||||||
else:
|
else:
|
||||||
repo_dir = dest_dir
|
repo_dir = dest_dir
|
||||||
|
|
||||||
|
venv = os.path.join(parent_dir, 'venv')
|
||||||
|
|
||||||
if update_requirements:
|
if update_requirements:
|
||||||
if not requirements_dir:
|
if not requirements_dir:
|
||||||
error_out('requirements repo must be cloned before '
|
error_out('requirements repo must be cloned before '
|
||||||
'updating from global requirements.')
|
'updating from global requirements.')
|
||||||
_git_update_requirements(repo_dir, requirements_dir)
|
_git_update_requirements(venv, repo_dir, requirements_dir)
|
||||||
|
|
||||||
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
||||||
pip_install(repo_dir)
|
if http_proxy:
|
||||||
|
pip_install(repo_dir, proxy=http_proxy, venv=venv)
|
||||||
|
else:
|
||||||
|
pip_install(repo_dir, venv=venv)
|
||||||
|
|
||||||
return repo_dir
|
return repo_dir
|
||||||
|
|
||||||
|
|
||||||
def _git_update_requirements(package_dir, reqs_dir):
|
def _git_update_requirements(venv, package_dir, reqs_dir):
|
||||||
"""
|
"""
|
||||||
Update from global requirements.
|
Update from global requirements.
|
||||||
|
|
||||||
@ -612,25 +651,38 @@ def _git_update_requirements(package_dir, reqs_dir):
|
|||||||
"""
|
"""
|
||||||
orig_dir = os.getcwd()
|
orig_dir = os.getcwd()
|
||||||
os.chdir(reqs_dir)
|
os.chdir(reqs_dir)
|
||||||
cmd = ['python', 'update.py', package_dir]
|
python = os.path.join(venv, 'bin/python')
|
||||||
|
cmd = [python, 'update.py', package_dir]
|
||||||
try:
|
try:
|
||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError:
|
||||||
package = os.path.basename(package_dir)
|
package = os.path.basename(package_dir)
|
||||||
error_out("Error updating {} from global-requirements.txt".format(package))
|
error_out("Error updating {} from "
|
||||||
|
"global-requirements.txt".format(package))
|
||||||
os.chdir(orig_dir)
|
os.chdir(orig_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def git_pip_venv_dir(projects_yaml):
|
||||||
|
"""
|
||||||
|
Return the pip virtualenv path.
|
||||||
|
"""
|
||||||
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
|
projects = _git_yaml_load(projects_yaml)
|
||||||
|
|
||||||
|
if 'directory' in projects.keys():
|
||||||
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
return os.path.join(parent_dir, 'venv')
|
||||||
|
|
||||||
|
|
||||||
def git_src_dir(projects_yaml, project):
|
def git_src_dir(projects_yaml, project):
|
||||||
"""
|
"""
|
||||||
Return the directory where the specified project's source is located.
|
Return the directory where the specified project's source is located.
|
||||||
"""
|
"""
|
||||||
parent_dir = '/mnt/openstack-git'
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
if not projects_yaml:
|
projects = _git_yaml_load(projects_yaml)
|
||||||
return
|
|
||||||
|
|
||||||
projects = yaml.load(projects_yaml)
|
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
if 'directory' in projects.keys():
|
||||||
parent_dir = projects['directory']
|
parent_dir = projects['directory']
|
||||||
@ -640,3 +692,15 @@ def git_src_dir(projects_yaml, project):
|
|||||||
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def git_yaml_value(projects_yaml, key):
|
||||||
|
"""
|
||||||
|
Return the value in projects_yaml for the specified key.
|
||||||
|
"""
|
||||||
|
projects = _git_yaml_load(projects_yaml)
|
||||||
|
|
||||||
|
if key in projects.keys():
|
||||||
|
return projects[key]
|
||||||
|
|
||||||
|
return None
|
||||||
|
268
hooks/charmhelpers/contrib/peerstorage/__init__.py
Normal file
268
hooks/charmhelpers/contrib/peerstorage/__init__.py
Normal file
@ -0,0 +1,268 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import json
|
||||||
|
import six
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
is_relation_made,
|
||||||
|
relation_ids,
|
||||||
|
relation_get as _relation_get,
|
||||||
|
local_unit,
|
||||||
|
relation_set as _relation_set,
|
||||||
|
leader_get as _leader_get,
|
||||||
|
leader_set,
|
||||||
|
is_leader,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
"""
|
||||||
|
This helper provides functions to support use of a peer relation
|
||||||
|
for basic key/value storage, with the added benefit that all storage
|
||||||
|
can be replicated across peer units.
|
||||||
|
|
||||||
|
Requirement to use:
|
||||||
|
|
||||||
|
To use this, the "peer_echo()" method has to be called form the peer
|
||||||
|
relation's relation-changed hook:
|
||||||
|
|
||||||
|
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
|
||||||
|
def cluster_relation_changed():
|
||||||
|
peer_echo()
|
||||||
|
|
||||||
|
Once this is done, you can use peer storage from anywhere:
|
||||||
|
|
||||||
|
@hooks.hook("some-hook")
|
||||||
|
def some_hook():
|
||||||
|
# You can store and retrieve key/values this way:
|
||||||
|
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
|
||||||
|
# There are peers available so we can work with peer storage
|
||||||
|
peer_store("mykey", "myvalue")
|
||||||
|
value = peer_retrieve("mykey")
|
||||||
|
print value
|
||||||
|
else:
|
||||||
|
print "No peers joind the relation, cannot share key/values :("
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def leader_get(attribute=None):
|
||||||
|
"""Wrapper to ensure that settings are migrated from the peer relation.
|
||||||
|
|
||||||
|
This is to support upgrading an environment that does not support
|
||||||
|
Juju leadership election to one that does.
|
||||||
|
|
||||||
|
If a setting is not extant in the leader-get but is on the relation-get
|
||||||
|
peer rel, it is migrated and marked as such so that it is not re-migrated.
|
||||||
|
"""
|
||||||
|
migration_key = '__leader_get_migrated_settings__'
|
||||||
|
if not is_leader():
|
||||||
|
return _leader_get(attribute=attribute)
|
||||||
|
|
||||||
|
settings_migrated = False
|
||||||
|
leader_settings = _leader_get(attribute=attribute)
|
||||||
|
previously_migrated = _leader_get(attribute=migration_key)
|
||||||
|
|
||||||
|
if previously_migrated:
|
||||||
|
migrated = set(json.loads(previously_migrated))
|
||||||
|
else:
|
||||||
|
migrated = set([])
|
||||||
|
|
||||||
|
try:
|
||||||
|
if migration_key in leader_settings:
|
||||||
|
del leader_settings[migration_key]
|
||||||
|
except TypeError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if attribute:
|
||||||
|
if attribute in migrated:
|
||||||
|
return leader_settings
|
||||||
|
|
||||||
|
# If attribute not present in leader db, check if this unit has set
|
||||||
|
# the attribute in the peer relation
|
||||||
|
if not leader_settings:
|
||||||
|
peer_setting = relation_get(attribute=attribute, unit=local_unit())
|
||||||
|
if peer_setting:
|
||||||
|
leader_set(settings={attribute: peer_setting})
|
||||||
|
leader_settings = peer_setting
|
||||||
|
|
||||||
|
if leader_settings:
|
||||||
|
settings_migrated = True
|
||||||
|
migrated.add(attribute)
|
||||||
|
else:
|
||||||
|
r_settings = relation_get(unit=local_unit())
|
||||||
|
if r_settings:
|
||||||
|
for key in set(r_settings.keys()).difference(migrated):
|
||||||
|
# Leader setting wins
|
||||||
|
if not leader_settings.get(key):
|
||||||
|
leader_settings[key] = r_settings[key]
|
||||||
|
|
||||||
|
settings_migrated = True
|
||||||
|
migrated.add(key)
|
||||||
|
|
||||||
|
if settings_migrated:
|
||||||
|
leader_set(**leader_settings)
|
||||||
|
|
||||||
|
if migrated and settings_migrated:
|
||||||
|
migrated = json.dumps(list(migrated))
|
||||||
|
leader_set(settings={migration_key: migrated})
|
||||||
|
|
||||||
|
return leader_settings
|
||||||
|
|
||||||
|
|
||||||
|
def relation_set(relation_id=None, relation_settings=None, **kwargs):
|
||||||
|
"""Attempt to use leader-set if supported in the current version of Juju,
|
||||||
|
otherwise falls back on relation-set.
|
||||||
|
|
||||||
|
Note that we only attempt to use leader-set if the provided relation_id is
|
||||||
|
a peer relation id or no relation id is provided (in which case we assume
|
||||||
|
we are within the peer relation context).
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if relation_id in relation_ids('cluster'):
|
||||||
|
return leader_set(settings=relation_settings, **kwargs)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError
|
||||||
|
except NotImplementedError:
|
||||||
|
return _relation_set(relation_id=relation_id,
|
||||||
|
relation_settings=relation_settings, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def relation_get(attribute=None, unit=None, rid=None):
|
||||||
|
"""Attempt to use leader-get if supported in the current version of Juju,
|
||||||
|
otherwise falls back on relation-get.
|
||||||
|
|
||||||
|
Note that we only attempt to use leader-get if the provided rid is a peer
|
||||||
|
relation id or no relation id is provided (in which case we assume we are
|
||||||
|
within the peer relation context).
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
if rid in relation_ids('cluster'):
|
||||||
|
return leader_get(attribute)
|
||||||
|
else:
|
||||||
|
raise NotImplementedError
|
||||||
|
except NotImplementedError:
|
||||||
|
return _relation_get(attribute=attribute, rid=rid, unit=unit)
|
||||||
|
|
||||||
|
|
||||||
|
def peer_retrieve(key, relation_name='cluster'):
|
||||||
|
"""Retrieve a named key from peer relation `relation_name`."""
|
||||||
|
cluster_rels = relation_ids(relation_name)
|
||||||
|
if len(cluster_rels) > 0:
|
||||||
|
cluster_rid = cluster_rels[0]
|
||||||
|
return relation_get(attribute=key, rid=cluster_rid,
|
||||||
|
unit=local_unit())
|
||||||
|
else:
|
||||||
|
raise ValueError('Unable to detect'
|
||||||
|
'peer relation {}'.format(relation_name))
|
||||||
|
|
||||||
|
|
||||||
|
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
|
||||||
|
inc_list=None, exc_list=None):
|
||||||
|
""" Retrieve k/v pairs given a prefix and filter using {inc,exc}_list """
|
||||||
|
inc_list = inc_list if inc_list else []
|
||||||
|
exc_list = exc_list if exc_list else []
|
||||||
|
peerdb_settings = peer_retrieve('-', relation_name=relation_name)
|
||||||
|
matched = {}
|
||||||
|
if peerdb_settings is None:
|
||||||
|
return matched
|
||||||
|
for k, v in peerdb_settings.items():
|
||||||
|
full_prefix = prefix + delimiter
|
||||||
|
if k.startswith(full_prefix):
|
||||||
|
new_key = k.replace(full_prefix, '')
|
||||||
|
if new_key in exc_list:
|
||||||
|
continue
|
||||||
|
if new_key in inc_list or len(inc_list) == 0:
|
||||||
|
matched[new_key] = v
|
||||||
|
return matched
|
||||||
|
|
||||||
|
|
||||||
|
def peer_store(key, value, relation_name='cluster'):
|
||||||
|
"""Store the key/value pair on the named peer relation `relation_name`."""
|
||||||
|
cluster_rels = relation_ids(relation_name)
|
||||||
|
if len(cluster_rels) > 0:
|
||||||
|
cluster_rid = cluster_rels[0]
|
||||||
|
relation_set(relation_id=cluster_rid,
|
||||||
|
relation_settings={key: value})
|
||||||
|
else:
|
||||||
|
raise ValueError('Unable to detect '
|
||||||
|
'peer relation {}'.format(relation_name))
|
||||||
|
|
||||||
|
|
||||||
|
def peer_echo(includes=None, force=False):
|
||||||
|
"""Echo filtered attributes back onto the same relation for storage.
|
||||||
|
|
||||||
|
This is a requirement to use the peerstorage module - it needs to be called
|
||||||
|
from the peer relation's changed hook.
|
||||||
|
|
||||||
|
If Juju leader support exists this will be a noop unless force is True.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
is_leader()
|
||||||
|
except NotImplementedError:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
if not force:
|
||||||
|
return # NOOP if leader-election is supported
|
||||||
|
|
||||||
|
# Use original non-leader calls
|
||||||
|
relation_get = _relation_get
|
||||||
|
relation_set = _relation_set
|
||||||
|
|
||||||
|
rdata = relation_get()
|
||||||
|
echo_data = {}
|
||||||
|
if includes is None:
|
||||||
|
echo_data = rdata.copy()
|
||||||
|
for ex in ['private-address', 'public-address']:
|
||||||
|
if ex in echo_data:
|
||||||
|
echo_data.pop(ex)
|
||||||
|
else:
|
||||||
|
for attribute, value in six.iteritems(rdata):
|
||||||
|
for include in includes:
|
||||||
|
if include in attribute:
|
||||||
|
echo_data[attribute] = value
|
||||||
|
if len(echo_data) > 0:
|
||||||
|
relation_set(relation_settings=echo_data)
|
||||||
|
|
||||||
|
|
||||||
|
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
|
||||||
|
peer_store_fatal=False, relation_settings=None,
|
||||||
|
delimiter='_', **kwargs):
|
||||||
|
"""Store passed-in arguments both in argument relation and in peer storage.
|
||||||
|
|
||||||
|
It functions like doing relation_set() and peer_store() at the same time,
|
||||||
|
with the same data.
|
||||||
|
|
||||||
|
@param relation_id: the id of the relation to store the data on. Defaults
|
||||||
|
to the current relation.
|
||||||
|
@param peer_store_fatal: Set to True, the function will raise an exception
|
||||||
|
should the peer sotrage not be avialable."""
|
||||||
|
|
||||||
|
relation_settings = relation_settings if relation_settings else {}
|
||||||
|
relation_set(relation_id=relation_id,
|
||||||
|
relation_settings=relation_settings,
|
||||||
|
**kwargs)
|
||||||
|
if is_relation_made(peer_relation_name):
|
||||||
|
for key, value in six.iteritems(dict(list(kwargs.items()) +
|
||||||
|
list(relation_settings.items()))):
|
||||||
|
key_prefix = relation_id or current_relation_id()
|
||||||
|
peer_store(key_prefix + delimiter + key,
|
||||||
|
value,
|
||||||
|
relation_name=peer_relation_name)
|
||||||
|
else:
|
||||||
|
if peer_store_fatal:
|
||||||
|
raise ValueError('Unable to detect '
|
||||||
|
'peer relation {}'.format(peer_relation_name))
|
56
hooks/charmhelpers/contrib/python/debug.py
Normal file
56
hooks/charmhelpers/contrib/python/debug.py
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
|
||||||
|
import atexit
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from charmhelpers.contrib.python.rpdb import Rpdb
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
open_port,
|
||||||
|
close_port,
|
||||||
|
ERROR,
|
||||||
|
log
|
||||||
|
)
|
||||||
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
DEFAULT_ADDR = "0.0.0.0"
|
||||||
|
DEFAULT_PORT = 4444
|
||||||
|
|
||||||
|
|
||||||
|
def _error(message):
|
||||||
|
log(message, level=ERROR)
|
||||||
|
|
||||||
|
|
||||||
|
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
|
||||||
|
"""
|
||||||
|
Set a trace point using the remote debugger
|
||||||
|
"""
|
||||||
|
atexit.register(close_port, port)
|
||||||
|
try:
|
||||||
|
log("Starting a remote python debugger session on %s:%s" % (addr,
|
||||||
|
port))
|
||||||
|
open_port(port)
|
||||||
|
debugger = Rpdb(addr=addr, port=port)
|
||||||
|
debugger.set_trace(sys._getframe().f_back)
|
||||||
|
except:
|
||||||
|
_error("Cannot start a remote debug session on %s:%s" % (addr,
|
||||||
|
port))
|
@ -17,8 +17,11 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
from charmhelpers.core.hookenv import log
|
from charmhelpers.core.hookenv import charm_dir, log
|
||||||
|
|
||||||
try:
|
try:
|
||||||
from pip import main as pip_execute
|
from pip import main as pip_execute
|
||||||
@ -33,6 +36,8 @@ __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|||||||
def parse_options(given, available):
|
def parse_options(given, available):
|
||||||
"""Given a set of options, check if available"""
|
"""Given a set of options, check if available"""
|
||||||
for key, value in sorted(given.items()):
|
for key, value in sorted(given.items()):
|
||||||
|
if not value:
|
||||||
|
continue
|
||||||
if key in available:
|
if key in available:
|
||||||
yield "--{0}={1}".format(key, value)
|
yield "--{0}={1}".format(key, value)
|
||||||
|
|
||||||
@ -51,11 +56,15 @@ def pip_install_requirements(requirements, **options):
|
|||||||
pip_execute(command)
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
def pip_install(package, fatal=False, upgrade=False, **options):
|
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
|
||||||
"""Install a python package"""
|
"""Install a python package"""
|
||||||
|
if venv:
|
||||||
|
venv_python = os.path.join(venv, 'bin/pip')
|
||||||
|
command = [venv_python, "install"]
|
||||||
|
else:
|
||||||
command = ["install"]
|
command = ["install"]
|
||||||
|
|
||||||
available_options = ('proxy', 'src', 'log', "index-url", )
|
available_options = ('proxy', 'src', 'log', 'index-url', )
|
||||||
for option in parse_options(options, available_options):
|
for option in parse_options(options, available_options):
|
||||||
command.append(option)
|
command.append(option)
|
||||||
|
|
||||||
@ -69,6 +78,9 @@ def pip_install(package, fatal=False, upgrade=False, **options):
|
|||||||
|
|
||||||
log("Installing {} package with options: {}".format(package,
|
log("Installing {} package with options: {}".format(package,
|
||||||
command))
|
command))
|
||||||
|
if venv:
|
||||||
|
subprocess.check_call(command)
|
||||||
|
else:
|
||||||
pip_execute(command)
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
@ -94,3 +106,16 @@ def pip_list():
|
|||||||
"""Returns the list of current python installed packages
|
"""Returns the list of current python installed packages
|
||||||
"""
|
"""
|
||||||
return pip_execute(["list"])
|
return pip_execute(["list"])
|
||||||
|
|
||||||
|
|
||||||
|
def pip_create_virtualenv(path=None):
|
||||||
|
"""Create an isolated Python environment."""
|
||||||
|
apt_install('python-virtualenv')
|
||||||
|
|
||||||
|
if path:
|
||||||
|
venv_path = path
|
||||||
|
else:
|
||||||
|
venv_path = os.path.join(charm_dir(), 'venv')
|
||||||
|
|
||||||
|
if not os.path.exists(venv_path):
|
||||||
|
subprocess.check_call(['virtualenv', venv_path])
|
||||||
|
58
hooks/charmhelpers/contrib/python/rpdb.py
Normal file
58
hooks/charmhelpers/contrib/python/rpdb.py
Normal file
@ -0,0 +1,58 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""Remote Python Debugger (pdb wrapper)."""
|
||||||
|
|
||||||
|
import pdb
|
||||||
|
import socket
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__author__ = "Bertrand Janin <b@janin.com>"
|
||||||
|
__version__ = "0.1.3"
|
||||||
|
|
||||||
|
|
||||||
|
class Rpdb(pdb.Pdb):
|
||||||
|
|
||||||
|
def __init__(self, addr="127.0.0.1", port=4444):
|
||||||
|
"""Initialize the socket and initialize pdb."""
|
||||||
|
|
||||||
|
# Backup stdin and stdout before replacing them by the socket handle
|
||||||
|
self.old_stdout = sys.stdout
|
||||||
|
self.old_stdin = sys.stdin
|
||||||
|
|
||||||
|
# Open a 'reusable' socket to let the webapp reload on the same port
|
||||||
|
self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||||
|
self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
|
||||||
|
self.skt.bind((addr, port))
|
||||||
|
self.skt.listen(1)
|
||||||
|
(clientsocket, address) = self.skt.accept()
|
||||||
|
handle = clientsocket.makefile('rw')
|
||||||
|
pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle)
|
||||||
|
sys.stdout = sys.stdin = handle
|
||||||
|
|
||||||
|
def shutdown(self):
|
||||||
|
"""Revert stdin and stdout, close the socket."""
|
||||||
|
sys.stdout = self.old_stdout
|
||||||
|
sys.stdin = self.old_stdin
|
||||||
|
self.skt.close()
|
||||||
|
self.set_continue()
|
||||||
|
|
||||||
|
def do_continue(self, arg):
|
||||||
|
"""Stop all operation on ``continue``."""
|
||||||
|
self.shutdown()
|
||||||
|
return 1
|
||||||
|
|
||||||
|
do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
|
@ -1,3 +1,6 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
#
|
#
|
||||||
# This file is part of charm-helpers.
|
# This file is part of charm-helpers.
|
||||||
@ -14,5 +17,18 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
import sys
|
||||||
# module
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
|
||||||
|
def current_version():
|
||||||
|
"""Current system python version"""
|
||||||
|
return sys.version_info
|
||||||
|
|
||||||
|
|
||||||
|
def current_version_string():
|
||||||
|
"""Current system python version as string major.minor.micro"""
|
||||||
|
return "{0}.{1}.{2}".format(sys.version_info.major,
|
||||||
|
sys.version_info.minor,
|
||||||
|
sys.version_info.micro)
|
118
hooks/charmhelpers/contrib/saltstack/__init__.py
Normal file
118
hooks/charmhelpers/contrib/saltstack/__init__.py
Normal file
@ -0,0 +1,118 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""Charm Helpers saltstack - declare the state of your machines.
|
||||||
|
|
||||||
|
This helper enables you to declare your machine state, rather than
|
||||||
|
program it procedurally (and have to test each change to your procedures).
|
||||||
|
Your install hook can be as simple as::
|
||||||
|
|
||||||
|
{{{
|
||||||
|
from charmhelpers.contrib.saltstack import (
|
||||||
|
install_salt_support,
|
||||||
|
update_machine_state,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def install():
|
||||||
|
install_salt_support()
|
||||||
|
update_machine_state('machine_states/dependencies.yaml')
|
||||||
|
update_machine_state('machine_states/installed.yaml')
|
||||||
|
}}}
|
||||||
|
|
||||||
|
and won't need to change (nor will its tests) when you change the machine
|
||||||
|
state.
|
||||||
|
|
||||||
|
It's using a python package called salt-minion which allows various formats for
|
||||||
|
specifying resources, such as::
|
||||||
|
|
||||||
|
{{{
|
||||||
|
/srv/{{ basedir }}:
|
||||||
|
file.directory:
|
||||||
|
- group: ubunet
|
||||||
|
- user: ubunet
|
||||||
|
- require:
|
||||||
|
- user: ubunet
|
||||||
|
- recurse:
|
||||||
|
- user
|
||||||
|
- group
|
||||||
|
|
||||||
|
ubunet:
|
||||||
|
group.present:
|
||||||
|
- gid: 1500
|
||||||
|
user.present:
|
||||||
|
- uid: 1500
|
||||||
|
- gid: 1500
|
||||||
|
- createhome: False
|
||||||
|
- require:
|
||||||
|
- group: ubunet
|
||||||
|
}}}
|
||||||
|
|
||||||
|
The docs for all the different state definitions are at:
|
||||||
|
http://docs.saltstack.com/ref/states/all/
|
||||||
|
|
||||||
|
|
||||||
|
TODO:
|
||||||
|
* Add test helpers which will ensure that machine state definitions
|
||||||
|
are functionally (but not necessarily logically) correct (ie. getting
|
||||||
|
salt to parse all state defs.
|
||||||
|
* Add a link to a public bootstrap charm example / blogpost.
|
||||||
|
* Find a way to obviate the need to use the grains['charm_dir'] syntax
|
||||||
|
in templates.
|
||||||
|
"""
|
||||||
|
# Copyright 2013 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
import charmhelpers.contrib.templating.contexts
|
||||||
|
import charmhelpers.core.host
|
||||||
|
import charmhelpers.core.hookenv
|
||||||
|
|
||||||
|
|
||||||
|
salt_grains_path = '/etc/salt/grains'
|
||||||
|
|
||||||
|
|
||||||
|
def install_salt_support(from_ppa=True):
|
||||||
|
"""Installs the salt-minion helper for machine state.
|
||||||
|
|
||||||
|
By default the salt-minion package is installed from
|
||||||
|
the saltstack PPA. If from_ppa is False you must ensure
|
||||||
|
that the salt-minion package is available in the apt cache.
|
||||||
|
"""
|
||||||
|
if from_ppa:
|
||||||
|
subprocess.check_call([
|
||||||
|
'/usr/bin/add-apt-repository',
|
||||||
|
'--yes',
|
||||||
|
'ppa:saltstack/salt',
|
||||||
|
])
|
||||||
|
subprocess.check_call(['/usr/bin/apt-get', 'update'])
|
||||||
|
# We install salt-common as salt-minion would run the salt-minion
|
||||||
|
# daemon.
|
||||||
|
charmhelpers.fetch.apt_install('salt-common')
|
||||||
|
|
||||||
|
|
||||||
|
def update_machine_state(state_path):
|
||||||
|
"""Update the machine state using the provided state declaration."""
|
||||||
|
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
|
||||||
|
salt_grains_path)
|
||||||
|
subprocess.check_call([
|
||||||
|
'salt-call',
|
||||||
|
'--local',
|
||||||
|
'state.template',
|
||||||
|
state_path,
|
||||||
|
])
|
94
hooks/charmhelpers/contrib/ssl/__init__.py
Normal file
94
hooks/charmhelpers/contrib/ssl/__init__.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
|
||||||
|
"""Generate selfsigned SSL keypair
|
||||||
|
|
||||||
|
You must provide one of the 3 optional arguments:
|
||||||
|
config, subject or cn
|
||||||
|
If more than one is provided the leftmost will be used
|
||||||
|
|
||||||
|
Arguments:
|
||||||
|
keyfile -- (required) full path to the keyfile to be created
|
||||||
|
certfile -- (required) full path to the certfile to be created
|
||||||
|
keysize -- (optional) SSL key length
|
||||||
|
config -- (optional) openssl configuration file
|
||||||
|
subject -- (optional) dictionary with SSL subject variables
|
||||||
|
cn -- (optional) cerfificate common name
|
||||||
|
|
||||||
|
Required keys in subject dict:
|
||||||
|
cn -- Common name (eq. FQDN)
|
||||||
|
|
||||||
|
Optional keys in subject dict
|
||||||
|
country -- Country Name (2 letter code)
|
||||||
|
state -- State or Province Name (full name)
|
||||||
|
locality -- Locality Name (eg, city)
|
||||||
|
organization -- Organization Name (eg, company)
|
||||||
|
organizational_unit -- Organizational Unit Name (eg, section)
|
||||||
|
email -- Email Address
|
||||||
|
"""
|
||||||
|
|
||||||
|
cmd = []
|
||||||
|
if config:
|
||||||
|
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
||||||
|
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
||||||
|
"-keyout", keyfile,
|
||||||
|
"-out", certfile, "-config", config]
|
||||||
|
elif subject:
|
||||||
|
ssl_subject = ""
|
||||||
|
if "country" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
|
||||||
|
if "state" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
|
||||||
|
if "locality" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
|
||||||
|
if "organization" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
|
||||||
|
if "organizational_unit" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
|
||||||
|
if "cn" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
|
||||||
|
else:
|
||||||
|
hookenv.log("When using \"subject\" argument you must "
|
||||||
|
"provide \"cn\" field at very least")
|
||||||
|
return False
|
||||||
|
if "email" in subject:
|
||||||
|
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
|
||||||
|
|
||||||
|
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
||||||
|
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
||||||
|
"-keyout", keyfile,
|
||||||
|
"-out", certfile, "-subj", ssl_subject]
|
||||||
|
elif cn:
|
||||||
|
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
||||||
|
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
||||||
|
"-keyout", keyfile,
|
||||||
|
"-out", certfile, "-subj", "/CN={}".format(cn)]
|
||||||
|
|
||||||
|
if not cmd:
|
||||||
|
hookenv.log("No config, subject or cn provided,"
|
||||||
|
"unable to generate self signed SSL certificates")
|
||||||
|
return False
|
||||||
|
try:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
return True
|
||||||
|
except Exception as e:
|
||||||
|
print("Execution of openssl command failed:\n{}".format(e))
|
||||||
|
return False
|
279
hooks/charmhelpers/contrib/ssl/service.py
Normal file
279
hooks/charmhelpers/contrib/ssl/service.py
Normal file
@ -0,0 +1,279 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
from os.path import join as path_join
|
||||||
|
from os.path import exists
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import log, DEBUG
|
||||||
|
|
||||||
|
STD_CERT = "standard"
|
||||||
|
|
||||||
|
# Mysql server is fairly picky about cert creation
|
||||||
|
# and types, spec its creation separately for now.
|
||||||
|
MYSQL_CERT = "mysql"
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceCA(object):
|
||||||
|
|
||||||
|
default_expiry = str(365 * 2)
|
||||||
|
default_ca_expiry = str(365 * 6)
|
||||||
|
|
||||||
|
def __init__(self, name, ca_dir, cert_type=STD_CERT):
|
||||||
|
self.name = name
|
||||||
|
self.ca_dir = ca_dir
|
||||||
|
self.cert_type = cert_type
|
||||||
|
|
||||||
|
###############
|
||||||
|
# Hook Helper API
|
||||||
|
@staticmethod
|
||||||
|
def get_ca(type=STD_CERT):
|
||||||
|
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
||||||
|
ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
|
||||||
|
ca = ServiceCA(service_name, ca_path, type)
|
||||||
|
ca.init()
|
||||||
|
return ca
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def get_service_cert(cls, type=STD_CERT):
|
||||||
|
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
||||||
|
ca = cls.get_ca()
|
||||||
|
crt, key = ca.get_or_create_cert(service_name)
|
||||||
|
return crt, key, ca.get_ca_bundle()
|
||||||
|
|
||||||
|
###############
|
||||||
|
|
||||||
|
def init(self):
|
||||||
|
log("initializing service ca", level=DEBUG)
|
||||||
|
if not exists(self.ca_dir):
|
||||||
|
self._init_ca_dir(self.ca_dir)
|
||||||
|
self._init_ca()
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ca_key(self):
|
||||||
|
return path_join(self.ca_dir, 'private', 'cacert.key')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ca_cert(self):
|
||||||
|
return path_join(self.ca_dir, 'cacert.pem')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ca_conf(self):
|
||||||
|
return path_join(self.ca_dir, 'ca.cnf')
|
||||||
|
|
||||||
|
@property
|
||||||
|
def signing_conf(self):
|
||||||
|
return path_join(self.ca_dir, 'signing.cnf')
|
||||||
|
|
||||||
|
def _init_ca_dir(self, ca_dir):
|
||||||
|
os.mkdir(ca_dir)
|
||||||
|
for i in ['certs', 'crl', 'newcerts', 'private']:
|
||||||
|
sd = path_join(ca_dir, i)
|
||||||
|
if not exists(sd):
|
||||||
|
os.mkdir(sd)
|
||||||
|
|
||||||
|
if not exists(path_join(ca_dir, 'serial')):
|
||||||
|
with open(path_join(ca_dir, 'serial'), 'w') as fh:
|
||||||
|
fh.write('02\n')
|
||||||
|
|
||||||
|
if not exists(path_join(ca_dir, 'index.txt')):
|
||||||
|
with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
|
||||||
|
fh.write('')
|
||||||
|
|
||||||
|
def _init_ca(self):
|
||||||
|
"""Generate the root ca's cert and key.
|
||||||
|
"""
|
||||||
|
if not exists(path_join(self.ca_dir, 'ca.cnf')):
|
||||||
|
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
|
||||||
|
fh.write(
|
||||||
|
CA_CONF_TEMPLATE % (self.get_conf_variables()))
|
||||||
|
|
||||||
|
if not exists(path_join(self.ca_dir, 'signing.cnf')):
|
||||||
|
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
|
||||||
|
fh.write(
|
||||||
|
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
|
||||||
|
|
||||||
|
if exists(self.ca_cert) or exists(self.ca_key):
|
||||||
|
raise RuntimeError("Initialized called when CA already exists")
|
||||||
|
cmd = ['openssl', 'req', '-config', self.ca_conf,
|
||||||
|
'-x509', '-nodes', '-newkey', 'rsa',
|
||||||
|
'-days', self.default_ca_expiry,
|
||||||
|
'-keyout', self.ca_key, '-out', self.ca_cert,
|
||||||
|
'-outform', 'PEM']
|
||||||
|
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
||||||
|
log("CA Init:\n %s" % output, level=DEBUG)
|
||||||
|
|
||||||
|
def get_conf_variables(self):
|
||||||
|
return dict(
|
||||||
|
org_name="juju",
|
||||||
|
org_unit_name="%s service" % self.name,
|
||||||
|
common_name=self.name,
|
||||||
|
ca_dir=self.ca_dir)
|
||||||
|
|
||||||
|
def get_or_create_cert(self, common_name):
|
||||||
|
if common_name in self:
|
||||||
|
return self.get_certificate(common_name)
|
||||||
|
return self.create_certificate(common_name)
|
||||||
|
|
||||||
|
def create_certificate(self, common_name):
|
||||||
|
if common_name in self:
|
||||||
|
return self.get_certificate(common_name)
|
||||||
|
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
||||||
|
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
||||||
|
csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
|
||||||
|
self._create_certificate(common_name, key_p, csr_p, crt_p)
|
||||||
|
return self.get_certificate(common_name)
|
||||||
|
|
||||||
|
def get_certificate(self, common_name):
|
||||||
|
if common_name not in self:
|
||||||
|
raise ValueError("No certificate for %s" % common_name)
|
||||||
|
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
||||||
|
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
||||||
|
with open(crt_p) as fh:
|
||||||
|
crt = fh.read()
|
||||||
|
with open(key_p) as fh:
|
||||||
|
key = fh.read()
|
||||||
|
return crt, key
|
||||||
|
|
||||||
|
def __contains__(self, common_name):
|
||||||
|
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
||||||
|
return exists(crt_p)
|
||||||
|
|
||||||
|
def _create_certificate(self, common_name, key_p, csr_p, crt_p):
|
||||||
|
template_vars = self.get_conf_variables()
|
||||||
|
template_vars['common_name'] = common_name
|
||||||
|
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
|
||||||
|
template_vars)
|
||||||
|
|
||||||
|
log("CA Create Cert %s" % common_name, level=DEBUG)
|
||||||
|
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
|
||||||
|
'-nodes', '-days', self.default_expiry,
|
||||||
|
'-keyout', key_p, '-out', csr_p, '-subj', subj]
|
||||||
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
|
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
|
||||||
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
|
log("CA Sign Cert %s" % common_name, level=DEBUG)
|
||||||
|
if self.cert_type == MYSQL_CERT:
|
||||||
|
cmd = ['openssl', 'x509', '-req',
|
||||||
|
'-in', csr_p, '-days', self.default_expiry,
|
||||||
|
'-CA', self.ca_cert, '-CAkey', self.ca_key,
|
||||||
|
'-set_serial', '01', '-out', crt_p]
|
||||||
|
else:
|
||||||
|
cmd = ['openssl', 'ca', '-config', self.signing_conf,
|
||||||
|
'-extensions', 'req_extensions',
|
||||||
|
'-days', self.default_expiry, '-notext',
|
||||||
|
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
|
||||||
|
log("running %s" % " ".join(cmd), level=DEBUG)
|
||||||
|
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
||||||
|
|
||||||
|
def get_ca_bundle(self):
|
||||||
|
with open(self.ca_cert) as fh:
|
||||||
|
return fh.read()
|
||||||
|
|
||||||
|
|
||||||
|
CA_CONF_TEMPLATE = """
|
||||||
|
[ ca ]
|
||||||
|
default_ca = CA_default
|
||||||
|
|
||||||
|
[ CA_default ]
|
||||||
|
dir = %(ca_dir)s
|
||||||
|
policy = policy_match
|
||||||
|
database = $dir/index.txt
|
||||||
|
serial = $dir/serial
|
||||||
|
certs = $dir/certs
|
||||||
|
crl_dir = $dir/crl
|
||||||
|
new_certs_dir = $dir/newcerts
|
||||||
|
certificate = $dir/cacert.pem
|
||||||
|
private_key = $dir/private/cacert.key
|
||||||
|
RANDFILE = $dir/private/.rand
|
||||||
|
default_md = default
|
||||||
|
|
||||||
|
[ req ]
|
||||||
|
default_bits = 1024
|
||||||
|
default_md = sha1
|
||||||
|
|
||||||
|
prompt = no
|
||||||
|
distinguished_name = ca_distinguished_name
|
||||||
|
|
||||||
|
x509_extensions = ca_extensions
|
||||||
|
|
||||||
|
[ ca_distinguished_name ]
|
||||||
|
organizationName = %(org_name)s
|
||||||
|
organizationalUnitName = %(org_unit_name)s Certificate Authority
|
||||||
|
|
||||||
|
|
||||||
|
[ policy_match ]
|
||||||
|
countryName = optional
|
||||||
|
stateOrProvinceName = optional
|
||||||
|
organizationName = match
|
||||||
|
organizationalUnitName = optional
|
||||||
|
commonName = supplied
|
||||||
|
|
||||||
|
[ ca_extensions ]
|
||||||
|
basicConstraints = critical,CA:true
|
||||||
|
subjectKeyIdentifier = hash
|
||||||
|
authorityKeyIdentifier = keyid:always, issuer
|
||||||
|
keyUsage = cRLSign, keyCertSign
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
SIGNING_CONF_TEMPLATE = """
|
||||||
|
[ ca ]
|
||||||
|
default_ca = CA_default
|
||||||
|
|
||||||
|
[ CA_default ]
|
||||||
|
dir = %(ca_dir)s
|
||||||
|
policy = policy_match
|
||||||
|
database = $dir/index.txt
|
||||||
|
serial = $dir/serial
|
||||||
|
certs = $dir/certs
|
||||||
|
crl_dir = $dir/crl
|
||||||
|
new_certs_dir = $dir/newcerts
|
||||||
|
certificate = $dir/cacert.pem
|
||||||
|
private_key = $dir/private/cacert.key
|
||||||
|
RANDFILE = $dir/private/.rand
|
||||||
|
default_md = default
|
||||||
|
|
||||||
|
[ req ]
|
||||||
|
default_bits = 1024
|
||||||
|
default_md = sha1
|
||||||
|
|
||||||
|
prompt = no
|
||||||
|
distinguished_name = req_distinguished_name
|
||||||
|
|
||||||
|
x509_extensions = req_extensions
|
||||||
|
|
||||||
|
[ req_distinguished_name ]
|
||||||
|
organizationName = %(org_name)s
|
||||||
|
organizationalUnitName = %(org_unit_name)s machine resources
|
||||||
|
commonName = %(common_name)s
|
||||||
|
|
||||||
|
[ policy_match ]
|
||||||
|
countryName = optional
|
||||||
|
stateOrProvinceName = optional
|
||||||
|
organizationName = match
|
||||||
|
organizationalUnitName = optional
|
||||||
|
commonName = supplied
|
||||||
|
|
||||||
|
[ req_extensions ]
|
||||||
|
basicConstraints = CA:false
|
||||||
|
subjectKeyIdentifier = hash
|
||||||
|
authorityKeyIdentifier = keyid:always, issuer
|
||||||
|
keyUsage = digitalSignature, keyEncipherment, keyAgreement
|
||||||
|
extendedKeyUsage = serverAuth, clientAuth
|
||||||
|
"""
|
@ -67,4 +67,4 @@ def is_device_mounted(device):
|
|||||||
out = check_output(['mount']).decode('UTF-8')
|
out = check_output(['mount']).decode('UTF-8')
|
||||||
if is_partition:
|
if is_partition:
|
||||||
return bool(re.search(device + r"\b", out))
|
return bool(re.search(device + r"\b", out))
|
||||||
return bool(re.search(device + r"[0-9]+\b", out))
|
return bool(re.search(device + r"[0-9]*\b", out))
|
||||||
|
139
hooks/charmhelpers/contrib/templating/contexts.py
Normal file
139
hooks/charmhelpers/contrib/templating/contexts.py
Normal file
@ -0,0 +1,139 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Copyright 2013 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
"""A helper to create a yaml cache of config with namespaced relation data."""
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
import charmhelpers.core.hookenv
|
||||||
|
|
||||||
|
|
||||||
|
charm_dir = os.environ.get('CHARM_DIR', '')
|
||||||
|
|
||||||
|
|
||||||
|
def dict_keys_without_hyphens(a_dict):
|
||||||
|
"""Return the a new dict with underscores instead of hyphens in keys."""
|
||||||
|
return dict(
|
||||||
|
(key.replace('-', '_'), val) for key, val in a_dict.items())
|
||||||
|
|
||||||
|
|
||||||
|
def update_relations(context, namespace_separator=':'):
|
||||||
|
"""Update the context with the relation data."""
|
||||||
|
# Add any relation data prefixed with the relation type.
|
||||||
|
relation_type = charmhelpers.core.hookenv.relation_type()
|
||||||
|
relations = []
|
||||||
|
context['current_relation'] = {}
|
||||||
|
if relation_type is not None:
|
||||||
|
relation_data = charmhelpers.core.hookenv.relation_get()
|
||||||
|
context['current_relation'] = relation_data
|
||||||
|
# Deprecated: the following use of relation data as keys
|
||||||
|
# directly in the context will be removed.
|
||||||
|
relation_data = dict(
|
||||||
|
("{relation_type}{namespace_separator}{key}".format(
|
||||||
|
relation_type=relation_type,
|
||||||
|
key=key,
|
||||||
|
namespace_separator=namespace_separator), val)
|
||||||
|
for key, val in relation_data.items())
|
||||||
|
relation_data = dict_keys_without_hyphens(relation_data)
|
||||||
|
context.update(relation_data)
|
||||||
|
relations = charmhelpers.core.hookenv.relations_of_type(relation_type)
|
||||||
|
relations = [dict_keys_without_hyphens(rel) for rel in relations]
|
||||||
|
|
||||||
|
context['relations_full'] = charmhelpers.core.hookenv.relations()
|
||||||
|
|
||||||
|
# the hookenv.relations() data structure is effectively unusable in
|
||||||
|
# templates and other contexts when trying to access relation data other
|
||||||
|
# than the current relation. So provide a more useful structure that works
|
||||||
|
# with any hook.
|
||||||
|
local_unit = charmhelpers.core.hookenv.local_unit()
|
||||||
|
relations = {}
|
||||||
|
for rname, rids in context['relations_full'].items():
|
||||||
|
relations[rname] = []
|
||||||
|
for rid, rdata in rids.items():
|
||||||
|
data = rdata.copy()
|
||||||
|
if local_unit in rdata:
|
||||||
|
data.pop(local_unit)
|
||||||
|
for unit_name, rel_data in data.items():
|
||||||
|
new_data = {'__relid__': rid, '__unit__': unit_name}
|
||||||
|
new_data.update(rel_data)
|
||||||
|
relations[rname].append(new_data)
|
||||||
|
context['relations'] = relations
|
||||||
|
|
||||||
|
|
||||||
|
def juju_state_to_yaml(yaml_path, namespace_separator=':',
|
||||||
|
allow_hyphens_in_keys=True, mode=None):
|
||||||
|
"""Update the juju config and state in a yaml file.
|
||||||
|
|
||||||
|
This includes any current relation-get data, and the charm
|
||||||
|
directory.
|
||||||
|
|
||||||
|
This function was created for the ansible and saltstack
|
||||||
|
support, as those libraries can use a yaml file to supply
|
||||||
|
context to templates, but it may be useful generally to
|
||||||
|
create and update an on-disk cache of all the config, including
|
||||||
|
previous relation data.
|
||||||
|
|
||||||
|
By default, hyphens are allowed in keys as this is supported
|
||||||
|
by yaml, but for tools like ansible, hyphens are not valid [1].
|
||||||
|
|
||||||
|
[1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name
|
||||||
|
"""
|
||||||
|
config = charmhelpers.core.hookenv.config()
|
||||||
|
|
||||||
|
# Add the charm_dir which we will need to refer to charm
|
||||||
|
# file resources etc.
|
||||||
|
config['charm_dir'] = charm_dir
|
||||||
|
config['local_unit'] = charmhelpers.core.hookenv.local_unit()
|
||||||
|
config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
|
||||||
|
config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
|
||||||
|
'public-address'
|
||||||
|
)
|
||||||
|
|
||||||
|
# Don't use non-standard tags for unicode which will not
|
||||||
|
# work when salt uses yaml.load_safe.
|
||||||
|
yaml.add_representer(six.text_type,
|
||||||
|
lambda dumper, value: dumper.represent_scalar(
|
||||||
|
six.u('tag:yaml.org,2002:str'), value))
|
||||||
|
|
||||||
|
yaml_dir = os.path.dirname(yaml_path)
|
||||||
|
if not os.path.exists(yaml_dir):
|
||||||
|
os.makedirs(yaml_dir)
|
||||||
|
|
||||||
|
if os.path.exists(yaml_path):
|
||||||
|
with open(yaml_path, "r") as existing_vars_file:
|
||||||
|
existing_vars = yaml.load(existing_vars_file.read())
|
||||||
|
else:
|
||||||
|
with open(yaml_path, "w+"):
|
||||||
|
pass
|
||||||
|
existing_vars = {}
|
||||||
|
|
||||||
|
if mode is not None:
|
||||||
|
os.chmod(yaml_path, mode)
|
||||||
|
|
||||||
|
if not allow_hyphens_in_keys:
|
||||||
|
config = dict_keys_without_hyphens(config)
|
||||||
|
existing_vars.update(config)
|
||||||
|
|
||||||
|
update_relations(existing_vars, namespace_separator)
|
||||||
|
|
||||||
|
with open(yaml_path, "w+") as fp:
|
||||||
|
fp.write(yaml.dump(existing_vars, default_flow_style=False))
|
@ -14,20 +14,26 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
''' Helper for managing alternatives for file conflict resolution '''
|
"""
|
||||||
|
Templating using the python-jinja2 package.
|
||||||
import subprocess
|
"""
|
||||||
import shutil
|
import six
|
||||||
import os
|
from charmhelpers.fetch import apt_install
|
||||||
|
try:
|
||||||
|
import jinja2
|
||||||
|
except ImportError:
|
||||||
|
if six.PY3:
|
||||||
|
apt_install(["python3-jinja2"])
|
||||||
|
else:
|
||||||
|
apt_install(["python-jinja2"])
|
||||||
|
import jinja2
|
||||||
|
|
||||||
|
|
||||||
def install_alternative(name, target, source, priority=50):
|
DEFAULT_TEMPLATES_DIR = 'templates'
|
||||||
''' Install alternative configuration '''
|
|
||||||
if (os.path.exists(target) and not os.path.islink(target)):
|
|
||||||
# Move existing file/directory away before installing
|
def render(template_name, context, template_dir=DEFAULT_TEMPLATES_DIR):
|
||||||
shutil.move(target, '{}.bak'.format(target))
|
templates = jinja2.Environment(
|
||||||
cmd = [
|
loader=jinja2.FileSystemLoader(template_dir))
|
||||||
'update-alternatives', '--force', '--install',
|
template = templates.get_template(template_name)
|
||||||
target, name, source, str(priority)
|
return template.render(context)
|
||||||
]
|
|
||||||
subprocess.check_call(cmd)
|
|
@ -14,5 +14,16 @@
|
|||||||
# You should have received a copy of the GNU Lesser General Public License
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
from .base import * # NOQA
|
'''
|
||||||
from .helpers import * # NOQA
|
Templating using standard Python str.format() method.
|
||||||
|
'''
|
||||||
|
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def render(template, extra={}, **kwargs):
|
||||||
|
"""Return the template rendered using Python's str.format()."""
|
||||||
|
context = hookenv.execution_environment()
|
||||||
|
context.update(extra)
|
||||||
|
context.update(kwargs)
|
||||||
|
return template.format(**context)
|
313
hooks/charmhelpers/contrib/unison/__init__.py
Normal file
313
hooks/charmhelpers/contrib/unison/__init__.py
Normal file
@ -0,0 +1,313 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Easy file synchronization among peer units using ssh + unison.
|
||||||
|
#
|
||||||
|
# For the -joined, -changed, and -departed peer relations, add a call to
|
||||||
|
# ssh_authorized_peers() describing the peer relation and the desired
|
||||||
|
# user + group. After all peer relations have settled, all hosts should
|
||||||
|
# be able to connect to on another via key auth'd ssh as the specified user.
|
||||||
|
#
|
||||||
|
# Other hooks are then free to synchronize files and directories using
|
||||||
|
# sync_to_peers().
|
||||||
|
#
|
||||||
|
# For a peer relation named 'cluster', for example:
|
||||||
|
#
|
||||||
|
# cluster-relation-joined:
|
||||||
|
# ...
|
||||||
|
# ssh_authorized_peers(peer_interface='cluster',
|
||||||
|
# user='juju_ssh', group='juju_ssh',
|
||||||
|
# ensure_local_user=True)
|
||||||
|
# ...
|
||||||
|
#
|
||||||
|
# cluster-relation-changed:
|
||||||
|
# ...
|
||||||
|
# ssh_authorized_peers(peer_interface='cluster',
|
||||||
|
# user='juju_ssh', group='juju_ssh',
|
||||||
|
# ensure_local_user=True)
|
||||||
|
# ...
|
||||||
|
#
|
||||||
|
# cluster-relation-departed:
|
||||||
|
# ...
|
||||||
|
# ssh_authorized_peers(peer_interface='cluster',
|
||||||
|
# user='juju_ssh', group='juju_ssh',
|
||||||
|
# ensure_local_user=True)
|
||||||
|
# ...
|
||||||
|
#
|
||||||
|
# Hooks are now free to sync files as easily as:
|
||||||
|
#
|
||||||
|
# files = ['/etc/fstab', '/etc/apt.conf.d/']
|
||||||
|
# sync_to_peers(peer_interface='cluster',
|
||||||
|
# user='juju_ssh, paths=[files])
|
||||||
|
#
|
||||||
|
# It is assumed the charm itself has setup permissions on each unit
|
||||||
|
# such that 'juju_ssh' has read + write permissions. Also assumed
|
||||||
|
# that the calling charm takes care of leader delegation.
|
||||||
|
#
|
||||||
|
# Additionally files can be synchronized only to an specific unit:
|
||||||
|
# sync_to_peer(slave_address, user='juju_ssh',
|
||||||
|
# paths=[files], verbose=False)
|
||||||
|
|
||||||
|
import os
|
||||||
|
import pwd
|
||||||
|
|
||||||
|
from copy import copy
|
||||||
|
from subprocess import check_call, check_output
|
||||||
|
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
adduser,
|
||||||
|
add_user_to_group,
|
||||||
|
pwgen,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
hook_name,
|
||||||
|
relation_ids,
|
||||||
|
related_units,
|
||||||
|
relation_set,
|
||||||
|
relation_get,
|
||||||
|
unit_private_ip,
|
||||||
|
INFO,
|
||||||
|
ERROR,
|
||||||
|
)
|
||||||
|
|
||||||
|
BASE_CMD = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
|
||||||
|
'-fastcheck=true', '-group=false', '-owner=false',
|
||||||
|
'-prefer=newer', '-times=true']
|
||||||
|
|
||||||
|
|
||||||
|
def get_homedir(user):
|
||||||
|
try:
|
||||||
|
user = pwd.getpwnam(user)
|
||||||
|
return user.pw_dir
|
||||||
|
except KeyError:
|
||||||
|
log('Could not get homedir for user %s: user exists?' % (user), ERROR)
|
||||||
|
raise Exception
|
||||||
|
|
||||||
|
|
||||||
|
def create_private_key(user, priv_key_path, key_type='rsa'):
|
||||||
|
types_bits = {
|
||||||
|
'rsa': '2048',
|
||||||
|
'ecdsa': '521',
|
||||||
|
}
|
||||||
|
if key_type not in types_bits:
|
||||||
|
log('Unknown ssh key type {}, using rsa'.format(key_type), ERROR)
|
||||||
|
key_type = 'rsa'
|
||||||
|
if not os.path.isfile(priv_key_path):
|
||||||
|
log('Generating new SSH key for user %s.' % user)
|
||||||
|
cmd = ['ssh-keygen', '-q', '-N', '', '-t', key_type,
|
||||||
|
'-b', types_bits[key_type], '-f', priv_key_path]
|
||||||
|
check_call(cmd)
|
||||||
|
else:
|
||||||
|
log('SSH key already exists at %s.' % priv_key_path)
|
||||||
|
check_call(['chown', user, priv_key_path])
|
||||||
|
check_call(['chmod', '0600', priv_key_path])
|
||||||
|
|
||||||
|
|
||||||
|
def create_public_key(user, priv_key_path, pub_key_path):
|
||||||
|
if not os.path.isfile(pub_key_path):
|
||||||
|
log('Generating missing ssh public key @ %s.' % pub_key_path)
|
||||||
|
cmd = ['ssh-keygen', '-y', '-f', priv_key_path]
|
||||||
|
p = check_output(cmd).strip()
|
||||||
|
with open(pub_key_path, 'wb') as out:
|
||||||
|
out.write(p)
|
||||||
|
check_call(['chown', user, pub_key_path])
|
||||||
|
|
||||||
|
|
||||||
|
def get_keypair(user):
|
||||||
|
home_dir = get_homedir(user)
|
||||||
|
ssh_dir = os.path.join(home_dir, '.ssh')
|
||||||
|
priv_key = os.path.join(ssh_dir, 'id_rsa')
|
||||||
|
pub_key = '%s.pub' % priv_key
|
||||||
|
|
||||||
|
if not os.path.isdir(ssh_dir):
|
||||||
|
os.mkdir(ssh_dir)
|
||||||
|
check_call(['chown', '-R', user, ssh_dir])
|
||||||
|
|
||||||
|
create_private_key(user, priv_key)
|
||||||
|
create_public_key(user, priv_key, pub_key)
|
||||||
|
|
||||||
|
with open(priv_key, 'r') as p:
|
||||||
|
_priv = p.read().strip()
|
||||||
|
|
||||||
|
with open(pub_key, 'r') as p:
|
||||||
|
_pub = p.read().strip()
|
||||||
|
|
||||||
|
return (_priv, _pub)
|
||||||
|
|
||||||
|
|
||||||
|
def write_authorized_keys(user, keys):
|
||||||
|
home_dir = get_homedir(user)
|
||||||
|
ssh_dir = os.path.join(home_dir, '.ssh')
|
||||||
|
auth_keys = os.path.join(ssh_dir, 'authorized_keys')
|
||||||
|
log('Syncing authorized_keys @ %s.' % auth_keys)
|
||||||
|
with open(auth_keys, 'w') as out:
|
||||||
|
for k in keys:
|
||||||
|
out.write('%s\n' % k)
|
||||||
|
|
||||||
|
|
||||||
|
def write_known_hosts(user, hosts):
|
||||||
|
home_dir = get_homedir(user)
|
||||||
|
ssh_dir = os.path.join(home_dir, '.ssh')
|
||||||
|
known_hosts = os.path.join(ssh_dir, 'known_hosts')
|
||||||
|
khosts = []
|
||||||
|
for host in hosts:
|
||||||
|
cmd = ['ssh-keyscan', host]
|
||||||
|
remote_key = check_output(cmd, universal_newlines=True).strip()
|
||||||
|
khosts.append(remote_key)
|
||||||
|
log('Syncing known_hosts @ %s.' % known_hosts)
|
||||||
|
with open(known_hosts, 'w') as out:
|
||||||
|
for host in khosts:
|
||||||
|
out.write('%s\n' % host)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_user(user, group=None):
|
||||||
|
adduser(user, pwgen())
|
||||||
|
if group:
|
||||||
|
add_user_to_group(user, group)
|
||||||
|
|
||||||
|
|
||||||
|
def ssh_authorized_peers(peer_interface, user, group=None,
|
||||||
|
ensure_local_user=False):
|
||||||
|
"""
|
||||||
|
Main setup function, should be called from both peer -changed and -joined
|
||||||
|
hooks with the same parameters.
|
||||||
|
"""
|
||||||
|
if ensure_local_user:
|
||||||
|
ensure_user(user, group)
|
||||||
|
priv_key, pub_key = get_keypair(user)
|
||||||
|
hook = hook_name()
|
||||||
|
if hook == '%s-relation-joined' % peer_interface:
|
||||||
|
relation_set(ssh_pub_key=pub_key)
|
||||||
|
elif hook == '%s-relation-changed' % peer_interface or \
|
||||||
|
hook == '%s-relation-departed' % peer_interface:
|
||||||
|
hosts = []
|
||||||
|
keys = []
|
||||||
|
|
||||||
|
for r_id in relation_ids(peer_interface):
|
||||||
|
for unit in related_units(r_id):
|
||||||
|
ssh_pub_key = relation_get('ssh_pub_key',
|
||||||
|
rid=r_id,
|
||||||
|
unit=unit)
|
||||||
|
priv_addr = relation_get('private-address',
|
||||||
|
rid=r_id,
|
||||||
|
unit=unit)
|
||||||
|
if ssh_pub_key:
|
||||||
|
keys.append(ssh_pub_key)
|
||||||
|
hosts.append(priv_addr)
|
||||||
|
else:
|
||||||
|
log('ssh_authorized_peers(): ssh_pub_key '
|
||||||
|
'missing for unit %s, skipping.' % unit)
|
||||||
|
write_authorized_keys(user, keys)
|
||||||
|
write_known_hosts(user, hosts)
|
||||||
|
authed_hosts = ':'.join(hosts)
|
||||||
|
relation_set(ssh_authorized_hosts=authed_hosts)
|
||||||
|
|
||||||
|
|
||||||
|
def _run_as_user(user, gid=None):
|
||||||
|
try:
|
||||||
|
user = pwd.getpwnam(user)
|
||||||
|
except KeyError:
|
||||||
|
log('Invalid user: %s' % user)
|
||||||
|
raise Exception
|
||||||
|
uid = user.pw_uid
|
||||||
|
gid = gid or user.pw_gid
|
||||||
|
os.environ['HOME'] = user.pw_dir
|
||||||
|
|
||||||
|
def _inner():
|
||||||
|
os.setgid(gid)
|
||||||
|
os.setuid(uid)
|
||||||
|
return _inner
|
||||||
|
|
||||||
|
|
||||||
|
def run_as_user(user, cmd, gid=None):
|
||||||
|
return check_output(cmd, preexec_fn=_run_as_user(user, gid), cwd='/')
|
||||||
|
|
||||||
|
|
||||||
|
def collect_authed_hosts(peer_interface):
|
||||||
|
'''Iterate through the units on peer interface to find all that
|
||||||
|
have the calling host in its authorized hosts list'''
|
||||||
|
hosts = []
|
||||||
|
for r_id in (relation_ids(peer_interface) or []):
|
||||||
|
for unit in related_units(r_id):
|
||||||
|
private_addr = relation_get('private-address',
|
||||||
|
rid=r_id, unit=unit)
|
||||||
|
authed_hosts = relation_get('ssh_authorized_hosts',
|
||||||
|
rid=r_id, unit=unit)
|
||||||
|
|
||||||
|
if not authed_hosts:
|
||||||
|
log('Peer %s has not authorized *any* hosts yet, skipping.' %
|
||||||
|
(unit), level=INFO)
|
||||||
|
continue
|
||||||
|
|
||||||
|
if unit_private_ip() in authed_hosts.split(':'):
|
||||||
|
hosts.append(private_addr)
|
||||||
|
else:
|
||||||
|
log('Peer %s has not authorized *this* host yet, skipping.' %
|
||||||
|
(unit), level=INFO)
|
||||||
|
return hosts
|
||||||
|
|
||||||
|
|
||||||
|
def sync_path_to_host(path, host, user, verbose=False, cmd=None, gid=None,
|
||||||
|
fatal=False):
|
||||||
|
"""Sync path to an specific peer host
|
||||||
|
|
||||||
|
Propagates exception if operation fails and fatal=True.
|
||||||
|
"""
|
||||||
|
cmd = cmd or copy(BASE_CMD)
|
||||||
|
if not verbose:
|
||||||
|
cmd.append('-silent')
|
||||||
|
|
||||||
|
# removing trailing slash from directory paths, unison
|
||||||
|
# doesn't like these.
|
||||||
|
if path.endswith('/'):
|
||||||
|
path = path[:(len(path) - 1)]
|
||||||
|
|
||||||
|
cmd = cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
|
||||||
|
|
||||||
|
try:
|
||||||
|
log('Syncing local path %s to %s@%s:%s' % (path, user, host, path))
|
||||||
|
run_as_user(user, cmd, gid)
|
||||||
|
except:
|
||||||
|
log('Error syncing remote files')
|
||||||
|
if fatal:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def sync_to_peer(host, user, paths=None, verbose=False, cmd=None, gid=None,
|
||||||
|
fatal=False):
|
||||||
|
"""Sync paths to an specific peer host
|
||||||
|
|
||||||
|
Propagates exception if any operation fails and fatal=True.
|
||||||
|
"""
|
||||||
|
if paths:
|
||||||
|
for p in paths:
|
||||||
|
sync_path_to_host(p, host, user, verbose, cmd, gid, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def sync_to_peers(peer_interface, user, paths=None, verbose=False, cmd=None,
|
||||||
|
gid=None, fatal=False):
|
||||||
|
"""Sync all hosts to an specific path
|
||||||
|
|
||||||
|
The type of group is integer, it allows user has permissions to
|
||||||
|
operate a directory have a different group id with the user id.
|
||||||
|
|
||||||
|
Propagates exception if any operation fails and fatal=True.
|
||||||
|
"""
|
||||||
|
if paths:
|
||||||
|
for host in collect_authed_hosts(peer_interface):
|
||||||
|
sync_to_peer(host, user, paths, verbose, cmd, gid, fatal)
|
45
hooks/charmhelpers/core/files.py
Normal file
45
hooks/charmhelpers/core/files.py
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def sed(filename, before, after, flags='g'):
|
||||||
|
"""
|
||||||
|
Search and replaces the given pattern on filename.
|
||||||
|
|
||||||
|
:param filename: relative or absolute file path.
|
||||||
|
:param before: expression to be replaced (see 'man sed')
|
||||||
|
:param after: expression to replace with (see 'man sed')
|
||||||
|
:param flags: sed-compatible regex flags in example, to make
|
||||||
|
the search and replace case insensitive, specify ``flags="i"``.
|
||||||
|
The ``g`` flag is always specified regardless, so you do not
|
||||||
|
need to remember to include it when overriding this parameter.
|
||||||
|
:returns: If the sed command exit code was zero then return,
|
||||||
|
otherwise raise CalledProcessError.
|
||||||
|
"""
|
||||||
|
expression = r's/{0}/{1}/{2}'.format(before,
|
||||||
|
after, flags)
|
||||||
|
|
||||||
|
return subprocess.check_call(["sed", "-i", "-r", "-e",
|
||||||
|
expression,
|
||||||
|
os.path.expanduser(filename)])
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user