First phase of charm-helper refactoring
This commit is contained in:
parent
5e1a973484
commit
f3ed95e621
8
Makefile
Normal file
8
Makefile
Normal file
@ -0,0 +1,8 @@
|
||||
#!/usr/bin/make
|
||||
|
||||
lint:
|
||||
@flake8 --exclude hooks/charmhelpers hooks
|
||||
@charm proof
|
||||
|
||||
sync:
|
||||
@charm-helper-sync -c charm-helpers-sync.yaml
|
7
charm-helpers-sync.yaml
Normal file
7
charm-helpers-sync.yaml
Normal file
@ -0,0 +1,7 @@
|
||||
branch: lp:charm-helpers
|
||||
destination: hooks/charmhelpers
|
||||
include:
|
||||
- core
|
||||
- contrib.openstack
|
||||
- contrib.hahelpers
|
||||
- contrib.network.ovs
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
0
hooks/charmhelpers/contrib/hahelpers/__init__.py
Normal file
196
hooks/charmhelpers/contrib/hahelpers/apache_utils.py
Normal file
196
hooks/charmhelpers/contrib/hahelpers/apache_utils.py
Normal file
@ -0,0 +1,196 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
from utils import (
|
||||
relation_ids,
|
||||
relation_list,
|
||||
relation_get,
|
||||
render_template,
|
||||
juju_log,
|
||||
config_get,
|
||||
install,
|
||||
get_host_ip,
|
||||
restart
|
||||
)
|
||||
from cluster_utils import https
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
from base64 import b64decode
|
||||
|
||||
APACHE_SITE_DIR = "/etc/apache2/sites-available"
|
||||
SITE_TEMPLATE = "apache2_site.tmpl"
|
||||
RELOAD_CHECK = "To activate the new configuration"
|
||||
|
||||
|
||||
def get_cert():
    """Return an (ssl_cert, ssl_key) tuple for this service.

    Certificate data from charm configuration is preferred; when either
    half is missing there, every identity-service relation is scanned
    for 'ssl_cert'/'ssl_key' values instead.  Either element of the
    returned tuple may be None when nothing was found.
    """
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    juju_log('INFO',
             "Inspecting identity-service relations for SSL certificate.")
    cert = key = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if not cert:
                cert = relation_get('ssl_cert', rid=r_id, unit=unit)
            if not key:
                key = relation_get('ssl_key', rid=r_id, unit=unit)
    return (cert, key)
|
||||
|
||||
|
||||
def get_ca_cert():
    """Return the CA certificate published by any identity-service
    relation unit, or None when no unit provides one."""
    juju_log('INFO',
             "Inspecting identity-service relations for CA SSL certificate.")
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            ca_cert = relation_get('ca_cert', rid=r_id, unit=unit)
            if ca_cert:
                return ca_cert
    return None
|
||||
|
||||
|
||||
def install_ca_cert(ca_cert):
    """Install *ca_cert* into the system CA store and refresh the
    certificate database.  No-op when ca_cert is falsy."""
    if not ca_cert:
        return
    crt_path = ('/usr/local/share/ca-certificates/'
                'keystone_juju_ca_cert.crt')
    with open(crt_path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
|
||||
|
||||
|
||||
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    For a given number of port mappings, configures apache2
    HTTPs local reverse proxying using certificates and keys provided in
    either configuration data (preferred) or relation data.  Assumes ports
    are not in use (calling charm should ensure that).

    port_maps: dict: external to internal port mappings
    namespace: str: name of charm
    cert, key: base64-encoded SSL certificate and private key
    ca_cert: base64-encoded CA certificate (optional)

    returns: True on success, False when certificate data is missing.
    '''
    def _write_if_changed(path, new_content):
        # Write new_content only when it differs from what is already on
        # disk; return True when a write actually happened so the caller
        # knows apache needs restarting.
        content = None
        if os.path.exists(path):
            with open(path, 'r') as f:
                content = f.read().strip()
        if content != new_content:
            with open(path, 'w') as f:
                f.write(new_content)
            return True
        else:
            return False

    juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
    http_restart = False

    if cert:
        cert = b64decode(cert)
    if key:
        key = b64decode(key)
    if ca_cert:
        ca_cert = b64decode(ca_cert)

    # BUG FIX: both the certificate AND the key are required.  The
    # previous `not cert and not key` test let a half-configured pair
    # through, which crashed later when writing the missing half (None)
    # to disk.
    if not cert or not key:
        juju_log('ERROR',
                 "Expected but could not find SSL certificate data, not "
                 "configuring HTTPS!")
        return False

    install('apache2')
    if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
                                                'proxy', 'proxy_http']):
        http_restart = True

    ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
    if not os.path.exists(ssl_dir):
        os.makedirs(ssl_dir)

    if _write_if_changed(os.path.join(ssl_dir, 'cert'), cert):
        http_restart = True
    if _write_if_changed(os.path.join(ssl_dir, 'key'), key):
        http_restart = True
    # private key must not be world-readable (0o600 == legacy 0600)
    os.chmod(os.path.join(ssl_dir, 'key'), 0o600)

    install_ca_cert(ca_cert)

    sites_dir = '/etc/apache2/sites-available'
    for ext_port, int_port in port_maps.items():
        juju_log('INFO',
                 'Creating apache2 reverse proxy vhost'
                 ' for {}:{}'.format(ext_port,
                                     int_port))
        site = "{}_{}".format(namespace, ext_port)
        site_path = os.path.join(sites_dir, site)
        with open(site_path, 'w') as fsite:
            context = {
                "ext": ext_port,
                "int": int_port,
                "namespace": namespace,
                "private_address": get_host_ip()
            }
            fsite.write(render_template(SITE_TEMPLATE,
                                        context))

        if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
            http_restart = True

    if http_restart:
        restart('apache2')

    return True
|
||||
|
||||
|
||||
def disable_https(port_maps, namespace):
    '''
    Ensure HTTPS reverse proxying is disabled for given port mappings.

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))

    # nothing to do when apache or this charm's ssl dir was never set up
    if (not os.path.exists('/etc/apache2') or
            not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
        return

    http_restart = False
    for ext_port in port_maps.keys():
        if os.path.exists(os.path.join(APACHE_SITE_DIR,
                                       "{}_{}".format(namespace,
                                                      ext_port))):
            juju_log('INFO',
                     "Disabling HTTPS reverse proxy"
                     " for {} {}.".format(namespace,
                                          ext_port))
            if (RELOAD_CHECK in
                subprocess.check_output(['a2dissite',
                                         '{}_{}'.format(namespace,
                                                        ext_port)])):
                http_restart = True

    if http_restart:
        # BUG FIX: restart() takes service names as *args; passing a list
        # made it run `service ['apache2'] restart`.  Pass the name
        # directly, matching the call in enable_https().
        restart('apache2')
|
||||
|
||||
|
||||
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    Ensures HTTPS is either enabled or disabled for given port
    mapping.

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    # BUG FIX: `https` is a function imported from cluster_utils; the
    # previous `if not https:` tested the function object itself (always
    # truthy), so HTTPS was unconditionally enabled.  It must be called.
    if not https():
        disable_https(port_maps, namespace)
    else:
        enable_https(port_maps, namespace, cert, key, ca_cert)
|
256
hooks/charmhelpers/contrib/hahelpers/ceph_utils.py
Normal file
256
hooks/charmhelpers/contrib/hahelpers/ceph_utils.py
Normal file
@ -0,0 +1,256 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import commands
|
||||
import subprocess
|
||||
import os
|
||||
import shutil
|
||||
import utils
|
||||
|
||||
KEYRING = '/etc/ceph/ceph.client.%s.keyring'
|
||||
KEYFILE = '/etc/ceph/ceph.client.%s.key'
|
||||
|
||||
CEPH_CONF = """[global]
|
||||
auth supported = %(auth)s
|
||||
keyring = %(keyring)s
|
||||
mon host = %(mon_hosts)s
|
||||
"""
|
||||
|
||||
|
||||
def execute(cmd):
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def execute_shell(cmd):
|
||||
subprocess.check_call(cmd, shell=True)
|
||||
|
||||
|
||||
def install():
|
||||
ceph_dir = "/etc/ceph"
|
||||
if not os.path.isdir(ceph_dir):
|
||||
os.mkdir(ceph_dir)
|
||||
utils.install('ceph-common')
|
||||
|
||||
|
||||
def rbd_exists(service, pool, rbd_img):
|
||||
(rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' %\
|
||||
(service, pool))
|
||||
return rbd_img in out
|
||||
|
||||
|
||||
def create_rbd_image(service, pool, image, sizemb):
|
||||
cmd = [
|
||||
'rbd',
|
||||
'create',
|
||||
image,
|
||||
'--size',
|
||||
str(sizemb),
|
||||
'--id',
|
||||
service,
|
||||
'--pool',
|
||||
pool
|
||||
]
|
||||
execute(cmd)
|
||||
|
||||
|
||||
def pool_exists(service, name):
|
||||
(rc, out) = commands.getstatusoutput("rados --id %s lspools" % service)
|
||||
return name in out
|
||||
|
||||
|
||||
def create_pool(service, name):
|
||||
cmd = [
|
||||
'rados',
|
||||
'--id',
|
||||
service,
|
||||
'mkpool',
|
||||
name
|
||||
]
|
||||
execute(cmd)
|
||||
|
||||
|
||||
def keyfile_path(service):
|
||||
return KEYFILE % service
|
||||
|
||||
|
||||
def keyring_path(service):
|
||||
return KEYRING % service
|
||||
|
||||
|
||||
def create_keyring(service, key):
|
||||
keyring = keyring_path(service)
|
||||
if os.path.exists(keyring):
|
||||
utils.juju_log('INFO', 'ceph: Keyring exists at %s.' % keyring)
|
||||
cmd = [
|
||||
'ceph-authtool',
|
||||
keyring,
|
||||
'--create-keyring',
|
||||
'--name=client.%s' % service,
|
||||
'--add-key=%s' % key
|
||||
]
|
||||
execute(cmd)
|
||||
utils.juju_log('INFO', 'ceph: Created new ring at %s.' % keyring)
|
||||
|
||||
|
||||
def create_key_file(service, key):
    """Write the cephx key for *service* to its keyfile path.

    NOTE(review): an existing keyfile is logged but still overwritten —
    preserved from the original implementation.
    """
    keyfile = keyfile_path(service)
    if os.path.exists(keyfile):
        utils.juju_log('INFO', 'ceph: Keyfile exists at %s.' % keyfile)
    # use a context manager so the handle is closed even if write fails
    with open(keyfile, 'w') as fd:
        fd.write(key)
    utils.juju_log('INFO', 'ceph: Created new keyfile at %s.' % keyfile)
|
||||
|
||||
|
||||
def get_ceph_nodes():
|
||||
hosts = []
|
||||
for r_id in utils.relation_ids('ceph'):
|
||||
for unit in utils.relation_list(r_id):
|
||||
hosts.append(utils.relation_get('private-address',
|
||||
unit=unit, rid=r_id))
|
||||
return hosts
|
||||
|
||||
|
||||
def configure(service, key, auth):
    """Write /etc/ceph/ceph.conf for *service* and load the rbd module.

    Creates the keyring and keyfile, collects monitor hosts from the
    'ceph' relation, renders the minimal client config, then ensures the
    rbd kernel module is loaded.
    """
    create_keyring(service, key)
    create_key_file(service, key)
    hosts = get_ceph_nodes()
    mon_hosts = ",".join(map(str, hosts))
    keyring = keyring_path(service)
    # Build the template context explicitly instead of the fragile
    # `CEPH_CONF % locals()` trick, which silently coupled the template
    # to local variable names.
    context = {
        'auth': auth,
        'keyring': keyring,
        'mon_hosts': mon_hosts,
    }
    with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
        ceph_conf.write(CEPH_CONF % context)
    modprobe_kernel_module('rbd')
|
||||
|
||||
|
||||
def image_mapped(image_name):
|
||||
(rc, out) = commands.getstatusoutput('rbd showmapped')
|
||||
return image_name in out
|
||||
|
||||
|
||||
def map_block_storage(service, pool, image):
|
||||
cmd = [
|
||||
'rbd',
|
||||
'map',
|
||||
'%s/%s' % (pool, image),
|
||||
'--user',
|
||||
service,
|
||||
'--secret',
|
||||
keyfile_path(service),
|
||||
]
|
||||
execute(cmd)
|
||||
|
||||
|
||||
def filesystem_mounted(fs):
    """Return True when *fs* appears as a whole word in /proc/mounts."""
    rc = subprocess.call(['grep', '-wqs', fs, '/proc/mounts'])
    return rc == 0
|
||||
|
||||
|
||||
def make_filesystem(blk_device, fstype='ext4'):
|
||||
utils.juju_log('INFO',
|
||||
'ceph: Formatting block device %s as filesystem %s.' %\
|
||||
(blk_device, fstype))
|
||||
cmd = ['mkfs', '-t', fstype, blk_device]
|
||||
execute(cmd)
|
||||
|
||||
|
||||
def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'):
    """Migrate the data at *data_src_dst* onto the filesystem on
    *blk_device*, remount the device at the original location, and
    restore the directory's original ownership.
    """
    # mount block device into /mnt
    cmd = ['mount', '-t', fstype, blk_device, '/mnt']
    execute(cmd)

    # copy data to /mnt
    try:
        copy_files(data_src_dst, '/mnt')
    except Exception:
        # BUG FIX (narrowed): this was a bare `except:`, which also
        # swallowed SystemExit/KeyboardInterrupt.  The copy itself
        # remains deliberately best-effort.
        pass

    # umount block device
    cmd = ['umount', '/mnt']
    execute(cmd)

    # capture original ownership before remounting over the directory
    _dir = os.stat(data_src_dst)
    uid = _dir.st_uid
    gid = _dir.st_gid

    # re-mount where the data should originally be
    cmd = ['mount', '-t', fstype, blk_device, data_src_dst]
    execute(cmd)

    # ensure original ownership of new mount.
    cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst]
    execute(cmd)
|
||||
|
||||
|
||||
# TODO: re-use
|
||||
def modprobe_kernel_module(module):
|
||||
utils.juju_log('INFO', 'Loading kernel module')
|
||||
cmd = ['modprobe', module]
|
||||
execute(cmd)
|
||||
cmd = 'echo %s >> /etc/modules' % module
|
||||
execute_shell(cmd)
|
||||
|
||||
|
||||
def copy_files(src, dst, symlinks=False, ignore=None):
    """Recursively copy the contents of directory *src* into *dst*.

    Subdirectories are copied with shutil.copytree (honouring *symlinks*
    and *ignore*); regular files are copied with metadata via copy2.
    """
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            shutil.copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
|
||||
|
||||
|
||||
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
|
||||
blk_device, fstype, system_services=[]):
|
||||
"""
|
||||
To be called from the current cluster leader.
|
||||
Ensures given pool and RBD image exists, is mapped to a block device,
|
||||
and the device is formatted and mounted at the given mount_point.
|
||||
|
||||
If formatting a device for the first time, data existing at mount_point
|
||||
will be migrated to the RBD device before being remounted.
|
||||
|
||||
All services listed in system_services will be stopped prior to data
|
||||
migration and restarted when complete.
|
||||
"""
|
||||
# Ensure pool, RBD image, RBD mappings are in place.
|
||||
if not pool_exists(service, pool):
|
||||
utils.juju_log('INFO', 'ceph: Creating new pool %s.' % pool)
|
||||
create_pool(service, pool)
|
||||
|
||||
if not rbd_exists(service, pool, rbd_img):
|
||||
utils.juju_log('INFO', 'ceph: Creating RBD image (%s).' % rbd_img)
|
||||
create_rbd_image(service, pool, rbd_img, sizemb)
|
||||
|
||||
if not image_mapped(rbd_img):
|
||||
utils.juju_log('INFO', 'ceph: Mapping RBD Image as a Block Device.')
|
||||
map_block_storage(service, pool, rbd_img)
|
||||
|
||||
# make file system
|
||||
# TODO: What happens if for whatever reason this is run again and
|
||||
# the data is already in the rbd device and/or is mounted??
|
||||
# When it is mounted already, it will fail to make the fs
|
||||
# XXX: This is really sketchy! Need to at least add an fstab entry
|
||||
# otherwise this hook will blow away existing data if its executed
|
||||
# after a reboot.
|
||||
if not filesystem_mounted(mount_point):
|
||||
make_filesystem(blk_device, fstype)
|
||||
|
||||
for svc in system_services:
|
||||
if utils.running(svc):
|
||||
utils.juju_log('INFO',
|
||||
'Stopping services %s prior to migrating '\
|
||||
'data' % svc)
|
||||
utils.stop(svc)
|
||||
|
||||
place_data_on_ceph(service, blk_device, mount_point, fstype)
|
||||
|
||||
for svc in system_services:
|
||||
utils.start(svc)
|
@ -8,18 +8,22 @@
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
from lib.utils import (
|
||||
from utils import (
|
||||
juju_log,
|
||||
relation_ids,
|
||||
relation_list,
|
||||
relation_get,
|
||||
get_unit_hostname,
|
||||
config_get
|
||||
)
|
||||
)
|
||||
import subprocess
|
||||
import os
|
||||
|
||||
|
||||
class HAIncompleteConfig(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def is_clustered():
|
||||
for r_id in (relation_ids('ha') or []):
|
||||
for unit in (relation_list(r_id) or []):
|
||||
@ -90,10 +94,12 @@ def https():
|
||||
return True
|
||||
for r_id in relation_ids('identity-service'):
|
||||
for unit in relation_list(r_id):
|
||||
if (relation_get('https_keystone', rid=r_id, unit=unit) and
|
||||
relation_get('ssl_cert', rid=r_id, unit=unit) and
|
||||
relation_get('ssl_key', rid=r_id, unit=unit) and
|
||||
relation_get('ca_cert', rid=r_id, unit=unit)):
|
||||
if None not in [
|
||||
relation_get('https_keystone', rid=r_id, unit=unit),
|
||||
relation_get('ssl_cert', rid=r_id, unit=unit),
|
||||
relation_get('ssl_key', rid=r_id, unit=unit),
|
||||
relation_get('ca_cert', rid=r_id, unit=unit),
|
||||
]:
|
||||
return True
|
||||
return False
|
||||
|
||||
@ -128,3 +134,45 @@ def determine_haproxy_port(public_port):
|
||||
if https():
|
||||
i += 1
|
||||
return public_port - (i * 10)
|
||||
|
||||
|
||||
def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
    # plain comprehension instead of the previous side-effecting
    # `[missing.append(...) for ...]` idiom
    missing = [s for s, v in conf.iteritems() if v is None]
    if missing:
        # BUG FIX: the imported utils.juju_log requires (severity,
        # message); it was previously called with the message only.
        juju_log('ERROR', 'Insufficient config data to configure hacluster.')
        raise HAIncompleteConfig
    return conf
|
||||
|
||||
|
||||
def canonical_url(configs, vip_setting='vip'):
|
||||
'''
|
||||
Returns the correct HTTP URL to this host given the state of HTTPS
|
||||
configuration and hacluster.
|
||||
|
||||
:configs : OSTemplateRenderer: A config tempating object to inspect for
|
||||
a complete https context.
|
||||
:vip_setting: str: Setting in charm config that specifies
|
||||
VIP address.
|
||||
'''
|
||||
scheme = 'http'
|
||||
if 'https' in configs.complete_contexts():
|
||||
scheme = 'https'
|
||||
if is_clustered():
|
||||
addr = config_get(vip_setting)
|
||||
else:
|
||||
addr = get_unit_hostname()
|
||||
return '%s://%s' % (scheme, addr)
|
55
hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py
Normal file
55
hooks/charmhelpers/contrib/hahelpers/haproxy_utils.py
Normal file
@ -0,0 +1,55 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
from utils import (
|
||||
relation_ids,
|
||||
relation_list,
|
||||
relation_get,
|
||||
unit_get,
|
||||
reload,
|
||||
render_template
|
||||
)
|
||||
import os
|
||||
|
||||
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
|
||||
HAPROXY_DEFAULT = '/etc/default/haproxy'
|
||||
|
||||
|
||||
def configure_haproxy(service_ports):
|
||||
'''
|
||||
Configure HAProxy based on the current peers in the service
|
||||
cluster using the provided port map:
|
||||
|
||||
"swift": [ 8080, 8070 ]
|
||||
|
||||
HAproxy will also be reloaded/started if required
|
||||
|
||||
service_ports: dict: dict of lists of [ frontend, backend ]
|
||||
'''
|
||||
cluster_hosts = {}
|
||||
cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
|
||||
unit_get('private-address')
|
||||
for r_id in relation_ids('cluster'):
|
||||
for unit in relation_list(r_id):
|
||||
cluster_hosts[unit.replace('/', '-')] = \
|
||||
relation_get(attribute='private-address',
|
||||
rid=r_id,
|
||||
unit=unit)
|
||||
context = {
|
||||
'units': cluster_hosts,
|
||||
'service_ports': service_ports
|
||||
}
|
||||
with open(HAPROXY_CONF, 'w') as f:
|
||||
f.write(render_template(os.path.basename(HAPROXY_CONF),
|
||||
context))
|
||||
with open(HAPROXY_DEFAULT, 'w') as f:
|
||||
f.write('ENABLED=1')
|
||||
|
||||
reload('haproxy')
|
333
hooks/charmhelpers/contrib/hahelpers/utils.py
Normal file
333
hooks/charmhelpers/contrib/hahelpers/utils.py
Normal file
@ -0,0 +1,333 @@
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Paul Collins <paul.collins@canonical.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import socket
|
||||
import sys
|
||||
|
||||
|
||||
def do_hooks(hooks):
|
||||
hook = os.path.basename(sys.argv[0])
|
||||
|
||||
try:
|
||||
hook_func = hooks[hook]
|
||||
except KeyError:
|
||||
juju_log('INFO',
|
||||
"This charm doesn't know how to handle '{}'.".format(hook))
|
||||
else:
|
||||
hook_func()
|
||||
|
||||
|
||||
def install(*pkgs):
|
||||
cmd = [
|
||||
'apt-get',
|
||||
'-y',
|
||||
'install'
|
||||
]
|
||||
for pkg in pkgs:
|
||||
cmd.append(pkg)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
TEMPLATES_DIR = 'templates'
|
||||
|
||||
try:
|
||||
import jinja2
|
||||
except ImportError:
|
||||
install('python-jinja2')
|
||||
import jinja2
|
||||
|
||||
try:
|
||||
import dns.resolver
|
||||
except ImportError:
|
||||
install('python-dnspython')
|
||||
import dns.resolver
|
||||
|
||||
|
||||
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
|
||||
templates = jinja2.Environment(
|
||||
loader=jinja2.FileSystemLoader(template_dir)
|
||||
)
|
||||
template = templates.get_template(template_name)
|
||||
return template.render(context)
|
||||
|
||||
CLOUD_ARCHIVE = \
|
||||
""" # Ubuntu Cloud Archive
|
||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||
"""
|
||||
|
||||
CLOUD_ARCHIVE_POCKETS = {
|
||||
'folsom': 'precise-updates/folsom',
|
||||
'folsom/updates': 'precise-updates/folsom',
|
||||
'folsom/proposed': 'precise-proposed/folsom',
|
||||
'grizzly': 'precise-updates/grizzly',
|
||||
'grizzly/updates': 'precise-updates/grizzly',
|
||||
'grizzly/proposed': 'precise-proposed/grizzly'
|
||||
}
|
||||
|
||||
|
||||
def configure_source():
    '''Configure the apt installation source from the charm's
    `openstack-origin` config option (ppa:, cloud: or deb source lines)
    and refresh the package index.'''
    # NOTE(review): config_get may return None, which str() turns into
    # the truthy "None" — the guard then falls through to a bare
    # `apt-get update`.  Preserved from the original behaviour.
    source = str(config_get('openstack-origin'))
    if not source:
        return
    if source.startswith('ppa:'):
        cmd = [
            'add-apt-repository',
            source
        ]
        subprocess.check_call(cmd)
    if source.startswith('cloud:'):
        # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
        # cloud:precise-folsom/updates or cloud:precise-folsom/proposed
        install('ubuntu-cloud-keyring')
        pocket = source.split(':')[1]
        pocket = pocket.split('-')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
    if source.startswith('deb'):
        l = len(source.split('|'))
        if l == 2:
            (apt_line, key) = source.split('|')
            cmd = [
                'apt-key',
                'adv',
                # BUG FIX: '--keyserver' and its value were previously one
                # list element ('--keyserver keyserver.ubuntu.com'), which
                # apt-key received as a single malformed argument.
                '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key
            ]
            subprocess.check_call(cmd)
        elif l == 1:
            apt_line = source

        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write(apt_line + "\n")
    cmd = [
        'apt-get',
        'update'
    ]
    subprocess.check_call(cmd)
|
||||
|
||||
# Protocols
|
||||
TCP = 'TCP'
|
||||
UDP = 'UDP'
|
||||
|
||||
|
||||
def expose(port, protocol='TCP'):
|
||||
cmd = [
|
||||
'open-port',
|
||||
'{}/{}'.format(port, protocol)
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def juju_log(severity, message):
|
||||
cmd = [
|
||||
'juju-log',
|
||||
'--log-level', severity,
|
||||
message
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
cache = {}


def cached(func):
    """Memoize *func*'s results in the module-level ``cache`` dict.

    The cache key is the string form of (func, args, kwargs); repeated
    calls with the same arguments return the stored result without
    re-invoking *func*.
    """
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))
        if key in cache:
            return cache[key]
        res = func(*args, **kwargs)
        cache[key] = res
        return res
    return wrapper
|
||||
|
||||
|
||||
@cached
def relation_ids(relation):
    """Return the ids of all instances of *relation* as a list (empty
    when there are none).

    The historical `result == ""` check was dead code: str().split()
    always yields a list, never a string, so the None branch could never
    be taken — and callers iterate the result directly, relying on the
    list being returned.  The dead branch is removed; behaviour is
    unchanged.
    """
    cmd = [
        'relation-ids',
        relation
    ]
    return str(subprocess.check_output(cmd)).split()
|
||||
|
||||
|
||||
@cached
def relation_list(rid):
    """Return the units participating in relation *rid* as a list
    (empty when there are none).

    As with relation_ids, the historical `result == ""` comparison was
    dead code (split() always yields a list); the dead branch is
    removed with no behaviour change.
    """
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    return str(subprocess.check_output(cmd)).split()
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get(attribute, unit=None, rid=None):
|
||||
cmd = [
|
||||
'relation-get',
|
||||
]
|
||||
if rid:
|
||||
cmd.append('-r')
|
||||
cmd.append(rid)
|
||||
cmd.append(attribute)
|
||||
if unit:
|
||||
cmd.append(unit)
|
||||
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
if value == "":
|
||||
return None
|
||||
else:
|
||||
return value
|
||||
|
||||
|
||||
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
    """Obtain all relation data as dict by way of JSON.

    relation_id: optional relation id to query (passed as -r).
    remote_unit: optional unit name to impersonate by temporarily
        overriding JUJU_REMOTE_UNIT for the relation-get call.
    """
    cmd = [
        'relation-get', '--format=json'
    ]
    if relation_id:
        cmd.append('-r')
        cmd.append(relation_id)
    remote_unit_orig = None
    if remote_unit:
        remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
        os.environ['JUJU_REMOTE_UNIT'] = remote_unit
    j = subprocess.check_output(cmd)
    if remote_unit:
        # BUG FIX: the override was previously undone only when the
        # variable had a prior value; otherwise the temporary
        # JUJU_REMOTE_UNIT leaked into the rest of the hook.  Restore
        # the old value, or remove the variable if it was unset before.
        if remote_unit_orig is not None:
            os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
        else:
            del os.environ['JUJU_REMOTE_UNIT']
    d = json.loads(j)
    settings = {}
    # convert unicode to strings
    for k, v in d.iteritems():
        settings[str(k)] = str(v)
    return settings
|
||||
|
||||
|
||||
def relation_set(**kwargs):
|
||||
cmd = [
|
||||
'relation-set'
|
||||
]
|
||||
args = []
|
||||
for k, v in kwargs.items():
|
||||
if k == 'rid':
|
||||
if v:
|
||||
cmd.append('-r')
|
||||
cmd.append(v)
|
||||
else:
|
||||
args.append('{}={}'.format(k, v))
|
||||
cmd += args
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
cmd = [
|
||||
'unit-get',
|
||||
attribute
|
||||
]
|
||||
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
if value == "":
|
||||
return None
|
||||
else:
|
||||
return value
|
||||
|
||||
|
||||
@cached
|
||||
def config_get(attribute):
|
||||
cmd = [
|
||||
'config-get',
|
||||
'--format',
|
||||
'json',
|
||||
]
|
||||
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
cfg = json.loads(out)
|
||||
|
||||
try:
|
||||
return cfg[attribute]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
|
||||
@cached
|
||||
def get_unit_hostname():
|
||||
return socket.gethostname()
|
||||
|
||||
|
||||
@cached
|
||||
def get_host_ip(hostname=None):
|
||||
hostname = hostname or unit_get('private-address')
|
||||
try:
|
||||
# Test to see if already an IPv4 address
|
||||
socket.inet_aton(hostname)
|
||||
return hostname
|
||||
except socket.error:
|
||||
answers = dns.resolver.query(hostname, 'A')
|
||||
if answers:
|
||||
return answers[0].address
|
||||
return None
|
||||
|
||||
|
||||
def _svc_control(service, action):
|
||||
subprocess.check_call(['service', service, action])
|
||||
|
||||
|
||||
def restart(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'restart')
|
||||
|
||||
|
||||
def stop(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'stop')
|
||||
|
||||
|
||||
def start(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'start')
|
||||
|
||||
|
||||
def reload(*services):
|
||||
for service in services:
|
||||
try:
|
||||
_svc_control(service, 'reload')
|
||||
except subprocess.CalledProcessError:
|
||||
# Reload failed - either service does not support reload
|
||||
# or it was not running - restart will fixup most things
|
||||
_svc_control(service, 'restart')
|
||||
|
||||
|
||||
def running(service):
    """Return True when `service <name> status` reports the service as
    running; False otherwise, including when the status command itself
    exits non-zero."""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        return False
    # upstart reports "start/running"; sysvinit scripts say "is running"
    return "start/running" in output or "is running" in output
|
||||
|
||||
|
||||
def is_relation_made(relation, key='private-address'):
    """Return True when at least one unit on *relation* has published a
    value for *key*; False when the relation is absent or no unit has
    set it yet."""
    for r_id in relation_ids(relation) or []:
        for unit in relation_list(r_id) or []:
            value = relation_get(key, rid=r_id, unit=unit)
            if value:
                return True
    return False
|
0
hooks/charmhelpers/contrib/network/__init__.py
Normal file
0
hooks/charmhelpers/contrib/network/__init__.py
Normal file
72
hooks/charmhelpers/contrib/network/ovs/__init__.py
Normal file
72
hooks/charmhelpers/contrib/network/ovs/__init__.py
Normal file
@ -0,0 +1,72 @@
|
||||
''' Helpers for interacting with OpenvSwitch '''
|
||||
import subprocess
|
||||
import os
|
||||
from charmhelpers.core.hookenv import (
|
||||
log, WARNING
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
service
|
||||
)
|
||||
|
||||
|
||||
def add_bridge(name):
|
||||
''' Add the named bridge to openvswitch '''
|
||||
log('Creating bridge {}'.format(name))
|
||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])
|
||||
|
||||
|
||||
def del_bridge(name):
|
||||
''' Delete the named bridge from openvswitch '''
|
||||
log('Deleting bridge {}'.format(name))
|
||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])
|
||||
|
||||
|
||||
def add_bridge_port(name, port):
|
||||
''' Add a port to the named openvswitch bridge '''
|
||||
log('Adding port {} to bridge {}'.format(port, name))
|
||||
subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
|
||||
name, port])
|
||||
subprocess.check_call(["ip", "link", "set", port, "up"])
|
||||
|
||||
|
||||
def del_bridge_port(name, port):
|
||||
''' Delete a port from the named openvswitch bridge '''
|
||||
log('Deleting port {} from bridge {}'.format(port, name))
|
||||
subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
|
||||
name, port])
|
||||
subprocess.check_call(["ip", "link", "set", port, "down"])
|
||||
|
||||
|
||||
def set_manager(manager):
|
||||
''' Set the controller for the local openvswitch '''
|
||||
log('Setting manager for local ovs to {}'.format(manager))
|
||||
subprocess.check_call(['ovs-vsctl', 'set-manager',
|
||||
'ssl:{}'.format(manager)])
|
||||
|
||||
|
||||
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
|
||||
|
||||
|
||||
def get_certificate():
    ''' Read openvswitch certificate from disk '''
    # guard clause: warn and bail when no certificate file is present
    if not os.path.exists(CERT_PATH):
        log('Certificate not found', level=WARNING)
        return None
    log('Reading ovs certificate from {}'.format(CERT_PATH))
    with open(CERT_PATH, 'r') as cert:
        full_cert = cert.read()
    begin_marker = "-----BEGIN CERTIFICATE-----"
    end_marker = "-----END CERTIFICATE-----"
    begin_index = full_cert.find(begin_marker)
    end_index = full_cert.rfind(end_marker)
    if begin_index == -1 or end_index == -1:
        raise RuntimeError("Certificate does not contain valid begin"
                           " and end markers.")
    # trim anything surrounding the PEM block itself
    return full_cert[begin_index:(end_index + len(end_marker))]
|
||||
|
||||
|
||||
def full_restart():
|
||||
''' Full restart and reload of openvswitch '''
|
||||
service('force-reload-kmod', 'openvswitch-switch')
|
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
0
hooks/charmhelpers/contrib/openstack/__init__.py
Normal file
@ -5,6 +5,15 @@
|
||||
import apt_pkg as apt
|
||||
import subprocess
|
||||
import os
|
||||
import sys
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
config,
|
||||
)
|
||||
|
||||
from charmhelpers.core.host import (
|
||||
lsb_release,
|
||||
)
|
||||
|
||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
||||
@ -42,30 +51,19 @@ def juju_log(msg):
|
||||
|
||||
def error_out(msg):
|
||||
juju_log("FATAL ERROR: %s" % msg)
|
||||
exit(1)
|
||||
|
||||
|
||||
def lsb_release():
|
||||
'''Return /etc/lsb-release in a dict'''
|
||||
lsb = open('/etc/lsb-release', 'r')
|
||||
d = {}
|
||||
for l in lsb:
|
||||
k, v = l.split('=')
|
||||
d[k.strip()] = v.strip()
|
||||
return d
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_os_codename_install_source(src):
|
||||
'''Derive OpenStack release codename from a given installation source.'''
|
||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||
|
||||
rel = ''
|
||||
if src == 'distro':
|
||||
try:
|
||||
rel = ubuntu_openstack_release[ubuntu_rel]
|
||||
except KeyError:
|
||||
e = 'Code not derive openstack release for '\
|
||||
'this Ubuntu release: %s' % rel
|
||||
e = 'Could not derive openstack release for '\
|
||||
'this Ubuntu release: %s' % ubuntu_rel
|
||||
error_out(e)
|
||||
return rel
|
||||
|
||||
@ -81,6 +79,11 @@ def get_os_codename_install_source(src):
|
||||
return v
|
||||
|
||||
|
||||
def get_os_version_install_source(src):
|
||||
codename = get_os_codename_install_source(src)
|
||||
return get_os_version_codename(codename)
|
||||
|
||||
|
||||
def get_os_codename_version(vers):
|
||||
'''Determine OpenStack codename from version number.'''
|
||||
try:
|
||||
@ -95,18 +98,21 @@ def get_os_version_codename(codename):
|
||||
for k, v in openstack_codenames.iteritems():
|
||||
if v == codename:
|
||||
return k
|
||||
e = 'Code not derive OpenStack version for '\
|
||||
e = 'Could not derive OpenStack version for '\
|
||||
'codename: %s' % codename
|
||||
error_out(e)
|
||||
|
||||
|
||||
def get_os_codename_package(pkg):
|
||||
def get_os_codename_package(pkg, fatal=True):
|
||||
'''Derive OpenStack release codename from an installed package.'''
|
||||
apt.init()
|
||||
cache = apt.Cache()
|
||||
|
||||
try:
|
||||
pkg = cache[pkg]
|
||||
except:
|
||||
if not fatal:
|
||||
return None
|
||||
e = 'Could not determine version of installed package: %s' % pkg
|
||||
error_out(e)
|
||||
|
||||
@ -124,9 +130,12 @@ def get_os_codename_package(pkg):
|
||||
error_out(e)
|
||||
|
||||
|
||||
def get_os_version_package(pkg):
|
||||
def get_os_version_package(pkg, fatal=True):
|
||||
'''Derive OpenStack version number from an installed package.'''
|
||||
codename = get_os_codename_package(pkg)
|
||||
codename = get_os_codename_package(pkg, fatal=fatal)
|
||||
|
||||
if not codename:
|
||||
return None
|
||||
|
||||
if 'swift' in pkg:
|
||||
vers_map = swift_codenames
|
||||
@ -136,21 +145,21 @@ def get_os_version_package(pkg):
|
||||
for version, cname in vers_map.iteritems():
|
||||
if cname == codename:
|
||||
return version
|
||||
e = "Could not determine OpenStack version for package: %s" % pkg
|
||||
error_out(e)
|
||||
#e = "Could not determine OpenStack version for package: %s" % pkg
|
||||
#error_out(e)
|
||||
|
||||
|
||||
def import_key(keyid):
|
||||
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
|
||||
"--recv-keys %s" % keyid
|
||||
try:
|
||||
subprocess.check_call(cmd.split(' '))
|
||||
except subprocess.CalledProcessError:
|
||||
error_out("Error importing repo key %s" % keyid)
|
||||
|
||||
|
||||
def configure_installation_source(rel):
|
||||
'''Configure apt installation source.'''
|
||||
|
||||
def _import_key(keyid):
|
||||
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
|
||||
"--recv-keys %s" % keyid
|
||||
try:
|
||||
subprocess.check_call(cmd.split(' '))
|
||||
except subprocess.CalledProcessError:
|
||||
error_out("Error importing repo key %s" % keyid)
|
||||
|
||||
if rel == 'distro':
|
||||
return
|
||||
elif rel[:4] == "ppa:":
|
||||
@ -161,12 +170,9 @@ def configure_installation_source(rel):
|
||||
if l == 2:
|
||||
src, key = rel.split('|')
|
||||
juju_log("Importing PPA key from keyserver for %s" % src)
|
||||
_import_key(key)
|
||||
import_key(key)
|
||||
elif l == 1:
|
||||
src = rel
|
||||
else:
|
||||
error_out("Invalid openstack-release: %s" % rel)
|
||||
|
||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
||||
f.write(src)
|
||||
elif rel[:6] == 'cloud:':
|
||||
@ -205,7 +211,8 @@ def configure_installation_source(rel):
|
||||
error_out(e)
|
||||
|
||||
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
|
||||
_import_key(CLOUD_ARCHIVE_KEY_ID)
|
||||
# TODO: Replace key import with cloud archive keyring pkg.
|
||||
import_key(CLOUD_ARCHIVE_KEY_ID)
|
||||
|
||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
|
||||
f.write(src)
|
||||
@ -221,10 +228,29 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
||||
updated config information necessary to perform health checks or
|
||||
service changes.
|
||||
"""
|
||||
charm_dir = os.getenv('CHARM_DIR')
|
||||
juju_rc_path = "%s/%s" % (charm_dir, script_path)
|
||||
unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '-')
|
||||
juju_rc_path = "/var/lib/juju/units/%s/charm/%s" % (unit_name, script_path)
|
||||
with open(juju_rc_path, 'wb') as rc_script:
|
||||
rc_script.write(
|
||||
"#!/bin/bash\n")
|
||||
[rc_script.write('export %s=%s\n' % (u, p))
|
||||
for u, p in env_vars.iteritems() if u != "script_path"]
|
||||
|
||||
|
||||
def openstack_upgrade_available(package):
|
||||
"""
|
||||
Determines if an OpenStack upgrade is available from installation
|
||||
source, based on version of installed package.
|
||||
|
||||
:param package: str: Name of installed package.
|
||||
|
||||
:returns: bool: : Returns True if configured installation source offers
|
||||
a newer version of package.
|
||||
|
||||
"""
|
||||
|
||||
src = config('openstack-origin')
|
||||
cur_vers = get_os_version_package(package)
|
||||
available_vers = get_os_version_install_source(src)
|
||||
apt.init()
|
||||
return apt.version_compare(available_vers, cur_vers) == 1
|
0
hooks/charmhelpers/core/__init__.py
Normal file
0
hooks/charmhelpers/core/__init__.py
Normal file
339
hooks/charmhelpers/core/hookenv.py
Normal file
339
hooks/charmhelpers/core/hookenv.py
Normal file
@ -0,0 +1,339 @@
|
||||
"Interactions with the Juju environment"
|
||||
# Copyright 2013 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
import subprocess
|
||||
import UserDict
|
||||
|
||||
CRITICAL = "CRITICAL"
|
||||
ERROR = "ERROR"
|
||||
WARNING = "WARNING"
|
||||
INFO = "INFO"
|
||||
DEBUG = "DEBUG"
|
||||
MARKER = object()
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
def cached(func):
|
||||
''' Cache return values for multiple executions of func + args
|
||||
|
||||
For example:
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
pass
|
||||
|
||||
unit_get('test')
|
||||
|
||||
will cache the result of unit_get + 'test' for future calls.
|
||||
'''
|
||||
def wrapper(*args, **kwargs):
|
||||
global cache
|
||||
key = str((func, args, kwargs))
|
||||
try:
|
||||
return cache[key]
|
||||
except KeyError:
|
||||
res = func(*args, **kwargs)
|
||||
cache[key] = res
|
||||
return res
|
||||
return wrapper
|
||||
|
||||
|
||||
def flush(key):
|
||||
''' Flushes any entries from function cache where the
|
||||
key is found in the function+args '''
|
||||
flush_list = []
|
||||
for item in cache:
|
||||
if key in item:
|
||||
flush_list.append(item)
|
||||
for item in flush_list:
|
||||
del cache[item]
|
||||
|
||||
|
||||
def log(message, level=None):
|
||||
"Write a message to the juju log"
|
||||
command = ['juju-log']
|
||||
if level:
|
||||
command += ['-l', level]
|
||||
command += [message]
|
||||
subprocess.call(command)
|
||||
|
||||
|
||||
class Serializable(UserDict.IterableUserDict):
|
||||
"Wrapper, an object that can be serialized to yaml or json"
|
||||
|
||||
def __init__(self, obj):
|
||||
# wrap the object
|
||||
UserDict.IterableUserDict.__init__(self)
|
||||
self.data = obj
|
||||
|
||||
def __getattr__(self, attr):
|
||||
# See if this object has attribute.
|
||||
if attr in ("json", "yaml", "data"):
|
||||
return self.__dict__[attr]
|
||||
# Check for attribute in wrapped object.
|
||||
got = getattr(self.data, attr, MARKER)
|
||||
if got is not MARKER:
|
||||
return got
|
||||
# Proxy to the wrapped object via dict interface.
|
||||
try:
|
||||
return self.data[attr]
|
||||
except KeyError:
|
||||
raise AttributeError(attr)
|
||||
|
||||
def __getstate__(self):
|
||||
# Pickle as a standard dictionary.
|
||||
return self.data
|
||||
|
||||
def __setstate__(self, state):
|
||||
# Unpickle into our wrapper.
|
||||
self.data = state
|
||||
|
||||
def json(self):
|
||||
"Serialize the object to json"
|
||||
return json.dumps(self.data)
|
||||
|
||||
def yaml(self):
|
||||
"Serialize the object to yaml"
|
||||
return yaml.dump(self.data)
|
||||
|
||||
|
||||
def execution_environment():
|
||||
"""A convenient bundling of the current execution context"""
|
||||
context = {}
|
||||
context['conf'] = config()
|
||||
if relation_id():
|
||||
context['reltype'] = relation_type()
|
||||
context['relid'] = relation_id()
|
||||
context['rel'] = relation_get()
|
||||
context['unit'] = local_unit()
|
||||
context['rels'] = relations()
|
||||
context['env'] = os.environ
|
||||
return context
|
||||
|
||||
|
||||
def in_relation_hook():
|
||||
"Determine whether we're running in a relation hook"
|
||||
return 'JUJU_RELATION' in os.environ
|
||||
|
||||
|
||||
def relation_type():
|
||||
"The scope for the current relation hook"
|
||||
return os.environ.get('JUJU_RELATION', None)
|
||||
|
||||
|
||||
def relation_id():
|
||||
"The relation ID for the current relation hook"
|
||||
return os.environ.get('JUJU_RELATION_ID', None)
|
||||
|
||||
|
||||
def local_unit():
|
||||
"Local unit ID"
|
||||
return os.environ['JUJU_UNIT_NAME']
|
||||
|
||||
|
||||
def remote_unit():
|
||||
"The remote unit for the current relation hook"
|
||||
return os.environ['JUJU_REMOTE_UNIT']
|
||||
|
||||
|
||||
def service_name():
|
||||
"The name service group this unit belongs to"
|
||||
return local_unit().split('/')[0]
|
||||
|
||||
|
||||
@cached
|
||||
def config(scope=None):
|
||||
"Juju charm configuration"
|
||||
config_cmd_line = ['config-get']
|
||||
if scope is not None:
|
||||
config_cmd_line.append(scope)
|
||||
config_cmd_line.append('--format=json')
|
||||
try:
|
||||
return json.loads(subprocess.check_output(config_cmd_line))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get(attribute=None, unit=None, rid=None):
|
||||
_args = ['relation-get', '--format=json']
|
||||
if rid:
|
||||
_args.append('-r')
|
||||
_args.append(rid)
|
||||
_args.append(attribute or '-')
|
||||
if unit:
|
||||
_args.append(unit)
|
||||
try:
|
||||
return json.loads(subprocess.check_output(_args))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def relation_set(relation_id=None, relation_settings={}, **kwargs):
|
||||
relation_cmd_line = ['relation-set']
|
||||
if relation_id is not None:
|
||||
relation_cmd_line.extend(('-r', relation_id))
|
||||
for k, v in (relation_settings.items() + kwargs.items()):
|
||||
if v is None:
|
||||
relation_cmd_line.append('{}='.format(k))
|
||||
else:
|
||||
relation_cmd_line.append('{}={}'.format(k, v))
|
||||
subprocess.check_call(relation_cmd_line)
|
||||
# Flush cache of any relation-gets for local unit
|
||||
flush(local_unit())
|
||||
|
||||
|
||||
@cached
|
||||
def relation_ids(reltype=None):
|
||||
"A list of relation_ids"
|
||||
reltype = reltype or relation_type()
|
||||
relid_cmd_line = ['relation-ids', '--format=json']
|
||||
if reltype is not None:
|
||||
relid_cmd_line.append(reltype)
|
||||
return json.loads(subprocess.check_output(relid_cmd_line))
|
||||
return []
|
||||
|
||||
|
||||
@cached
|
||||
def related_units(relid=None):
|
||||
"A list of related units"
|
||||
relid = relid or relation_id()
|
||||
units_cmd_line = ['relation-list', '--format=json']
|
||||
if relid is not None:
|
||||
units_cmd_line.extend(('-r', relid))
|
||||
return json.loads(subprocess.check_output(units_cmd_line))
|
||||
|
||||
|
||||
@cached
|
||||
def relation_for_unit(unit=None, rid=None):
|
||||
"Get the json represenation of a unit's relation"
|
||||
unit = unit or remote_unit()
|
||||
relation = relation_get(unit=unit, rid=rid)
|
||||
for key in relation:
|
||||
if key.endswith('-list'):
|
||||
relation[key] = relation[key].split()
|
||||
relation['__unit__'] = unit
|
||||
return relation
|
||||
|
||||
|
||||
@cached
|
||||
def relations_for_id(relid=None):
|
||||
"Get relations of a specific relation ID"
|
||||
relation_data = []
|
||||
relid = relid or relation_ids()
|
||||
for unit in related_units(relid):
|
||||
unit_data = relation_for_unit(unit, relid)
|
||||
unit_data['__relid__'] = relid
|
||||
relation_data.append(unit_data)
|
||||
return relation_data
|
||||
|
||||
|
||||
@cached
|
||||
def relations_of_type(reltype=None):
|
||||
"Get relations of a specific type"
|
||||
relation_data = []
|
||||
reltype = reltype or relation_type()
|
||||
for relid in relation_ids(reltype):
|
||||
for relation in relations_for_id(relid):
|
||||
relation['__relid__'] = relid
|
||||
relation_data.append(relation)
|
||||
return relation_data
|
||||
|
||||
|
||||
@cached
|
||||
def relation_types():
|
||||
"Get a list of relation types supported by this charm"
|
||||
charmdir = os.environ.get('CHARM_DIR', '')
|
||||
mdf = open(os.path.join(charmdir, 'metadata.yaml'))
|
||||
md = yaml.safe_load(mdf)
|
||||
rel_types = []
|
||||
for key in ('provides', 'requires', 'peers'):
|
||||
section = md.get(key)
|
||||
if section:
|
||||
rel_types.extend(section.keys())
|
||||
mdf.close()
|
||||
return rel_types
|
||||
|
||||
|
||||
@cached
|
||||
def relations():
|
||||
rels = {}
|
||||
for reltype in relation_types():
|
||||
relids = {}
|
||||
for relid in relation_ids(reltype):
|
||||
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
|
||||
for unit in related_units(relid):
|
||||
reldata = relation_get(unit=unit, rid=relid)
|
||||
units[unit] = reldata
|
||||
relids[relid] = units
|
||||
rels[reltype] = relids
|
||||
return rels
|
||||
|
||||
|
||||
def open_port(port, protocol="TCP"):
|
||||
"Open a service network port"
|
||||
_args = ['open-port']
|
||||
_args.append('{}/{}'.format(port, protocol))
|
||||
subprocess.check_call(_args)
|
||||
|
||||
|
||||
def close_port(port, protocol="TCP"):
|
||||
"Close a service network port"
|
||||
_args = ['close-port']
|
||||
_args.append('{}/{}'.format(port, protocol))
|
||||
subprocess.check_call(_args)
|
||||
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
_args = ['unit-get', '--format=json', attribute]
|
||||
try:
|
||||
return json.loads(subprocess.check_output(_args))
|
||||
except ValueError:
|
||||
return None
|
||||
|
||||
|
||||
def unit_private_ip():
|
||||
return unit_get('private-address')
|
||||
|
||||
|
||||
class UnregisteredHookError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class Hooks(object):
|
||||
def __init__(self):
|
||||
super(Hooks, self).__init__()
|
||||
self._hooks = {}
|
||||
|
||||
def register(self, name, function):
|
||||
self._hooks[name] = function
|
||||
|
||||
def execute(self, args):
|
||||
hook_name = os.path.basename(args[0])
|
||||
if hook_name in self._hooks:
|
||||
self._hooks[hook_name]()
|
||||
else:
|
||||
raise UnregisteredHookError(hook_name)
|
||||
|
||||
def hook(self, *hook_names):
|
||||
def wrapper(decorated):
|
||||
for hook_name in hook_names:
|
||||
self.register(hook_name, decorated)
|
||||
else:
|
||||
self.register(decorated.__name__, decorated)
|
||||
if '_' in decorated.__name__:
|
||||
self.register(
|
||||
decorated.__name__.replace('_', '-'), decorated)
|
||||
return decorated
|
||||
return wrapper
|
||||
|
||||
def charm_dir():
|
||||
return os.environ.get('CHARM_DIR')
|
273
hooks/charmhelpers/core/host.py
Normal file
273
hooks/charmhelpers/core/host.py
Normal file
@ -0,0 +1,273 @@
|
||||
"""Tools for working with the host system"""
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# Authors:
|
||||
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||
|
||||
import apt_pkg
|
||||
import os
|
||||
import pwd
|
||||
import grp
|
||||
import subprocess
|
||||
import hashlib
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
from hookenv import log, execution_environment
|
||||
|
||||
|
||||
def service_start(service_name):
|
||||
service('start', service_name)
|
||||
|
||||
|
||||
def service_stop(service_name):
|
||||
service('stop', service_name)
|
||||
|
||||
|
||||
def service_restart(service_name):
|
||||
service('restart', service_name)
|
||||
|
||||
|
||||
def service_reload(service_name, restart_on_failure=False):
|
||||
if not service('reload', service_name) and restart_on_failure:
|
||||
service('restart', service_name)
|
||||
|
||||
|
||||
def service(action, service_name):
|
||||
cmd = ['service', service_name, action]
|
||||
return subprocess.call(cmd) == 0
|
||||
|
||||
|
||||
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||
"""Add a user"""
|
||||
try:
|
||||
user_info = pwd.getpwnam(username)
|
||||
log('user {0} already exists!'.format(username))
|
||||
except KeyError:
|
||||
log('creating user {0}'.format(username))
|
||||
cmd = ['useradd']
|
||||
if system_user or password is None:
|
||||
cmd.append('--system')
|
||||
else:
|
||||
cmd.extend([
|
||||
'--create-home',
|
||||
'--shell', shell,
|
||||
'--password', password,
|
||||
])
|
||||
cmd.append(username)
|
||||
subprocess.check_call(cmd)
|
||||
user_info = pwd.getpwnam(username)
|
||||
return user_info
|
||||
|
||||
|
||||
def add_user_to_group(username, group):
|
||||
"""Add a user to a group"""
|
||||
cmd = [
|
||||
'gpasswd', '-a',
|
||||
username,
|
||||
group
|
||||
]
|
||||
log("Adding user {} to group {}".format(username, group))
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None):
|
||||
"""Replicate the contents of a path"""
|
||||
context = execution_environment()
|
||||
options = options or ['--delete', '--executability']
|
||||
cmd = ['/usr/bin/rsync', flags]
|
||||
cmd.extend(options)
|
||||
cmd.append(from_path.format(**context))
|
||||
cmd.append(to_path.format(**context))
|
||||
log(" ".join(cmd))
|
||||
return subprocess.check_output(cmd).strip()
|
||||
|
||||
|
||||
def symlink(source, destination):
|
||||
"""Create a symbolic link"""
|
||||
context = execution_environment()
|
||||
log("Symlinking {} as {}".format(source, destination))
|
||||
cmd = [
|
||||
'ln',
|
||||
'-sf',
|
||||
source.format(**context),
|
||||
destination.format(**context)
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def mkdir(path, owner='root', group='root', perms=0555, force=False):
|
||||
"""Create a directory"""
|
||||
context = execution_environment()
|
||||
log("Making dir {} {}:{} {:o}".format(path, owner, group,
|
||||
perms))
|
||||
uid = pwd.getpwnam(owner.format(**context)).pw_uid
|
||||
gid = grp.getgrnam(group.format(**context)).gr_gid
|
||||
realpath = os.path.abspath(path)
|
||||
if os.path.exists(realpath):
|
||||
if force and not os.path.isdir(realpath):
|
||||
log("Removing non-directory file {} prior to mkdir()".format(path))
|
||||
os.unlink(realpath)
|
||||
else:
|
||||
os.makedirs(realpath, perms)
|
||||
os.chown(realpath, uid, gid)
|
||||
|
||||
|
||||
def write_file(path, fmtstr, owner='root', group='root', perms=0444, **kwargs):
|
||||
"""Create or overwrite a file with the contents of a string"""
|
||||
context = execution_environment()
|
||||
context.update(kwargs)
|
||||
log("Writing file {} {}:{} {:o}".format(path, owner, group,
|
||||
perms))
|
||||
uid = pwd.getpwnam(owner.format(**context)).pw_uid
|
||||
gid = grp.getgrnam(group.format(**context)).gr_gid
|
||||
with open(path.format(**context), 'w') as target:
|
||||
os.fchown(target.fileno(), uid, gid)
|
||||
os.fchmod(target.fileno(), perms)
|
||||
target.write(fmtstr.format(**context))
|
||||
|
||||
|
||||
def render_template_file(source, destination, **kwargs):
|
||||
"""Create or overwrite a file using a template"""
|
||||
log("Rendering template {} for {}".format(source,
|
||||
destination))
|
||||
context = execution_environment()
|
||||
with open(source.format(**context), 'r') as template:
|
||||
write_file(destination.format(**context), template.read(),
|
||||
**kwargs)
|
||||
|
||||
|
||||
def filter_installed_packages(packages):
|
||||
"""Returns a list of packages that require installation"""
|
||||
apt_pkg.init()
|
||||
cache = apt_pkg.Cache()
|
||||
_pkgs = []
|
||||
for package in packages:
|
||||
try:
|
||||
p = cache[package]
|
||||
p.current_ver or _pkgs.append(package)
|
||||
except KeyError:
|
||||
log('Package {} has no installation candidate.'.format(package),
|
||||
level='WARNING')
|
||||
_pkgs.append(package)
|
||||
return _pkgs
|
||||
|
||||
|
||||
def apt_install(packages, options=None, fatal=False):
|
||||
"""Install one or more packages"""
|
||||
options = options or []
|
||||
cmd = ['apt-get', '-y']
|
||||
cmd.extend(options)
|
||||
cmd.append('install')
|
||||
if isinstance(packages, basestring):
|
||||
cmd.append(packages)
|
||||
else:
|
||||
cmd.extend(packages)
|
||||
log("Installing {} with options: {}".format(packages,
|
||||
options))
|
||||
if fatal:
|
||||
subprocess.check_call(cmd)
|
||||
else:
|
||||
subprocess.call(cmd)
|
||||
|
||||
|
||||
def apt_update(fatal=False):
|
||||
"""Update local apt cache"""
|
||||
cmd = ['apt-get', 'update']
|
||||
if fatal:
|
||||
subprocess.check_call(cmd)
|
||||
else:
|
||||
subprocess.call(cmd)
|
||||
|
||||
|
||||
def mount(device, mountpoint, options=None, persist=False):
|
||||
'''Mount a filesystem'''
|
||||
cmd_args = ['mount']
|
||||
if options is not None:
|
||||
cmd_args.extend(['-o', options])
|
||||
cmd_args.extend([device, mountpoint])
|
||||
try:
|
||||
subprocess.check_output(cmd_args)
|
||||
except subprocess.CalledProcessError, e:
|
||||
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
|
||||
return False
|
||||
if persist:
|
||||
# TODO: update fstab
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def umount(mountpoint, persist=False):
|
||||
'''Unmount a filesystem'''
|
||||
cmd_args = ['umount', mountpoint]
|
||||
try:
|
||||
subprocess.check_output(cmd_args)
|
||||
except subprocess.CalledProcessError, e:
|
||||
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||
return False
|
||||
if persist:
|
||||
# TODO: update fstab
|
||||
pass
|
||||
return True
|
||||
|
||||
|
||||
def mounts():
|
||||
'''List of all mounted volumes as [[mountpoint,device],[...]]'''
|
||||
with open('/proc/mounts') as f:
|
||||
# [['/mount/point','/dev/path'],[...]]
|
||||
system_mounts = [m[1::-1] for m in [l.strip().split()
|
||||
for l in f.readlines()]]
|
||||
return system_mounts
|
||||
|
||||
|
||||
def file_hash(path):
|
||||
''' Generate a md5 hash of the contents of 'path' or None if not found '''
|
||||
if os.path.exists(path):
|
||||
h = hashlib.md5()
|
||||
with open(path, 'r') as source:
|
||||
h.update(source.read()) # IGNORE:E1101 - it does have update
|
||||
return h.hexdigest()
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def restart_on_change(restart_map):
|
||||
''' Restart services based on configuration files changing
|
||||
|
||||
This function is used a decorator, for example
|
||||
|
||||
@restart_on_change({
|
||||
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
||||
})
|
||||
def ceph_client_changed():
|
||||
...
|
||||
|
||||
In this example, the cinder-api and cinder-volume services
|
||||
would be restarted if /etc/ceph/ceph.conf is changed by the
|
||||
ceph_client_changed function.
|
||||
'''
|
||||
def wrap(f):
|
||||
def wrapped_f(*args):
|
||||
checksums = {}
|
||||
for path in restart_map:
|
||||
checksums[path] = file_hash(path)
|
||||
f(*args)
|
||||
restarts = []
|
||||
for path in restart_map:
|
||||
if checksums[path] != file_hash(path):
|
||||
restarts += restart_map[path]
|
||||
for service_name in list(OrderedDict.fromkeys(restarts)):
|
||||
service('restart', service_name)
|
||||
return wrapped_f
|
||||
return wrap
|
||||
|
||||
|
||||
def lsb_release():
|
||||
'''Return /etc/lsb-release in a dict'''
|
||||
d = {}
|
||||
with open('/etc/lsb-release', 'r') as lsb:
|
||||
for l in lsb:
|
||||
k, v = l.split('=')
|
||||
d[k.strip()] = v.strip()
|
||||
return d
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
318
hooks/hooks.py
318
hooks/hooks.py
@ -1,318 +0,0 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
import lib.utils as utils
|
||||
import lib.cluster_utils as cluster
|
||||
import lib.openstack_common as openstack
|
||||
import sys
|
||||
import quantum_utils as qutils
|
||||
import os
|
||||
|
||||
PLUGIN = utils.config_get('plugin')
|
||||
|
||||
|
||||
def install():
|
||||
utils.configure_source()
|
||||
if PLUGIN in qutils.GATEWAY_PKGS.keys():
|
||||
if PLUGIN in [qutils.OVS, qutils.NVP]:
|
||||
# Install OVS DKMS first to ensure that the ovs module
|
||||
# loaded supports GRE tunnels
|
||||
utils.install('openvswitch-datapath-dkms')
|
||||
utils.install(*qutils.GATEWAY_PKGS[PLUGIN])
|
||||
else:
|
||||
utils.juju_log('ERROR', 'Please provide a valid plugin config')
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@utils.inteli_restart(qutils.RESTART_MAP[PLUGIN])
|
||||
def config_changed():
|
||||
src = utils.config_get('openstack-origin')
|
||||
available = openstack.get_os_codename_install_source(src)
|
||||
installed = openstack.get_os_codename_package('quantum-common')
|
||||
if (available and
|
||||
openstack.get_os_version_codename(available) > \
|
||||
openstack.get_os_version_codename(installed)):
|
||||
qutils.do_openstack_upgrade()
|
||||
|
||||
if PLUGIN in qutils.GATEWAY_PKGS.keys():
|
||||
render_quantum_conf()
|
||||
render_dhcp_agent_conf()
|
||||
render_l3_agent_conf()
|
||||
render_metadata_agent_conf()
|
||||
render_metadata_api_conf()
|
||||
render_plugin_conf()
|
||||
render_ext_port_upstart()
|
||||
render_evacuate_unit()
|
||||
if PLUGIN in [qutils.OVS, qutils.NVP]:
|
||||
qutils.add_bridge(qutils.INT_BRIDGE)
|
||||
qutils.add_bridge(qutils.EXT_BRIDGE)
|
||||
ext_port = utils.config_get('ext-port')
|
||||
if ext_port:
|
||||
qutils.add_bridge_port(qutils.EXT_BRIDGE, ext_port)
|
||||
else:
|
||||
utils.juju_log('ERROR',
|
||||
'Please provide a valid plugin config')
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def upgrade_charm():
|
||||
install()
|
||||
config_changed()
|
||||
|
||||
|
||||
def render_ext_port_upstart():
|
||||
if utils.config_get('ext-port'):
|
||||
with open(qutils.EXT_PORT_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.EXT_PORT_CONF),
|
||||
{"ext_port": utils.config_get('ext-port')}
|
||||
)
|
||||
)
|
||||
else:
|
||||
if os.path.exists(qutils.EXT_PORT_CONF):
|
||||
os.remove(qutils.EXT_PORT_CONF)
|
||||
|
||||
|
||||
def render_l3_agent_conf():
|
||||
context = get_keystone_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.L3_AGENT_CONF)):
|
||||
with open(qutils.L3_AGENT_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.L3_AGENT_CONF),
|
||||
context
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_dhcp_agent_conf():
|
||||
if (os.path.exists(qutils.DHCP_AGENT_CONF)):
|
||||
with open(qutils.DHCP_AGENT_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.DHCP_AGENT_CONF),
|
||||
{"plugin": PLUGIN}
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_metadata_agent_conf():
|
||||
context = get_keystone_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.METADATA_AGENT_CONF)):
|
||||
context['local_ip'] = utils.get_host_ip()
|
||||
context['shared_secret'] = qutils.get_shared_secret()
|
||||
with open(qutils.METADATA_AGENT_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.METADATA_AGENT_CONF),
|
||||
context
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_quantum_conf():
|
||||
context = get_rabbit_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.QUANTUM_CONF)):
|
||||
context['core_plugin'] = \
|
||||
qutils.CORE_PLUGIN[PLUGIN]
|
||||
with open(qutils.QUANTUM_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.QUANTUM_CONF),
|
||||
context
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_plugin_conf():
|
||||
context = get_quantum_db_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.PLUGIN_CONF[PLUGIN])):
|
||||
context['local_ip'] = utils.get_host_ip()
|
||||
conf_file = qutils.PLUGIN_CONF[PLUGIN]
|
||||
with open(conf_file, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(conf_file),
|
||||
context
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_metadata_api_conf():
|
||||
context = get_nova_db_conf()
|
||||
r_context = get_rabbit_conf()
|
||||
q_context = get_keystone_conf()
|
||||
if (context and r_context and q_context and
|
||||
os.path.exists(qutils.NOVA_CONF)):
|
||||
context.update(r_context)
|
||||
context.update(q_context)
|
||||
context['shared_secret'] = qutils.get_shared_secret()
|
||||
with open(qutils.NOVA_CONF, "w") as conf:
|
||||
conf.write(utils.render_template(
|
||||
os.path.basename(qutils.NOVA_CONF),
|
||||
context
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def render_evacuate_unit():
|
||||
context = get_keystone_conf()
|
||||
if context:
|
||||
with open('/usr/local/bin/quantum-evacuate-unit', "w") as conf:
|
||||
conf.write(utils.render_template('evacuate_unit.py', context))
|
||||
os.chmod('/usr/local/bin/quantum-evacuate-unit', 0700)
|
||||
|
||||
|
||||
def get_keystone_conf():
|
||||
for relid in utils.relation_ids('quantum-network-service'):
|
||||
for unit in utils.relation_list(relid):
|
||||
conf = {
|
||||
"keystone_host": utils.relation_get('keystone_host',
|
||||
unit, relid),
|
||||
"service_port": utils.relation_get('service_port',
|
||||
unit, relid),
|
||||
"auth_port": utils.relation_get('auth_port', unit, relid),
|
||||
"service_username": utils.relation_get('service_username',
|
||||
unit, relid),
|
||||
"service_password": utils.relation_get('service_password',
|
||||
unit, relid),
|
||||
"service_tenant": utils.relation_get('service_tenant',
|
||||
unit, relid),
|
||||
"quantum_host": utils.relation_get('quantum_host',
|
||||
unit, relid),
|
||||
"quantum_port": utils.relation_get('quantum_port',
|
||||
unit, relid),
|
||||
"quantum_url": utils.relation_get('quantum_url',
|
||||
unit, relid),
|
||||
"region": utils.relation_get('region',
|
||||
unit, relid)
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
def db_joined():
|
||||
utils.relation_set(quantum_username=qutils.DB_USER,
|
||||
quantum_database=qutils.QUANTUM_DB,
|
||||
quantum_hostname=utils.unit_get('private-address'),
|
||||
nova_username=qutils.NOVA_DB_USER,
|
||||
nova_database=qutils.NOVA_DB,
|
||||
nova_hostname=utils.unit_get('private-address'))
|
||||
|
||||
|
||||
@utils.inteli_restart(qutils.RESTART_MAP[PLUGIN])
|
||||
def db_changed():
|
||||
render_plugin_conf()
|
||||
render_metadata_api_conf()
|
||||
|
||||
|
||||
def get_quantum_db_conf():
|
||||
for relid in utils.relation_ids('shared-db'):
|
||||
for unit in utils.relation_list(relid):
|
||||
conf = {
|
||||
"host": utils.relation_get('db_host',
|
||||
unit, relid),
|
||||
"user": qutils.DB_USER,
|
||||
"password": utils.relation_get('quantum_password',
|
||||
unit, relid),
|
||||
"db": qutils.QUANTUM_DB
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
def get_nova_db_conf():
|
||||
for relid in utils.relation_ids('shared-db'):
|
||||
for unit in utils.relation_list(relid):
|
||||
conf = {
|
||||
"host": utils.relation_get('db_host',
|
||||
unit, relid),
|
||||
"user": qutils.NOVA_DB_USER,
|
||||
"password": utils.relation_get('nova_password',
|
||||
unit, relid),
|
||||
"db": qutils.NOVA_DB
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
def amqp_joined():
|
||||
utils.relation_set(username=qutils.RABBIT_USER,
|
||||
vhost=qutils.RABBIT_VHOST)
|
||||
|
||||
|
||||
@utils.inteli_restart(qutils.RESTART_MAP[PLUGIN])
|
||||
def amqp_changed():
|
||||
render_dhcp_agent_conf()
|
||||
render_quantum_conf()
|
||||
render_metadata_api_conf()
|
||||
|
||||
|
||||
def get_rabbit_conf():
|
||||
for relid in utils.relation_ids('amqp'):
|
||||
for unit in utils.relation_list(relid):
|
||||
conf = {
|
||||
"rabbit_host": utils.relation_get('private-address',
|
||||
unit, relid),
|
||||
"rabbit_virtual_host": qutils.RABBIT_VHOST,
|
||||
"rabbit_userid": qutils.RABBIT_USER,
|
||||
"rabbit_password": utils.relation_get('password',
|
||||
unit, relid)
|
||||
}
|
||||
clustered = utils.relation_get('clustered', unit, relid)
|
||||
if clustered:
|
||||
conf['rabbit_host'] = utils.relation_get('vip', unit, relid)
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
@utils.inteli_restart(qutils.RESTART_MAP[PLUGIN])
|
||||
def nm_changed():
|
||||
render_dhcp_agent_conf()
|
||||
render_l3_agent_conf()
|
||||
render_metadata_agent_conf()
|
||||
render_metadata_api_conf()
|
||||
render_evacuate_unit()
|
||||
store_ca_cert()
|
||||
|
||||
|
||||
def store_ca_cert():
|
||||
ca_cert = get_ca_cert()
|
||||
if ca_cert:
|
||||
qutils.install_ca(ca_cert)
|
||||
|
||||
|
||||
def get_ca_cert():
|
||||
for relid in utils.relation_ids('quantum-network-service'):
|
||||
for unit in utils.relation_list(relid):
|
||||
ca_cert = utils.relation_get('ca_cert', unit, relid)
|
||||
if ca_cert:
|
||||
return ca_cert
|
||||
return None
|
||||
|
||||
|
||||
def cluster_departed():
|
||||
if PLUGIN == 'nvp':
|
||||
utils.juju_log('WARNING',
|
||||
'Unable to re-assign agent resources'
|
||||
' for failed nodes with nvp')
|
||||
return
|
||||
conf = get_keystone_conf()
|
||||
if conf and cluster.eligible_leader(None):
|
||||
qutils.reassign_agent_resources(conf)
|
||||
|
||||
utils.do_hooks({
|
||||
"install": install,
|
||||
"config-changed": config_changed,
|
||||
"upgrade-charm": upgrade_charm,
|
||||
"shared-db-relation-joined": db_joined,
|
||||
"shared-db-relation-changed": db_changed,
|
||||
"amqp-relation-joined": amqp_joined,
|
||||
"amqp-relation-changed": amqp_changed,
|
||||
"quantum-network-service-relation-changed": nm_changed,
|
||||
"cluster-relation-departed": cluster_departed
|
||||
})
|
||||
|
||||
sys.exit(0)
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -9,261 +9,38 @@
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import socket
|
||||
import sys
|
||||
import hashlib
|
||||
from charmhelpers.core.host import (
|
||||
apt_install
|
||||
)
|
||||
from charmhelpers.core.hookenv import (
|
||||
unit_get,
|
||||
cached
|
||||
)
|
||||
|
||||
|
||||
def do_hooks(hooks):
|
||||
hook = os.path.basename(sys.argv[0])
|
||||
|
||||
try:
|
||||
hook_func = hooks[hook]
|
||||
except KeyError:
|
||||
juju_log('INFO',
|
||||
"This charm doesn't know how to handle '{}'.".format(hook))
|
||||
else:
|
||||
hook_func()
|
||||
|
||||
|
||||
def install(*pkgs):
|
||||
cmd = [
|
||||
'apt-get',
|
||||
'-y',
|
||||
'install'
|
||||
]
|
||||
for pkg in pkgs:
|
||||
cmd.append(pkg)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
TEMPLATES_DIR = 'templates'
|
||||
|
||||
try:
|
||||
import jinja2
|
||||
except ImportError:
|
||||
install('python-jinja2')
|
||||
apt_install('python-jinja2', fatal=True)
|
||||
import jinja2
|
||||
|
||||
try:
|
||||
import dns.resolver
|
||||
except ImportError:
|
||||
install('python-dnspython')
|
||||
apt_install('python-dnspython', fatal=True)
|
||||
import dns.resolver
|
||||
|
||||
|
||||
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
|
||||
templates = jinja2.Environment(
|
||||
loader=jinja2.FileSystemLoader(template_dir)
|
||||
)
|
||||
loader=jinja2.FileSystemLoader(template_dir)
|
||||
)
|
||||
template = templates.get_template(template_name)
|
||||
return template.render(context)
|
||||
|
||||
CLOUD_ARCHIVE = \
|
||||
""" # Ubuntu Cloud Archive
|
||||
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||
"""
|
||||
|
||||
CLOUD_ARCHIVE_POCKETS = {
|
||||
'folsom': 'precise-updates/folsom',
|
||||
'folsom/updates': 'precise-updates/folsom',
|
||||
'folsom/proposed': 'precise-proposed/folsom',
|
||||
'grizzly': 'precise-updates/grizzly',
|
||||
'grizzly/updates': 'precise-updates/grizzly',
|
||||
'grizzly/proposed': 'precise-proposed/grizzly'
|
||||
}
|
||||
|
||||
|
||||
def configure_source():
|
||||
source = str(config_get('openstack-origin'))
|
||||
if not source:
|
||||
return
|
||||
if source.startswith('ppa:'):
|
||||
cmd = [
|
||||
'add-apt-repository',
|
||||
source
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
if source.startswith('cloud:'):
|
||||
# CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
|
||||
# cloud:precise-folsom/updates or cloud:precise-folsom/proposed
|
||||
install('ubuntu-cloud-keyring')
|
||||
pocket = source.split(':')[1]
|
||||
pocket = pocket.split('-')[1]
|
||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
|
||||
apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
|
||||
if source.startswith('deb'):
|
||||
l = len(source.split('|'))
|
||||
if l == 2:
|
||||
(apt_line, key) = source.split('|')
|
||||
cmd = [
|
||||
'apt-key',
|
||||
'adv', '--keyserver keyserver.ubuntu.com',
|
||||
'--recv-keys', key
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
elif l == 1:
|
||||
apt_line = source
|
||||
|
||||
with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
|
||||
apt.write(apt_line + "\n")
|
||||
cmd = [
|
||||
'apt-get',
|
||||
'update'
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
# Protocols
|
||||
TCP = 'TCP'
|
||||
UDP = 'UDP'
|
||||
|
||||
|
||||
def expose(port, protocol='TCP'):
|
||||
cmd = [
|
||||
'open-port',
|
||||
'{}/{}'.format(port, protocol)
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
def juju_log(severity, message):
|
||||
cmd = [
|
||||
'juju-log',
|
||||
'--log-level', severity,
|
||||
message
|
||||
]
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
cache = {}
|
||||
|
||||
|
||||
def cached(func):
|
||||
def wrapper(*args, **kwargs):
|
||||
global cache
|
||||
key = str((func, args, kwargs))
|
||||
try:
|
||||
return cache[key]
|
||||
except KeyError:
|
||||
res = func(*args, **kwargs)
|
||||
cache[key] = res
|
||||
return res
|
||||
return wrapper
|
||||
|
||||
|
||||
@cached
|
||||
def relation_ids(relation):
|
||||
cmd = [
|
||||
'relation-ids',
|
||||
relation
|
||||
]
|
||||
result = str(subprocess.check_output(cmd)).split()
|
||||
if result == "":
|
||||
return None
|
||||
else:
|
||||
return result
|
||||
|
||||
|
||||
@cached
|
||||
def relation_list(rid):
|
||||
cmd = [
|
||||
'relation-list',
|
||||
'-r', rid,
|
||||
]
|
||||
result = str(subprocess.check_output(cmd)).split()
|
||||
if result == "":
|
||||
return None
|
||||
else:
|
||||
return result
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get(attribute, unit=None, rid=None):
|
||||
cmd = [
|
||||
'relation-get',
|
||||
]
|
||||
if rid:
|
||||
cmd.append('-r')
|
||||
cmd.append(rid)
|
||||
cmd.append(attribute)
|
||||
if unit:
|
||||
cmd.append(unit)
|
||||
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
if value == "":
|
||||
return None
|
||||
else:
|
||||
return value
|
||||
|
||||
|
||||
@cached
|
||||
def relation_get_dict(relation_id=None, remote_unit=None):
|
||||
"""Obtain all relation data as dict by way of JSON"""
|
||||
cmd = [
|
||||
'relation-get', '--format=json'
|
||||
]
|
||||
if relation_id:
|
||||
cmd.append('-r')
|
||||
cmd.append(relation_id)
|
||||
if remote_unit:
|
||||
remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
|
||||
os.environ['JUJU_REMOTE_UNIT'] = remote_unit
|
||||
j = subprocess.check_output(cmd)
|
||||
if remote_unit and remote_unit_orig:
|
||||
os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
|
||||
d = json.loads(j)
|
||||
settings = {}
|
||||
# convert unicode to strings
|
||||
for k, v in d.iteritems():
|
||||
settings[str(k)] = str(v)
|
||||
return settings
|
||||
|
||||
|
||||
def relation_set(**kwargs):
|
||||
cmd = [
|
||||
'relation-set'
|
||||
]
|
||||
args = []
|
||||
for k, v in kwargs.items():
|
||||
if k == 'rid':
|
||||
if v:
|
||||
cmd.append('-r')
|
||||
cmd.append(v)
|
||||
else:
|
||||
args.append('{}={}'.format(k, v))
|
||||
cmd += args
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
@cached
|
||||
def unit_get(attribute):
|
||||
cmd = [
|
||||
'unit-get',
|
||||
attribute
|
||||
]
|
||||
value = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
if value == "":
|
||||
return None
|
||||
else:
|
||||
return value
|
||||
|
||||
|
||||
@cached
|
||||
def config_get(attribute):
|
||||
cmd = [
|
||||
'config-get',
|
||||
'--format',
|
||||
'json',
|
||||
]
|
||||
out = subprocess.check_output(cmd).strip() # IGNORE:E1103
|
||||
cfg = json.loads(out)
|
||||
|
||||
try:
|
||||
return cfg[attribute]
|
||||
except KeyError:
|
||||
return None
|
||||
|
||||
|
||||
@cached
|
||||
def get_unit_hostname():
|
||||
@ -271,7 +48,8 @@ def get_unit_hostname():
|
||||
|
||||
|
||||
@cached
|
||||
def get_host_ip(hostname=unit_get('private-address')):
|
||||
def get_host_ip(hostname=None):
|
||||
hostname = hostname or unit_get('private-address')
|
||||
try:
|
||||
# Test to see if already an IPv4 address
|
||||
socket.inet_aton(hostname)
|
||||
@ -281,79 +59,3 @@ def get_host_ip(hostname=unit_get('private-address')):
|
||||
if answers:
|
||||
return answers[0].address
|
||||
return None
|
||||
|
||||
|
||||
def _svc_control(service, action):
|
||||
subprocess.check_call(['service', service, action])
|
||||
|
||||
|
||||
def restart(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'restart')
|
||||
|
||||
|
||||
def stop(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'stop')
|
||||
|
||||
|
||||
def start(*services):
|
||||
for service in services:
|
||||
_svc_control(service, 'start')
|
||||
|
||||
|
||||
def reload(*services):
|
||||
for service in services:
|
||||
try:
|
||||
_svc_control(service, 'reload')
|
||||
except subprocess.CalledProcessError:
|
||||
# Reload failed - either service does not support reload
|
||||
# or it was not running - restart will fixup most things
|
||||
_svc_control(service, 'restart')
|
||||
|
||||
|
||||
def running(service):
|
||||
try:
|
||||
output = subprocess.check_output(['service', service, 'status'])
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
if ("start/running" in output or
|
||||
"is running" in output):
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def file_hash(path):
|
||||
if os.path.exists(path):
|
||||
h = hashlib.md5()
|
||||
with open(path, 'r') as source:
|
||||
h.update(source.read()) # IGNORE:E1101 - it does have update
|
||||
return h.hexdigest()
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
def inteli_restart(restart_map):
|
||||
def wrap(f):
|
||||
def wrapped_f(*args):
|
||||
checksums = {}
|
||||
for path in restart_map:
|
||||
checksums[path] = file_hash(path)
|
||||
f(*args)
|
||||
restarts = []
|
||||
for path in restart_map:
|
||||
if checksums[path] != file_hash(path):
|
||||
restarts += restart_map[path]
|
||||
restart(*list(set(restarts)))
|
||||
return wrapped_f
|
||||
return wrap
|
||||
|
||||
|
||||
def is_relation_made(relation, key='private-address'):
|
||||
for r_id in (relation_ids(relation) or []):
|
||||
for unit in (relation_list(r_id) or []):
|
||||
if relation_get(key, rid=r_id, unit=unit):
|
||||
return True
|
||||
return False
|
||||
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
341
hooks/quantum_relations.py
Executable file
341
hooks/quantum_relations.py
Executable file
@ -0,0 +1,341 @@
|
||||
#!/usr/bin/python
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log, ERROR, WARNING,
|
||||
config,
|
||||
relation_ids,
|
||||
related_units,
|
||||
relation_get,
|
||||
relation_set,
|
||||
unit_get,
|
||||
Hooks, UnregisteredHookError
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
apt_update,
|
||||
apt_install,
|
||||
restart_on_change
|
||||
)
|
||||
from charmhelpers.contrib.hahelpers.cluster_utils import(
|
||||
eligible_leader
|
||||
)
|
||||
from charmhelpers.contrib.openstack.openstack_utils import (
|
||||
configure_installation_source,
|
||||
get_os_codename_install_source,
|
||||
get_os_codename_package,
|
||||
get_os_version_codename
|
||||
)
|
||||
from charmhelpers.contrib.network.ovs import (
|
||||
add_bridge,
|
||||
add_bridge_port
|
||||
)
|
||||
|
||||
from lib.utils import render_template, get_host_ip
|
||||
|
||||
import sys
|
||||
import quantum_utils as qutils
|
||||
import os
|
||||
|
||||
PLUGIN = config('plugin')
|
||||
hooks = Hooks()
|
||||
|
||||
|
||||
@hooks.hook()
|
||||
def install():
|
||||
configure_installation_source(config('openstack-origin'))
|
||||
apt_update(fatal=True)
|
||||
if PLUGIN in qutils.GATEWAY_PKGS.keys():
|
||||
if PLUGIN in [qutils.OVS, qutils.NVP]:
|
||||
# Install OVS DKMS first to ensure that the ovs module
|
||||
# loaded supports GRE tunnels
|
||||
apt_install('openvswitch-datapath-dkms', fatal=True)
|
||||
apt_install(qutils.GATEWAY_PKGS[PLUGIN], fatal=True)
|
||||
else:
|
||||
log('Please provide a valid plugin config', level=ERROR)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@hooks.hook()
|
||||
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
|
||||
def config_changed():
|
||||
src = config('openstack-origin')
|
||||
available = get_os_codename_install_source(src)
|
||||
installed = get_os_codename_package('quantum-common')
|
||||
if (available and
|
||||
get_os_version_codename(available) >
|
||||
get_os_version_codename(installed)):
|
||||
qutils.do_openstack_upgrade()
|
||||
|
||||
if PLUGIN in qutils.GATEWAY_PKGS.keys():
|
||||
render_quantum_conf()
|
||||
render_dhcp_agent_conf()
|
||||
render_l3_agent_conf()
|
||||
render_metadata_agent_conf()
|
||||
render_metadata_api_conf()
|
||||
render_plugin_conf()
|
||||
render_ext_port_upstart()
|
||||
render_evacuate_unit()
|
||||
if PLUGIN in [qutils.OVS, qutils.NVP]:
|
||||
add_bridge(qutils.INT_BRIDGE)
|
||||
add_bridge(qutils.EXT_BRIDGE)
|
||||
ext_port = config('ext-port')
|
||||
if ext_port:
|
||||
add_bridge_port(qutils.EXT_BRIDGE, ext_port)
|
||||
else:
|
||||
log('Please provide a valid plugin config', level=ERROR)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
@hooks.hook()
|
||||
def upgrade_charm():
|
||||
install()
|
||||
config_changed()
|
||||
|
||||
|
||||
def render_ext_port_upstart():
|
||||
if config('ext-port'):
|
||||
with open(qutils.EXT_PORT_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.EXT_PORT_CONF),
|
||||
{"ext_port": config('ext-port')})
|
||||
)
|
||||
else:
|
||||
if os.path.exists(qutils.EXT_PORT_CONF):
|
||||
os.remove(qutils.EXT_PORT_CONF)
|
||||
|
||||
|
||||
def render_l3_agent_conf():
|
||||
context = get_keystone_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.L3_AGENT_CONF)):
|
||||
with open(qutils.L3_AGENT_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.L3_AGENT_CONF),
|
||||
context)
|
||||
)
|
||||
|
||||
|
||||
def render_dhcp_agent_conf():
|
||||
if (os.path.exists(qutils.DHCP_AGENT_CONF)):
|
||||
with open(qutils.DHCP_AGENT_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.DHCP_AGENT_CONF),
|
||||
{"plugin": PLUGIN})
|
||||
)
|
||||
|
||||
|
||||
def render_metadata_agent_conf():
|
||||
context = get_keystone_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.METADATA_AGENT_CONF)):
|
||||
context['local_ip'] = get_host_ip()
|
||||
context['shared_secret'] = qutils.get_shared_secret()
|
||||
with open(qutils.METADATA_AGENT_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.METADATA_AGENT_CONF),
|
||||
context)
|
||||
)
|
||||
|
||||
|
||||
def render_quantum_conf():
|
||||
context = get_rabbit_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.QUANTUM_CONF)):
|
||||
context['core_plugin'] = \
|
||||
qutils.CORE_PLUGIN[PLUGIN]
|
||||
with open(qutils.QUANTUM_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.QUANTUM_CONF),
|
||||
context)
|
||||
)
|
||||
|
||||
|
||||
def render_plugin_conf():
|
||||
context = get_quantum_db_conf()
|
||||
if (context and
|
||||
os.path.exists(qutils.PLUGIN_CONF[PLUGIN])):
|
||||
context['local_ip'] = get_host_ip()
|
||||
conf_file = qutils.PLUGIN_CONF[PLUGIN]
|
||||
with open(conf_file, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(conf_file),
|
||||
context)
|
||||
)
|
||||
|
||||
|
||||
def render_metadata_api_conf():
|
||||
context = get_nova_db_conf()
|
||||
r_context = get_rabbit_conf()
|
||||
q_context = get_keystone_conf()
|
||||
if (context and r_context and q_context and
|
||||
os.path.exists(qutils.NOVA_CONF)):
|
||||
context.update(r_context)
|
||||
context.update(q_context)
|
||||
context['shared_secret'] = qutils.get_shared_secret()
|
||||
with open(qutils.NOVA_CONF, "w") as conf:
|
||||
conf.write(
|
||||
render_template(os.path.basename(qutils.NOVA_CONF),
|
||||
context)
|
||||
)
|
||||
|
||||
|
||||
def render_evacuate_unit():
|
||||
context = get_keystone_conf()
|
||||
if context:
|
||||
with open('/usr/local/bin/quantum-evacuate-unit', "w") as conf:
|
||||
conf.write(render_template('evacuate_unit.py', context))
|
||||
os.chmod('/usr/local/bin/quantum-evacuate-unit', 0700)
|
||||
|
||||
|
||||
def get_keystone_conf():
|
||||
for relid in relation_ids('quantum-network-service'):
|
||||
for unit in related_units(relid):
|
||||
conf = {
|
||||
"keystone_host": relation_get('keystone_host',
|
||||
unit, relid),
|
||||
"service_port": relation_get('service_port',
|
||||
unit, relid),
|
||||
"auth_port": relation_get('auth_port', unit, relid),
|
||||
"service_username": relation_get('service_username',
|
||||
unit, relid),
|
||||
"service_password": relation_get('service_password',
|
||||
unit, relid),
|
||||
"service_tenant": relation_get('service_tenant',
|
||||
unit, relid),
|
||||
"quantum_host": relation_get('quantum_host',
|
||||
unit, relid),
|
||||
"quantum_port": relation_get('quantum_port',
|
||||
unit, relid),
|
||||
"quantum_url": relation_get('quantum_url',
|
||||
unit, relid),
|
||||
"region": relation_get('region',
|
||||
unit, relid)
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-joined')
|
||||
def db_joined():
|
||||
relation_set(quantum_username=qutils.DB_USER,
|
||||
quantum_database=qutils.QUANTUM_DB,
|
||||
quantum_hostname=unit_get('private-address'),
|
||||
nova_username=qutils.NOVA_DB_USER,
|
||||
nova_database=qutils.NOVA_DB,
|
||||
nova_hostname=unit_get('private-address'))
|
||||
|
||||
|
||||
@hooks.hook('shared-db-relation-changed')
|
||||
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
|
||||
def db_changed():
|
||||
render_plugin_conf()
|
||||
render_metadata_api_conf()
|
||||
|
||||
|
||||
def get_quantum_db_conf():
|
||||
for relid in relation_ids('shared-db'):
|
||||
for unit in related_units(relid):
|
||||
conf = {
|
||||
"host": relation_get('db_host',
|
||||
unit, relid),
|
||||
"user": qutils.DB_USER,
|
||||
"password": relation_get('quantum_password',
|
||||
unit, relid),
|
||||
"db": qutils.QUANTUM_DB
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
def get_nova_db_conf():
|
||||
for relid in relation_ids('shared-db'):
|
||||
for unit in related_units(relid):
|
||||
conf = {
|
||||
"host": relation_get('db_host',
|
||||
unit, relid),
|
||||
"user": qutils.NOVA_DB_USER,
|
||||
"password": relation_get('nova_password',
|
||||
unit, relid),
|
||||
"db": qutils.NOVA_DB
|
||||
}
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
@hooks.hook('amqp-relation-joined')
|
||||
def amqp_joined():
|
||||
relation_set(username=qutils.RABBIT_USER,
|
||||
vhost=qutils.RABBIT_VHOST)
|
||||
|
||||
|
||||
@hooks.hook('amqp-relation-changed')
|
||||
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
|
||||
def amqp_changed():
|
||||
render_dhcp_agent_conf()
|
||||
render_quantum_conf()
|
||||
render_metadata_api_conf()
|
||||
|
||||
|
||||
def get_rabbit_conf():
|
||||
for relid in relation_ids('amqp'):
|
||||
for unit in related_units(relid):
|
||||
conf = {
|
||||
"rabbit_host": relation_get('private-address',
|
||||
unit, relid),
|
||||
"rabbit_virtual_host": qutils.RABBIT_VHOST,
|
||||
"rabbit_userid": qutils.RABBIT_USER,
|
||||
"rabbit_password": relation_get('password',
|
||||
unit, relid)
|
||||
}
|
||||
clustered = relation_get('clustered', unit, relid)
|
||||
if clustered:
|
||||
conf['rabbit_host'] = relation_get('vip', unit, relid)
|
||||
if None not in conf.itervalues():
|
||||
return conf
|
||||
return None
|
||||
|
||||
|
||||
@hooks.hook('quantum-network-service-relation-changed')
|
||||
@restart_on_change(qutils.RESTART_MAP[PLUGIN])
|
||||
def nm_changed():
|
||||
render_dhcp_agent_conf()
|
||||
render_l3_agent_conf()
|
||||
render_metadata_agent_conf()
|
||||
render_metadata_api_conf()
|
||||
render_evacuate_unit()
|
||||
store_ca_cert()
|
||||
|
||||
|
||||
def store_ca_cert():
|
||||
ca_cert = get_ca_cert()
|
||||
if ca_cert:
|
||||
qutils.install_ca(ca_cert)
|
||||
|
||||
|
||||
def get_ca_cert():
|
||||
for relid in relation_ids('quantum-network-service'):
|
||||
for unit in related_units(relid):
|
||||
ca_cert = relation_get('ca_cert', unit, relid)
|
||||
if ca_cert:
|
||||
return ca_cert
|
||||
return None
|
||||
|
||||
|
||||
@hooks.hook("cluster-relation-departed")
|
||||
def cluster_departed():
|
||||
if PLUGIN == 'nvp':
|
||||
log('Unable to re-assign agent resources for failed nodes with nvp',
|
||||
level=WARNING)
|
||||
return
|
||||
conf = get_keystone_conf()
|
||||
if conf and eligible_leader(None):
|
||||
qutils.reassign_agent_resources(conf)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
try:
|
||||
hooks.execute(sys.argv)
|
||||
except UnregisteredHookError as e:
|
||||
log('Unknown hook {} - skipping.'.format(e))
|
@ -3,12 +3,16 @@ import os
|
||||
import uuid
|
||||
import base64
|
||||
import apt_pkg as apt
|
||||
from lib.utils import (
|
||||
juju_log as log,
|
||||
configure_source,
|
||||
config_get
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
config
|
||||
)
|
||||
from charmhelpers.core.host import (
|
||||
apt_install
|
||||
)
|
||||
from charmhelpers.contrib.openstack.openstack_utils import (
|
||||
configure_installation_source
|
||||
)
|
||||
|
||||
OVS = "ovs"
|
||||
NVP = "nvp"
|
||||
@ -20,7 +24,7 @@ NVP_PLUGIN = \
|
||||
CORE_PLUGIN = {
|
||||
OVS: OVS_PLUGIN,
|
||||
NVP: NVP_PLUGIN
|
||||
}
|
||||
}
|
||||
|
||||
OVS_PLUGIN_CONF = \
|
||||
"/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini"
|
||||
@ -29,7 +33,7 @@ NVP_PLUGIN_CONF = \
|
||||
PLUGIN_CONF = {
|
||||
OVS: OVS_PLUGIN_CONF,
|
||||
NVP: NVP_PLUGIN_CONF
|
||||
}
|
||||
}
|
||||
|
||||
GATEWAY_PKGS = {
|
||||
OVS: [
|
||||
@ -38,14 +42,14 @@ GATEWAY_PKGS = {
|
||||
"quantum-dhcp-agent",
|
||||
'python-mysqldb',
|
||||
"nova-api-metadata"
|
||||
],
|
||||
],
|
||||
NVP: [
|
||||
"openvswitch-switch",
|
||||
"quantum-dhcp-agent",
|
||||
'python-mysqldb',
|
||||
"nova-api-metadata"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
GATEWAY_AGENTS = {
|
||||
OVS: [
|
||||
@ -53,12 +57,12 @@ GATEWAY_AGENTS = {
|
||||
"quantum-l3-agent",
|
||||
"quantum-dhcp-agent",
|
||||
"nova-api-metadata"
|
||||
],
|
||||
],
|
||||
NVP: [
|
||||
"quantum-dhcp-agent",
|
||||
"nova-api-metadata"
|
||||
],
|
||||
}
|
||||
],
|
||||
}
|
||||
|
||||
EXT_PORT_CONF = '/etc/init/ext-port.conf'
|
||||
|
||||
@ -96,45 +100,45 @@ OVS_RESTART_MAP = {
|
||||
'quantum-dhcp-agent',
|
||||
'quantum-metadata-agent',
|
||||
'quantum-plugin-openvswitch-agent'
|
||||
],
|
||||
],
|
||||
DHCP_AGENT_CONF: [
|
||||
'quantum-dhcp-agent'
|
||||
],
|
||||
],
|
||||
L3_AGENT_CONF: [
|
||||
'quantum-l3-agent'
|
||||
],
|
||||
],
|
||||
METADATA_AGENT_CONF: [
|
||||
'quantum-metadata-agent'
|
||||
],
|
||||
],
|
||||
OVS_PLUGIN_CONF: [
|
||||
'quantum-plugin-openvswitch-agent'
|
||||
],
|
||||
],
|
||||
NOVA_CONF: [
|
||||
'nova-api-metadata'
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
NVP_RESTART_MAP = {
|
||||
QUANTUM_CONF: [
|
||||
'quantum-dhcp-agent',
|
||||
'quantum-metadata-agent'
|
||||
],
|
||||
],
|
||||
DHCP_AGENT_CONF: [
|
||||
'quantum-dhcp-agent'
|
||||
],
|
||||
],
|
||||
METADATA_AGENT_CONF: [
|
||||
'quantum-metadata-agent'
|
||||
],
|
||||
],
|
||||
NOVA_CONF: [
|
||||
'nova-api-metadata'
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
RESTART_MAP = {
|
||||
OVS: OVS_RESTART_MAP,
|
||||
NVP: NVP_RESTART_MAP
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
RABBIT_USER = "nova"
|
||||
@ -143,41 +147,6 @@ RABBIT_VHOST = "nova"
|
||||
INT_BRIDGE = "br-int"
|
||||
EXT_BRIDGE = "br-ex"
|
||||
|
||||
|
||||
def add_bridge(name):
|
||||
status = subprocess.check_output(["ovs-vsctl", "show"])
|
||||
if "Bridge {}".format(name) not in status:
|
||||
log('INFO', 'Creating bridge {}'.format(name))
|
||||
subprocess.check_call(["ovs-vsctl", "add-br", name])
|
||||
|
||||
|
||||
def del_bridge(name):
|
||||
status = subprocess.check_output(["ovs-vsctl", "show"])
|
||||
if "Bridge {}".format(name) in status:
|
||||
log('INFO', 'Deleting bridge {}'.format(name))
|
||||
subprocess.check_call(["ovs-vsctl", "del-br", name])
|
||||
|
||||
|
||||
def add_bridge_port(name, port):
|
||||
status = subprocess.check_output(["ovs-vsctl", "show"])
|
||||
if ("Bridge {}".format(name) in status and
|
||||
"Interface \"{}\"".format(port) not in status):
|
||||
log('INFO',
|
||||
'Adding port {} to bridge {}'.format(port, name))
|
||||
subprocess.check_call(["ovs-vsctl", "add-port", name, port])
|
||||
subprocess.check_call(["ip", "link", "set", port, "up"])
|
||||
|
||||
|
||||
def del_bridge_port(name, port):
|
||||
status = subprocess.check_output(["ovs-vsctl", "show"])
|
||||
if ("Bridge {}".format(name) in status and
|
||||
"Interface \"{}\"".format(port) in status):
|
||||
log('INFO',
|
||||
'Deleting port {} from bridge {}'.format(port, name))
|
||||
subprocess.check_call(["ovs-vsctl", "del-port", name, port])
|
||||
subprocess.check_call(["ip", "link", "set", port, "down"])
|
||||
|
||||
|
||||
SHARED_SECRET = "/etc/quantum/secret.txt"
|
||||
|
||||
|
||||
@ -198,11 +167,11 @@ def flush_local_configuration():
|
||||
cmd = [
|
||||
"quantum-netns-cleanup",
|
||||
"--config-file=/etc/quantum/quantum.conf"
|
||||
]
|
||||
]
|
||||
for agent_conf in ['l3_agent.ini', 'dhcp_agent.ini']:
|
||||
agent_cmd = list(cmd)
|
||||
agent_cmd.append('--config-file=/etc/quantum/{}'\
|
||||
.format(agent_conf))
|
||||
agent_cmd.append('--config-file=/etc/quantum/{}'
|
||||
.format(agent_conf))
|
||||
subprocess.call(agent_cmd)
|
||||
|
||||
|
||||
@ -233,7 +202,7 @@ def reassign_agent_resources(env):
|
||||
networks = {}
|
||||
for agent in agents['agents']:
|
||||
if not agent['alive']:
|
||||
log('INFO', 'DHCP Agent %s down' % agent['id'])
|
||||
log('DHCP Agent %s down' % agent['id'])
|
||||
for network in \
|
||||
quantum.list_networks_on_dhcp_agent(agent['id'])['networks']:
|
||||
networks[network['id']] = agent['id']
|
||||
@ -244,7 +213,7 @@ def reassign_agent_resources(env):
|
||||
routers = {}
|
||||
for agent in agents['agents']:
|
||||
if not agent['alive']:
|
||||
log('INFO', 'L3 Agent %s down' % agent['id'])
|
||||
log('L3 Agent %s down' % agent['id'])
|
||||
for router in \
|
||||
quantum.list_routers_on_l3_agent(agent['id'])['routers']:
|
||||
routers[router['id']] = agent['id']
|
||||
@ -254,8 +223,7 @@ def reassign_agent_resources(env):
|
||||
index = 0
|
||||
for router_id in routers:
|
||||
agent = index % len(l3_agents)
|
||||
log('INFO',
|
||||
'Moving router %s from %s to %s' % \
|
||||
log('Moving router %s from %s to %s' %
|
||||
(router_id, routers[router_id], l3_agents[agent]))
|
||||
quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
|
||||
router_id=router_id)
|
||||
@ -266,8 +234,7 @@ def reassign_agent_resources(env):
|
||||
index = 0
|
||||
for network_id in networks:
|
||||
agent = index % len(dhcp_agents)
|
||||
log('INFO',
|
||||
'Moving network %s from %s to %s' % \
|
||||
log('Moving network %s from %s to %s' %
|
||||
(network_id, networks[network_id], dhcp_agents[agent]))
|
||||
quantum.remove_network_from_dhcp_agent(dhcp_agent=networks[network_id],
|
||||
network_id=network_id)
|
||||
@ -275,16 +242,17 @@ def reassign_agent_resources(env):
|
||||
body={'network_id': network_id})
|
||||
index += 1
|
||||
|
||||
|
||||
def do_openstack_upgrade():
|
||||
configure_source()
|
||||
plugin = config_get('plugin')
|
||||
configure_installation_source(config('openstack-origin'))
|
||||
plugin = config('plugin')
|
||||
pkgs = []
|
||||
if plugin in GATEWAY_PKGS.keys():
|
||||
pkgs += GATEWAY_PKGS[plugin]
|
||||
if plugin == OVS:
|
||||
pkgs.extend(GATEWAY_PKGS[plugin])
|
||||
if plugin in [OVS, NVP]:
|
||||
pkgs.append('openvswitch-datapath-dkms')
|
||||
cmd = ['apt-get', '-y',
|
||||
'--option', 'Dpkg::Options::=--force-confold',
|
||||
'--option', 'Dpkg::Options::=--force-confdef',
|
||||
'install'] + pkgs
|
||||
subprocess.check_call(cmd)
|
||||
dpkg_opts = [
|
||||
'--option', 'Dpkg::Options::=--force-confold',
|
||||
'--option', 'Dpkg::Options::=--force-confdef'
|
||||
]
|
||||
apt_install(pkgs, options=dpkg_opts, fatal=True)
|
||||
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
@ -1 +1 @@
|
||||
hooks.py
|
||||
quantum_relations.py
|
Loading…
x
Reference in New Issue
Block a user