diff --git a/config.yaml b/config.yaml index 3949b2c..c0b81f3 100644 --- a/config.yaml +++ b/config.yaml @@ -26,11 +26,11 @@ options: default: nova type: string decsription: Rabbitmq vhost - db-user: + database-user: default: nova type: string description: Username for database access - nova-db: + database: default: nova type: string description: Database name diff --git a/hooks/__init__.py b/hooks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/amqp-relation-changed b/hooks/amqp-relation-changed index 6f9ff4f..6eb6593 120000 --- a/hooks/amqp-relation-changed +++ b/hooks/amqp-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/amqp-relation-joined b/hooks/amqp-relation-joined index 6f9ff4f..6eb6593 120000 --- a/hooks/amqp-relation-joined +++ b/hooks/amqp-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/ceph-relation-changed b/hooks/ceph-relation-changed index 6f9ff4f..6eb6593 120000 --- a/hooks/ceph-relation-changed +++ b/hooks/ceph-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/ceph-relation-joined b/hooks/ceph-relation-joined index 6f9ff4f..6eb6593 120000 --- a/hooks/ceph-relation-joined +++ b/hooks/ceph-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/charmhelpers/__init__.py b/hooks/charmhelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/__init__.py b/hooks/charmhelpers/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/__init__.py b/hooks/charmhelpers/contrib/hahelpers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/hahelpers/apache.py b/hooks/charmhelpers/contrib/hahelpers/apache.py new file mode 100644 index 0000000..3208a85 --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/apache.py @@ -0,0 +1,58 @@ +# +# Copyright 2012 Canonical Ltd. 
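+#
+# Helpers for retrieving SSL certificates and keys from charm
+# configuration or from an identity-service relation.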
+# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess + +from charmhelpers.core.hookenv import ( + config as config_get, + relation_get, + relation_ids, + related_units as relation_list, + log, + INFO, +) + + +def get_cert(): + cert = config_get('ssl_cert') + key = config_get('ssl_key') + if not (cert and key): + log("Inspecting identity-service relations for SSL certificate.", + level=INFO) + cert = key = None + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not cert: + cert = relation_get('ssl_cert', + rid=r_id, unit=unit) + if not key: + key = relation_get('ssl_key', + rid=r_id, unit=unit) + return (cert, key) + + +def get_ca_cert(): + ca_cert = None + log("Inspecting identity-service relations for CA SSL certificate.", + level=INFO) + for r_id in relation_ids('identity-service'): + for unit in relation_list(r_id): + if not ca_cert: + ca_cert = relation_get('ca_cert', + rid=r_id, unit=unit) + return ca_cert + + +def install_ca_cert(ca_cert): + if ca_cert: + with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt', + 'w') as crt: + crt.write(ca_cert) + subprocess.check_call(['update-ca-certificates', '--fresh']) diff --git a/hooks/charmhelpers/contrib/hahelpers/ceph.py b/hooks/charmhelpers/contrib/hahelpers/ceph.py new file mode 100644 index 0000000..fb1b8b9 --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/ceph.py @@ -0,0 +1,278 @@ +# +# Copyright 2012 Canonical Ltd. +# +# This file is sourced from lp:openstack-charm-helpers +# +# Authors: +# James Page +# Adam Gandelman +# + +import commands +import os +import shutil + +from subprocess import ( + check_call, + check_output, + CalledProcessError +) + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, + log, + INFO, +) + +from charmhelpers.core.host import ( + apt_install, + mount, + mounts, + service_start, + service_stop, + umount, +) + +KEYRING = '/etc/ceph/ceph.client.%s.keyring' +KEYFILE = '/etc/ceph/ceph.client.%s.key' + +CEPH_CONF = """[global] + auth supported = %(auth)s + keyring = %(keyring)s + mon host = %(mon_hosts)s +""" + + +def running(service): + # this local util can be dropped as soon the following branch lands + # in lp:charm-helpers + # https://code.launchpad.net/~gandelman-a/charm-helpers/service_running/ + try: + output = check_output(['service', service, 'status']) + except CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def install(): + ceph_dir = "/etc/ceph" + if not os.path.isdir(ceph_dir): + os.mkdir(ceph_dir) + apt_install('ceph-common', fatal=True) + + +def rbd_exists(service, pool, rbd_img): + (rc, out) = commands.getstatusoutput('rbd list --id %s --pool %s' % + (service, pool)) + return rbd_img in out + + +def create_rbd_image(service, pool, image, sizemb): + cmd = [ + 'rbd', + 'create', + image, + '--size', + str(sizemb), + '--id', + service, + '--pool', + pool + ] + check_call(cmd) + + +def pool_exists(service, name): + (rc, out) = commands.getstatusoutput("rados --id %s lspools" % service) + return name in out + + +def create_pool(service, name): + cmd = [ + 'rados', + '--id', + service, + 'mkpool', + name + ] + check_call(cmd) + + +def keyfile_path(service): + return KEYFILE % service + + +def keyring_path(service): + return KEYRING % service + + +def create_keyring(service, key): + keyring = keyring_path(service) + if 
os.path.exists(keyring): + log('ceph: Keyring exists at %s.' % keyring, level=INFO) + cmd = [ + 'ceph-authtool', + keyring, + '--create-keyring', + '--name=client.%s' % service, + '--add-key=%s' % key + ] + check_call(cmd) + log('ceph: Created new ring at %s.' % keyring, level=INFO) + + +def create_key_file(service, key): + # create a file containing the key + keyfile = keyfile_path(service) + if os.path.exists(keyfile): + log('ceph: Keyfile exists at %s.' % keyfile, level=INFO) + fd = open(keyfile, 'w') + fd.write(key) + fd.close() + log('ceph: Created new keyfile at %s.' % keyfile, level=INFO) + + +def get_ceph_nodes(): + hosts = [] + for r_id in relation_ids('ceph'): + for unit in related_units(r_id): + hosts.append(relation_get('private-address', unit=unit, rid=r_id)) + return hosts + + +def configure(service, key, auth): + create_keyring(service, key) + create_key_file(service, key) + hosts = get_ceph_nodes() + mon_hosts = ",".join(map(str, hosts)) + keyring = keyring_path(service) + with open('/etc/ceph/ceph.conf', 'w') as ceph_conf: + ceph_conf.write(CEPH_CONF % locals()) + modprobe_kernel_module('rbd') + + +def image_mapped(image_name): + (rc, out) = commands.getstatusoutput('rbd showmapped') + return image_name in out + + +def map_block_storage(service, pool, image): + cmd = [ + 'rbd', + 'map', + '%s/%s' % (pool, image), + '--user', + service, + '--secret', + keyfile_path(service), + ] + check_call(cmd) + + +def filesystem_mounted(fs): + return fs in [f for m, f in mounts()] + + +def make_filesystem(blk_device, fstype='ext4'): + log('ceph: Formatting block device %s as filesystem %s.' % + (blk_device, fstype), level=INFO) + cmd = ['mkfs', '-t', fstype, blk_device] + check_call(cmd) + + +def place_data_on_ceph(service, blk_device, data_src_dst, fstype='ext4'): + # mount block device into /mnt + mount(blk_device, '/mnt') + + # copy data to /mnt + try: + copy_files(data_src_dst, '/mnt') + except: + pass + + # umount block device + umount('/mnt') + + _dir = os.stat(data_src_dst) + uid = _dir.st_uid + gid = _dir.st_gid + + # re-mount where the data should originally be + mount(blk_device, data_src_dst, persist=True) + + # ensure original ownership of new mount. + cmd = ['chown', '-R', '%s:%s' % (uid, gid), data_src_dst] + check_call(cmd) + + +# TODO: re-use +def modprobe_kernel_module(module): + log('ceph: Loading kernel module', level=INFO) + cmd = ['modprobe', module] + check_call(cmd) + cmd = 'echo %s >> /etc/modules' % module + check_call(cmd, shell=True) + + +def copy_files(src, dst, symlinks=False, ignore=None): + for item in os.listdir(src): + s = os.path.join(src, item) + d = os.path.join(dst, item) + if os.path.isdir(s): + shutil.copytree(s, d, symlinks, ignore) + else: + shutil.copy2(s, d) + + +def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point, + blk_device, fstype, system_services=[]): + """ + To be called from the current cluster leader. + Ensures given pool and RBD image exists, is mapped to a block device, + and the device is formatted and mounted at the given mount_point. + + If formatting a device for the first time, data existing at mount_point + will be migrated to the RBD device before being remounted. + + All services listed in system_services will be stopped prior to data + migration and restarted when complete. + """ + # Ensure pool, RBD image, RBD mappings are in place. + if not pool_exists(service, pool): + log('ceph: Creating new pool %s.' 
% pool, level=INFO) + create_pool(service, pool) + + if not rbd_exists(service, pool, rbd_img): + log('ceph: Creating RBD image (%s).' % rbd_img, level=INFO) + create_rbd_image(service, pool, rbd_img, sizemb) + + if not image_mapped(rbd_img): + log('ceph: Mapping RBD Image as a Block Device.', level=INFO) + map_block_storage(service, pool, rbd_img) + + # make file system + # TODO: What happens if for whatever reason this is run again and + # the data is already in the rbd device and/or is mounted?? + # When it is mounted already, it will fail to make the fs + # XXX: This is really sketchy! Need to at least add an fstab entry + # otherwise this hook will blow away existing data if its executed + # after a reboot. + if not filesystem_mounted(mount_point): + make_filesystem(blk_device, fstype) + + for svc in system_services: + if running(svc): + log('Stopping services %s prior to migrating data.' % svc, + level=INFO) + service_stop(svc) + + place_data_on_ceph(service, blk_device, mount_point, fstype) + + for svc in system_services: + service_start(svc) diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py new file mode 100644 index 0000000..dde6c9b --- /dev/null +++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py @@ -0,0 +1,180 @@ +# +# Copyright 2012 Canonical Ltd. +# +# Authors: +# James Page +# Adam Gandelman +# + +import subprocess +import os + +from socket import gethostname as get_unit_hostname + +from charmhelpers.core.hookenv import ( + log, + relation_ids, + related_units as relation_list, + relation_get, + config as config_get, + INFO, + ERROR, +) + + +class HAIncompleteConfig(Exception): + pass + + +def is_clustered(): + for r_id in (relation_ids('ha') or []): + for unit in (relation_list(r_id) or []): + clustered = relation_get('clustered', + rid=r_id, + unit=unit) + if clustered: + return True + return False + + +def is_leader(resource): + cmd = [ + "crm", "resource", + "show", resource + ] + try: + status = subprocess.check_output(cmd) + except subprocess.CalledProcessError: + return False + else: + if get_unit_hostname() in status: + return True + else: + return False + + +def peer_units(): + peers = [] + for r_id in (relation_ids('cluster') or []): + for unit in (relation_list(r_id) or []): + peers.append(unit) + return peers + + +def oldest_peer(peers): + local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1]) + for peer in peers: + remote_unit_no = int(peer.split('/')[1]) + if remote_unit_no < local_unit_no: + return False + return True + + +def eligible_leader(resource): + if is_clustered(): + if not is_leader(resource): + log('Deferring action to CRM leader.', level=INFO) + return False + else: + peers = peer_units() + if peers and not oldest_peer(peers): + log('Deferring action to oldest service unit.', level=INFO) + return False + return True + + +def https(): + ''' + Determines whether enough data has been provided in configuration + or relation data to configure HTTPS + . 
+    returns: boolean
+    '''
+    if config_get('use-https') == "yes":
+        return True
+    if config_get('ssl_cert') and config_get('ssl_key'):
+        return True
+    for r_id in relation_ids('identity-service'):
+        for unit in relation_list(r_id):
+            if None not in [
+                relation_get('https_keystone', rid=r_id, unit=unit),
+                relation_get('ssl_cert', rid=r_id, unit=unit),
+                relation_get('ssl_key', rid=r_id, unit=unit),
+                relation_get('ca_cert', rid=r_id, unit=unit),
+            ]:
+                return True
+    return False
+
+
+def determine_api_port(public_port):
+    '''
+    Determine correct API server listening port based on
+    existence of HTTPS reverse proxy and/or haproxy.
+
+    public_port: int: standard public port for given service
+
+    returns: int: the correct listening port for the API service
+    '''
+    i = 0
+    if len(peer_units()) > 0 or is_clustered():
+        i += 1
+    if https():
+        i += 1
+    return public_port - (i * 10)
+
+
+def determine_haproxy_port(public_port):
+    '''
+    Description: Determine correct proxy listening port based on public IP +
+    existence of HTTPS reverse proxy.
+
+    public_port: int: standard public port for given service
+
+    returns: int: the correct listening port for the HAProxy service
+    '''
+    i = 0
+    if https():
+        i += 1
+    return public_port - (i * 10)
+
+
+def get_hacluster_config():
+    '''
+    Obtains all relevant configuration from charm configuration required
+    for initiating a relation to hacluster:
+
+        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+
+    returns: dict: A dict containing settings keyed by setting name.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    conf = {}
+    for setting in settings:
+        conf[setting] = config_get(setting)
+    missing = []
+    [missing.append(s) for s, v in conf.iteritems() if v is None]
+    if missing:
+        log('Insufficient config data to configure hacluster.', level=ERROR)
+        raise HAIncompleteConfig
+    return conf
+
+
+def canonical_url(configs, vip_setting='vip'):
+    '''
+    Returns the correct HTTP URL to this host given the state of HTTPS
+    configuration and hacluster.
+
+    :configs    : OSConfigRenderer: A config templating object to inspect for
+                  a complete https context.
+    :vip_setting: str: Setting in charm config that specifies
+                  VIP address.
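+
+    For example (hypothetical addresses): returns 'https://10.0.0.100'
+    when clustered behind a VIP with HTTPS configured, and
+    'http://node-hostname' otherwise.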
+ ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + if is_clustered(): + addr = config_get(vip_setting) + else: + addr = get_unit_hostname() + return '%s://%s' % (scheme, addr) diff --git a/hooks/charmhelpers/contrib/openstack/__init__.py b/hooks/charmhelpers/contrib/openstack/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py new file mode 100644 index 0000000..f146e0b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -0,0 +1,271 @@ +import os + +from base64 import b64decode + +from subprocess import ( + check_call +) + +from charmhelpers.core.hookenv import ( + config, + local_unit, + log, + relation_get, + relation_ids, + related_units, + unit_get, +) + +from charmhelpers.contrib.hahelpers.cluster import ( + determine_api_port, + determine_haproxy_port, + https, + is_clustered, + peer_units, +) + +from charmhelpers.contrib.hahelpers.apache import ( + get_cert, + get_ca_cert, +) + +CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt' + + +class OSContextError(Exception): + pass + + +def context_complete(ctxt): + _missing = [] + for k, v in ctxt.iteritems(): + if v is None or v == '': + _missing.append(k) + if _missing: + log('Missing required data: %s' % ' '.join(_missing), level='INFO') + return False + return True + + +class OSContextGenerator(object): + interfaces = [] + + def __call__(self): + raise NotImplementedError + + +class SharedDBContext(OSContextGenerator): + interfaces = ['shared-db'] + + def __call__(self): + log('Generating template context for shared-db') + conf = config() + try: + database = conf['database'] + username = conf['database-user'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' % e) + raise OSContextError + ctxt = {} + for rid in relation_ids('shared-db'): + for unit in related_units(rid): + ctxt = { + 'database_host': relation_get('db_host', rid=rid, + unit=unit), + 'database': database, + 'database_user': username, + 'database_password': relation_get('password', rid=rid, + unit=unit) + } + if not context_complete(ctxt): + return {} + return ctxt + + +class IdentityServiceContext(OSContextGenerator): + interfaces = ['identity-service'] + + def __call__(self): + log('Generating template context for identity-service') + ctxt = {} + + for rid in relation_ids('identity-service'): + for unit in related_units(rid): + ctxt = { + 'service_port': relation_get('service_port', rid=rid, + unit=unit), + 'service_host': relation_get('service_host', rid=rid, + unit=unit), + 'auth_host': relation_get('auth_host', rid=rid, unit=unit), + 'auth_port': relation_get('auth_port', rid=rid, unit=unit), + 'admin_tenant_name': relation_get('service_tenant', + rid=rid, unit=unit), + 'admin_user': relation_get('service_username', rid=rid, + unit=unit), + 'admin_password': relation_get('service_password', rid=rid, + unit=unit), + # XXX: Hard-coded http. + 'service_protocol': 'http', + 'auth_protocol': 'http', + } + if not context_complete(ctxt): + return {} + return ctxt + + +class AMQPContext(OSContextGenerator): + interfaces = ['amqp'] + + def __call__(self): + log('Generating template context for amqp') + conf = config() + try: + username = conf['rabbit-user'] + vhost = conf['rabbit-vhost'] + except KeyError as e: + log('Could not generate shared_db context. ' + 'Missing required charm config options: %s.' 
% e)
+            raise OSContextError
+
+        ctxt = {}
+        for rid in relation_ids('amqp'):
+            for unit in related_units(rid):
+                if relation_get('clustered', rid=rid, unit=unit):
+                    rabbitmq_host = relation_get('vip', rid=rid, unit=unit)
+                else:
+                    rabbitmq_host = relation_get('private-address',
+                                                 rid=rid, unit=unit)
+                ctxt = {
+                    'rabbitmq_host': rabbitmq_host,
+                    'rabbitmq_user': username,
+                    'rabbitmq_password': relation_get('password', rid=rid,
+                                                      unit=unit),
+                    'rabbitmq_virtual_host': vhost,
+                }
+        if not context_complete(ctxt):
+            return {}
+        return ctxt
+
+
+class CephContext(OSContextGenerator):
+    interfaces = ['ceph']
+
+    def __call__(self):
+        '''This generates context for /etc/ceph/ceph.conf templates'''
+        log('Generating template context for ceph')
+        mon_hosts = []
+        auth = None
+        for rid in relation_ids('ceph'):
+            for unit in related_units(rid):
+                mon_hosts.append(relation_get('private-address', rid=rid,
+                                              unit=unit))
+                auth = relation_get('auth', rid=rid, unit=unit)
+
+        ctxt = {
+            'mon_hosts': ' '.join(mon_hosts),
+            'auth': auth,
+        }
+        if not context_complete(ctxt):
+            return {}
+        return ctxt
+
+
+class HAProxyContext(OSContextGenerator):
+    interfaces = ['cluster']
+
+    def __call__(self):
+        '''
+        Builds half a context for the haproxy template, which describes
+        all peers to be included in the cluster. Each charm needs to include
+        its own context generator that describes the port mapping.
+        '''
+        if not relation_ids('cluster'):
+            return {}
+
+        cluster_hosts = {}
+        l_unit = local_unit().replace('/', '-')
+        cluster_hosts[l_unit] = unit_get('private-address')
+
+        for rid in relation_ids('cluster'):
+            for unit in related_units(rid):
+                _unit = unit.replace('/', '-')
+                addr = relation_get('private-address', rid=rid, unit=unit)
+                cluster_hosts[_unit] = addr
+
+        ctxt = {
+            'units': cluster_hosts,
+        }
+        if len(cluster_hosts.keys()) > 1:
+            # Enable haproxy when we have enough peers.
+            log('Ensuring haproxy enabled in /etc/default/haproxy.')
+            with open('/etc/default/haproxy', 'w') as out:
+                out.write('ENABLED=1\n')
+            return ctxt
+        log('HAProxy context is incomplete, this unit has no peers.')
+        return {}
+
+
+class ApacheSSLContext(OSContextGenerator):
+    """
+    Generates a context for an apache vhost configuration that configures
+    HTTPS reverse proxying for one or many endpoints. Generated context
+    looks something like:
+    {
+        'namespace': 'cinder',
+        'private_address': 'iscsi.mycinderhost.com',
+        'endpoints': [(8776, 8766), (8777, 8767)]
+    }
+
+    The endpoints list consists of tuples mapping external ports
+    to internal ports.
+    """
+    interfaces = ['https']
+
+    # charms should inherit this context and set external ports
+    # and service namespace accordingly.
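+    #
+    # For example, a subclass might declare (hypothetical charm values):
+    #
+    #     class NovaSSLContext(ApacheSSLContext):
+    #         external_ports = [8774]
+    #         service_namespace = 'nova'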
+ external_ports = [] + service_namespace = None + + def enable_modules(self): + cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http'] + check_call(cmd) + + def configure_cert(self): + if not os.path.isdir('/etc/apache2/ssl'): + os.mkdir('/etc/apache2/ssl') + ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace) + if not os.path.isdir(ssl_dir): + os.mkdir(ssl_dir) + cert, key = get_cert() + with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out: + cert_out.write(b64decode(cert)) + with open(os.path.join(ssl_dir, 'key'), 'w') as key_out: + key_out.write(b64decode(key)) + ca_cert = get_ca_cert() + if ca_cert: + with open(CA_CERT_PATH, 'w') as ca_out: + ca_out.write(b64decode(ca_cert)) + + def __call__(self): + if isinstance(self.external_ports, basestring): + self.external_ports = [self.external_ports] + if (not self.external_ports or not https()): + return {} + + self.configure_cert() + self.enable_modules() + + ctxt = { + 'namespace': self.service_namespace, + 'private_address': unit_get('private-address'), + 'endpoints': [] + } + for ext_port in self.external_ports: + if peer_units() or is_clustered(): + int_port = determine_haproxy_port(ext_port) + else: + int_port = determine_api_port(ext_port) + portmap = (int(ext_port), int(int_port)) + ctxt['endpoints'].append(portmap) + return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templates/__init__.py b/hooks/charmhelpers/contrib/openstack/templates/__init__.py new file mode 100644 index 0000000..0b49ad2 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/__init__.py @@ -0,0 +1,2 @@ +# dummy __init__.py to fool syncer into thinking this is a syncable python +# module diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf new file mode 100644 index 0000000..1d8ca3b --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf @@ -0,0 +1,11 @@ +############################################################################### +# [ WARNING ] +# cinder configuration file maintained by Juju +# local changes may be overwritten. 
+###############################################################################
+{% if auth %}
+[global]
+ auth_supported = {{ auth }}
+ keyring = /etc/ceph/$cluster.$name.keyring
+ mon host = {{ mon_hosts }}
+{% endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
new file mode 100644
index 0000000..b184cd4
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -0,0 +1,37 @@
+global
+    log 127.0.0.1 local0
+    log 127.0.0.1 local1 notice
+    maxconn 20000
+    user haproxy
+    group haproxy
+    spread-checks 0
+
+defaults
+    log global
+    mode http
+    option httplog
+    option dontlognull
+    retries 3
+    timeout queue 1000
+    timeout connect 1000
+    timeout client 30000
+    timeout server 30000
+
+listen stats :8888
+    mode http
+    stats enable
+    stats hide-version
+    stats realm Haproxy\ Statistics
+    stats uri /
+    stats auth admin:password
+
+{% if units %}
+{% for service, ports in service_ports.iteritems() -%}
+listen {{ service }} 0.0.0.0:{{ ports[0] }}
+    balance roundrobin
+    option tcplog
+    {% for unit, address in units.iteritems() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+{% endfor %}
+{% endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
new file mode 100644
index 0000000..e833a71
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/openstack_https_frontend
@@ -0,0 +1,23 @@
+{% if endpoints %}
+{% for ext, int in endpoints %}
+Listen {{ ext }}
+NameVirtualHost *:{{ ext }}
+<VirtualHost *:{{ ext }}>
+    ServerName {{ private_address }}
+    SSLEngine on
+    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert
+    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key
+    ProxyPass / http://localhost:{{ int }}/
+    ProxyPassReverse / http://localhost:{{ int }}/
+    ProxyPreserveHost on
+</VirtualHost>
+<Proxy *>
+    Order deny,allow
+    Allow from all
+</Proxy>
+<Location />
+    Order allow,deny
+    Allow from all
+</Location>
+{% endfor %}
+{% endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
new file mode 100644
index 0000000..c555cc6
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -0,0 +1,261 @@
+import os
+
+from charmhelpers.core.host import apt_install
+
+from charmhelpers.core.hookenv import (
+    log,
+    ERROR,
+    INFO
+)
+
+from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
+
+try:
+    from jinja2 import FileSystemLoader, ChoiceLoader, Environment
+except ImportError:
+    # python-jinja2 may not be installed yet, or we're running unittests.
+    FileSystemLoader = ChoiceLoader = Environment = None
+
+
+class OSConfigException(Exception):
+    pass
+
+
+def get_loader(templates_dir, os_release):
+    """
+    Create a jinja2.ChoiceLoader containing template dirs up to
+    and including os_release. If a release's template directory is
+    missing under templates_dir, it is omitted from the loader.
+    templates_dir is added to the bottom of the search list as a base
+    loading dir.
+
+    A charm may also ship a templates dir with this module
+    and it will be appended to the bottom of the search list, eg:
+    hooks/charmhelpers/contrib/openstack/templates.
+
+    :param templates_dir: str: Base template directory containing release
+                               sub-directories.
+    :param os_release   : str: OpenStack release codename to construct template
+                               loader.
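+
+    For example, with os_release='grizzly' the loader searches, in order:
+    templates_dir/grizzly, templates_dir/folsom, templates_dir/essex and
+    templates_dir/diablo (each only if it exists), then templates_dir
+    itself, and finally the helper's own templates directory.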
+
+    :returns    : jinja2.ChoiceLoader constructed with a list of
+                  jinja2.FilesystemLoaders, ordered in descending
+                  order by OpenStack release.
+    """
+    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
+                 for rel in OPENSTACK_CODENAMES.itervalues()]
+
+    if not os.path.isdir(templates_dir):
+        log('Templates directory not found @ %s.' % templates_dir,
+            level=ERROR)
+        raise OSConfigException
+
+    # the bottom contains templates_dir and possibly a common templates dir
+    # shipped with the helper.
+    loaders = [FileSystemLoader(templates_dir)]
+    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
+    if os.path.isdir(helper_templates):
+        loaders.append(FileSystemLoader(helper_templates))
+
+    for rel, tmpl_dir in tmpl_dirs:
+        if os.path.isdir(tmpl_dir):
+            loaders.insert(0, FileSystemLoader(tmpl_dir))
+        if rel == os_release:
+            break
+    log('Creating choice loader with dirs: %s' %
+        [l.searchpath for l in loaders], level=INFO)
+    return ChoiceLoader(loaders)
+
+
+class OSConfigTemplate(object):
+    """
+    Associates a config file template with a list of context generators.
+    Responsible for constructing a template context based on those generators.
+    """
+    def __init__(self, config_file, contexts):
+        self.config_file = config_file
+
+        if hasattr(contexts, '__call__'):
+            self.contexts = [contexts]
+        else:
+            self.contexts = contexts
+
+        self._complete_contexts = []
+
+    def context(self):
+        ctxt = {}
+        for context in self.contexts:
+            _ctxt = context()
+            if _ctxt:
+                ctxt.update(_ctxt)
+                # track interfaces for every complete context.
+                [self._complete_contexts.append(interface)
+                 for interface in context.interfaces
+                 if interface not in self._complete_contexts]
+        return ctxt
+
+    def complete_contexts(self):
+        '''
+        Return a list of interfaces that have satisfied contexts.
+        '''
+        if self._complete_contexts:
+            return self._complete_contexts
+        self.context()
+        return self._complete_contexts
+
+
+class OSConfigRenderer(object):
+    """
+    This class provides a common templating system to be used by OpenStack
+    charms. It is intended to help charms share common code and templates,
+    and ease the burden of managing config templates across multiple OpenStack
+    releases.
+
+    Basic usage:
+        # import some common context generators from charmhelpers
+        from charmhelpers.contrib.openstack import context
+
+        # Create a renderer object for a specific OS release.
+        configs = OSConfigRenderer(templates_dir='/tmp/templates',
+                                   openstack_release='folsom')
+        # register some config files with context generators.
+        configs.register(config_file='/etc/nova/nova.conf',
+                         contexts=[context.SharedDBContext(),
+                                   context.AMQPContext()])
+        configs.register(config_file='/etc/nova/api-paste.ini',
+                         contexts=[context.IdentityServiceContext()])
+        configs.register(config_file='/etc/haproxy/haproxy.conf',
+                         contexts=[context.HAProxyContext()])
+        # write out a single config
+        configs.write('/etc/nova/nova.conf')
+        # write out all registered configs
+        configs.write_all()
+
+    Details:
+
+    OpenStack Releases and template loading
+    ---------------------------------------
+    When the object is instantiated, it is associated with a specific OS
+    release. This dictates how the template loader will be constructed.
+
+    The constructed loader attempts to load the template from several places
+    in the following order:
+    - from the most recent OS release-specific template dir (if one exists)
+    - the base templates_dir
+    - a template directory shipped in the charm with this helper file.
+
+
+    For the example above, '/tmp/templates' contains the following structure:
+        /tmp/templates/nova.conf
+        /tmp/templates/api-paste.ini
+        /tmp/templates/grizzly/api-paste.ini
+        /tmp/templates/havana/api-paste.ini
+
+    Since it was registered with the grizzly release, it first searches
+    the grizzly directory for nova.conf, then the templates dir.
+
+    When writing api-paste.ini, it will find the template in the grizzly
+    directory.
+
+    If the object were created with folsom, it would fall back to the
+    base templates dir for its api-paste.ini template.
+
+    This system should help manage changes in config files through
+    openstack releases, allowing charms to fall back to the most recently
+    updated config template for a given release.
+
+    The haproxy.conf, since it is not shipped in the templates dir, will
+    be loaded from the module directory's template directory, eg
+    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
+    us to ship common templates (haproxy, apache) with the helpers.
+
+    Context generators
+    ---------------------------------------
+    Context generators are used to generate template contexts during hook
+    execution. Doing so may require inspecting service relations, charm
+    config, etc. When registered, a config file is associated with a list
+    of generators. When a template is rendered and written, all context
+    generators are called in a chain to generate the context dictionary
+    passed to the jinja2 template. See context.py for more info.
+    """
+    def __init__(self, templates_dir, openstack_release):
+        if not os.path.isdir(templates_dir):
+            log('Could not locate templates dir %s' % templates_dir,
+                level=ERROR)
+            raise OSConfigException
+
+        self.templates_dir = templates_dir
+        self.openstack_release = openstack_release
+        self.templates = {}
+        self._tmpl_env = None
+
+        if None in [Environment, ChoiceLoader, FileSystemLoader]:
+            # if this code is running, the object is created pre-install hook.
+            # jinja2 shouldn't get touched until the module is reloaded on next
+            # hook execution, with proper jinja2 bits successfully imported.
+            apt_install('python-jinja2')
+
+    def register(self, config_file, contexts):
+        """
+        Register a config file with a list of context generators to be called
+        during rendering.
+        """
+        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
+                                                       contexts=contexts)
+        log('Registered config file: %s' % config_file, level=INFO)
+
+    def _get_tmpl_env(self):
+        if not self._tmpl_env:
+            loader = get_loader(self.templates_dir, self.openstack_release)
+            self._tmpl_env = Environment(loader=loader)
+
+    def _get_template(self, template):
+        self._get_tmpl_env()
+        template = self._tmpl_env.get_template(template)
+        log('Loaded template from %s' % template.filename, level=INFO)
+        return template
+
+    def render(self, config_file):
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+        ctxt = self.templates[config_file].context()
+        _tmpl = os.path.basename(config_file)
+        log('Rendering from template: %s' % _tmpl, level=INFO)
+        template = self._get_template(_tmpl)
+        return template.render(ctxt)
+
+    def write(self, config_file):
+        """
+        Write a single config file, raises if config file is not registered.
+        """
+        if config_file not in self.templates:
+            log('Config not registered: %s' % config_file, level=ERROR)
+            raise OSConfigException
+        with open(config_file, 'wb') as out:
+            out.write(self.render(config_file))
+        log('Wrote template %s.'
% config_file, level=INFO) + + def write_all(self): + """ + Write out all registered config files. + """ + [self.write(k) for k in self.templates.iterkeys()] + + def set_release(self, openstack_release): + """ + Resets the template environment and generates a new template loader + based on a the new openstack release. + """ + self._tmpl_env = None + self.openstack_release = openstack_release + self._get_tmpl_env() + + def complete_contexts(self): + ''' + Returns a list of context interfaces that yield a complete context. + ''' + interfaces = [] + [interfaces.extend(i.complete_contexts()) + for i in self.templates.itervalues()] + return interfaces diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py new file mode 100644 index 0000000..5da85b3 --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -0,0 +1,273 @@ +#!/usr/bin/python + +# Common python helper functions used for OpenStack charms. + +from collections import OrderedDict + +import apt_pkg as apt +import subprocess +import os +import sys + +from charmhelpers.core.hookenv import ( + config, + log as juju_log, + charm_dir, +) + +from charmhelpers.core.host import ( + lsb_release, + apt_install, +) + +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' + +UBUNTU_OPENSTACK_RELEASE = OrderedDict([ + ('oneiric', 'diablo'), + ('precise', 'essex'), + ('quantal', 'folsom'), + ('raring', 'grizzly'), + ('saucy', 'havana'), +]) + + +OPENSTACK_CODENAMES = OrderedDict([ + ('2011.2', 'diablo'), + ('2012.1', 'essex'), + ('2012.2', 'folsom'), + ('2013.1', 'grizzly'), + ('2013.2', 'havana'), + ('2014.1', 'icehouse'), +]) + +# The ugly duckling +SWIFT_CODENAMES = { + '1.4.3': 'diablo', + '1.4.8': 'essex', + '1.7.4': 'folsom', + '1.7.6': 'grizzly', + '1.7.7': 'grizzly', + '1.8.0': 'grizzly', + '1.9.0': 'havana', + '1.9.1': 'havana', +} + + +def error_out(msg): + juju_log("FATAL ERROR: %s" % msg, level='ERROR') + sys.exit(1) + + +def get_os_codename_install_source(src): + '''Derive OpenStack release codename from a given installation source.''' + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = '' + if src == 'distro': + try: + rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel] + except KeyError: + e = 'Could not derive openstack release for '\ + 'this Ubuntu release: %s' % ubuntu_rel + error_out(e) + return rel + + if src.startswith('cloud:'): + ca_rel = src.split(':')[1] + ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] + return ca_rel + + # Best guess match based on deb string provided + if src.startswith('deb') or src.startswith('ppa'): + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v in src: + return v + + +def get_os_version_install_source(src): + codename = get_os_codename_install_source(src) + return get_os_version_codename(codename) + + +def get_os_codename_version(vers): + '''Determine OpenStack codename from version number.''' + try: + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_codename(codename): + '''Determine OpenStack version number from codename.''' + for k, v in OPENSTACK_CODENAMES.iteritems(): + if v == codename: + return k + e = 'Could not derive OpenStack version for '\ + 'codename: %s' % codename + error_out(e) + + +def get_os_codename_package(package, fatal=True): + '''Derive OpenStack release codename from an installed package.''' + apt.init() + cache = apt.Cache() + + try: 
+ pkg = cache[package] + except: + if not fatal: + return None + # the package is unknown to the current apt cache. + e = 'Could not determine version of package with no installation '\ + 'candidate: %s' % package + error_out(e) + + if not pkg.current_ver: + if not fatal: + return None + # package is known, but no version is currently installed. + e = 'Could not determine version of uninstalled package: %s' % package + error_out(e) + + vers = apt.UpstreamVersion(pkg.current_ver.ver_str) + + try: + if 'swift' in pkg.name: + vers = vers[:5] + return SWIFT_CODENAMES[vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) + + +def get_os_version_package(pkg, fatal=True): + '''Derive OpenStack version number from an installed package.''' + codename = get_os_codename_package(pkg, fatal=fatal) + + if not codename: + return None + + if 'swift' in pkg: + vers_map = SWIFT_CODENAMES + else: + vers_map = OPENSTACK_CODENAMES + + for version, cname in vers_map.iteritems(): + if cname == codename: + return version + #e = "Could not determine OpenStack version for package: %s" % pkg + #error_out(e) + + +def import_key(keyid): + cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \ + "--recv-keys %s" % keyid + try: + subprocess.check_call(cmd.split(' ')) + except subprocess.CalledProcessError: + error_out("Error importing repo key %s" % keyid) + + +def configure_installation_source(rel): + '''Configure apt installation source.''' + if rel == 'distro': + return + elif rel[:4] == "ppa:": + src = rel + subprocess.check_call(["add-apt-repository", "-y", src]) + elif rel[:3] == "deb": + l = len(rel.split('|')) + if l == 2: + src, key = rel.split('|') + juju_log("Importing PPA key from keyserver for %s" % src) + import_key(key) + elif l == 1: + src = rel + with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f: + f.write(src) + elif rel[:6] == 'cloud:': + ubuntu_rel = lsb_release()['DISTRIB_CODENAME'] + rel = rel.split(':')[1] + u_rel = rel.split('-')[0] + ca_rel = rel.split('-')[1] + + if u_rel != ubuntu_rel: + e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\ + 'version (%s)' % (ca_rel, ubuntu_rel) + error_out(e) + + if 'staging' in ca_rel: + # staging is just a regular PPA. + os_rel = ca_rel.split('/')[0] + ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel + cmd = 'add-apt-repository -y %s' % ppa + subprocess.check_call(cmd.split(' ')) + return + + # map charm config options to actual archive pockets. 
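+    # For example, an install source of 'cloud:precise-folsom/proposed'
+    # yields ca_rel 'folsom/proposed' and selects the
+    # 'precise-proposed/folsom' pocket below.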
+ pockets = { + 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', + 'folsom/proposed': 'precise-proposed/folsom', + 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', + 'grizzly/proposed': 'precise-proposed/grizzly', + 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', + 'havana/proposed': 'precise-proposed/havana', + } + + try: + pocket = pockets[ca_rel] + except KeyError: + e = 'Invalid Cloud Archive release specified: %s' % rel + error_out(e) + + src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket) + apt_install('ubuntu-cloud-keyring', fatal=True) + + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f: + f.write(src) + else: + error_out("Invalid openstack-release specified: %s" % rel) + + +def save_script_rc(script_path="scripts/scriptrc", **env_vars): + """ + Write an rc file in the charm-delivered directory containing + exported environment variables provided by env_vars. Any charm scripts run + outside the juju hook environment can source this scriptrc to obtain + updated config information necessary to perform health checks or + service changes. + """ + juju_rc_path = "%s/%s" % (charm_dir(), script_path) + if not os.path.exists(os.path.dirname(juju_rc_path)): + os.mkdir(os.path.dirname(juju_rc_path)) + with open(juju_rc_path, 'wb') as rc_script: + rc_script.write( + "#!/bin/bash\n") + [rc_script.write('export %s=%s\n' % (u, p)) + for u, p in env_vars.iteritems() if u != "script_path"] + + +def openstack_upgrade_available(package): + """ + Determines if an OpenStack upgrade is available from installation + source, based on version of installed package. + + :param package: str: Name of installed package. + + :returns: bool: : Returns True if configured installation source offers + a newer version of package. + + """ + + src = config('openstack-origin') + cur_vers = get_os_version_package(package) + available_vers = get_os_version_install_source(src) + apt.init() + return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/contrib/storage/__init__.py b/hooks/charmhelpers/contrib/storage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/__init__.py b/hooks/charmhelpers/contrib/storage/linux/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/contrib/storage/linux/loopback.py b/hooks/charmhelpers/contrib/storage/linux/loopback.py new file mode 100644 index 0000000..9fb87a2 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/loopback.py @@ -0,0 +1,59 @@ + +import os +import re + +from subprocess import ( + check_call, + check_output, +) + + +################################################## +# loopback device helpers. +################################################## +def loopback_devices(): + ''' + Parse through 'losetup -a' output to determine currently mapped + loopback devices. Output is expected to look like: + + /dev/loop0: [0807]:961814 (/tmp/my.img) + + :returns: dict: a dict mapping {loopback_dev: backing_file} + ''' + loopbacks = {} + cmd = ['losetup', '-a'] + devs = [d.strip().split(' ') for d in + check_output(cmd).splitlines() if d != ''] + for dev, _, f in devs: + loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0] + return loopbacks + + +def create_loopback(file_path): + ''' + Create a loopback device for a given backing file. 
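+
+    :param file_path: str: Full path to the backing file.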
+
+    :returns: str: Full path to new loopback device (eg, /dev/loop0)
+    '''
+    cmd = ['losetup', '--find', file_path]
+    return check_output(cmd).strip()
+
+
+def ensure_loopback_device(path, size):
+    '''
+    Ensure a loopback device exists for a given backing file path and size.
+    If a loopback device is not already mapped to the file, a new one will be created.
+
+    TODO: Confirm size of found loopback device.
+
+    :returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
+    '''
+    for d, f in loopback_devices().iteritems():
+        if f == path:
+            return d
+
+    if not os.path.exists(path):
+        cmd = ['truncate', '--size', size, path]
+        check_call(cmd)
+
+    return create_loopback(path)
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
new file mode 100644
index 0000000..6e29181
--- /dev/null
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -0,0 +1,88 @@
+from subprocess import (
+    CalledProcessError,
+    check_call,
+    check_output,
+    Popen,
+    PIPE,
+)
+
+
+##################################################
+# LVM helpers.
+##################################################
+def deactivate_lvm_volume_group(block_device):
+    '''
+    Deactivate any volume group associated with an LVM physical volume.
+
+    :param block_device: str: Full path to LVM physical volume
+    '''
+    vg = list_lvm_volume_group(block_device)
+    if vg:
+        cmd = ['vgchange', '-an', vg]
+        check_call(cmd)
+
+
+def is_lvm_physical_volume(block_device):
+    '''
+    Determine whether a block device is initialized as an LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: boolean: True if block device is a PV, False if not.
+    '''
+    try:
+        check_output(['pvdisplay', block_device])
+        return True
+    except CalledProcessError:
+        return False
+
+
+def remove_lvm_physical_volume(block_device):
+    '''
+    Remove LVM PV signatures from a given block device.
+
+    :param block_device: str: Full path of block device to scrub.
+    '''
+    p = Popen(['pvremove', '-ff', block_device],
+              stdin=PIPE)
+    p.communicate(input='y\n')
+
+
+def list_lvm_volume_group(block_device):
+    '''
+    List LVM volume group associated with a given block device.
+
+    Assumes block device is a valid LVM PV.
+
+    :param block_device: str: Full path of block device to inspect.
+
+    :returns: str: Name of volume group associated with block device or None
+    '''
+    vg = None
+    pvd = check_output(['pvdisplay', block_device]).splitlines()
+    for l in pvd:
+        if l.strip().startswith('VG Name'):
+            vg = ' '.join(l.split()).split(' ').pop()
+    return vg
+
+
+def create_lvm_physical_volume(block_device):
+    '''
+    Initialize a block device as an LVM physical volume.
+
+    :param block_device: str: Full path of block device to initialize.
+
+    '''
+    check_call(['pvcreate', block_device])
+
+
+def create_lvm_volume_group(volume_group, block_device):
+    '''
+    Create an LVM volume group backed by a given block device.
+
+    Assumes block device has already been initialized as an LVM PV.
+
+    :param volume_group: str: Name of volume group to create.
+    :block_device: str: Full path of PV-initialized block device.
+ ''' + check_call(['vgcreate', volume_group, block_device]) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py new file mode 100644 index 0000000..5b9b6d4 --- /dev/null +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -0,0 +1,25 @@ +from os import stat +from stat import S_ISBLK + +from subprocess import ( + check_call +) + + +def is_block_device(path): + ''' + Confirm device at path is a valid block device node. + + :returns: boolean: True if path is a block device, False if not. + ''' + return S_ISBLK(stat(path).st_mode) + + +def zap_disk(block_device): + ''' + Clear a block device of partition table. Relies on sgdisk, which is + installed as pat of the 'gdisk' package in Ubuntu. + + :param block_device: str: Full path of block device to clean. + ''' + check_call(['sgdisk', '--zap-all', block_device]) diff --git a/hooks/charmhelpers/core/__init__.py b/hooks/charmhelpers/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py new file mode 100644 index 0000000..2b06706 --- /dev/null +++ b/hooks/charmhelpers/core/hookenv.py @@ -0,0 +1,340 @@ +"Interactions with the Juju environment" +# Copyright 2013 Canonical Ltd. +# +# Authors: +# Charm Helpers Developers + +import os +import json +import yaml +import subprocess +import UserDict + +CRITICAL = "CRITICAL" +ERROR = "ERROR" +WARNING = "WARNING" +INFO = "INFO" +DEBUG = "DEBUG" +MARKER = object() + +cache = {} + + +def cached(func): + ''' Cache return values for multiple executions of func + args + + For example: + + @cached + def unit_get(attribute): + pass + + unit_get('test') + + will cache the result of unit_get + 'test' for future calls. + ''' + def wrapper(*args, **kwargs): + global cache + key = str((func, args, kwargs)) + try: + return cache[key] + except KeyError: + res = func(*args, **kwargs) + cache[key] = res + return res + return wrapper + + +def flush(key): + ''' Flushes any entries from function cache where the + key is found in the function+args ''' + flush_list = [] + for item in cache: + if key in item: + flush_list.append(item) + for item in flush_list: + del cache[item] + + +def log(message, level=None): + "Write a message to the juju log" + command = ['juju-log'] + if level: + command += ['-l', level] + command += [message] + subprocess.call(command) + + +class Serializable(UserDict.IterableUserDict): + "Wrapper, an object that can be serialized to yaml or json" + + def __init__(self, obj): + # wrap the object + UserDict.IterableUserDict.__init__(self) + self.data = obj + + def __getattr__(self, attr): + # See if this object has attribute. + if attr in ("json", "yaml", "data"): + return self.__dict__[attr] + # Check for attribute in wrapped object. + got = getattr(self.data, attr, MARKER) + if got is not MARKER: + return got + # Proxy to the wrapped object via dict interface. + try: + return self.data[attr] + except KeyError: + raise AttributeError(attr) + + def __getstate__(self): + # Pickle as a standard dictionary. + return self.data + + def __setstate__(self, state): + # Unpickle into our wrapper. 
+ self.data = state + + def json(self): + "Serialize the object to json" + return json.dumps(self.data) + + def yaml(self): + "Serialize the object to yaml" + return yaml.dump(self.data) + + +def execution_environment(): + """A convenient bundling of the current execution context""" + context = {} + context['conf'] = config() + if relation_id(): + context['reltype'] = relation_type() + context['relid'] = relation_id() + context['rel'] = relation_get() + context['unit'] = local_unit() + context['rels'] = relations() + context['env'] = os.environ + return context + + +def in_relation_hook(): + "Determine whether we're running in a relation hook" + return 'JUJU_RELATION' in os.environ + + +def relation_type(): + "The scope for the current relation hook" + return os.environ.get('JUJU_RELATION', None) + + +def relation_id(): + "The relation ID for the current relation hook" + return os.environ.get('JUJU_RELATION_ID', None) + + +def local_unit(): + "Local unit ID" + return os.environ['JUJU_UNIT_NAME'] + + +def remote_unit(): + "The remote unit for the current relation hook" + return os.environ['JUJU_REMOTE_UNIT'] + + +def service_name(): + "The name service group this unit belongs to" + return local_unit().split('/')[0] + + +@cached +def config(scope=None): + "Juju charm configuration" + config_cmd_line = ['config-get'] + if scope is not None: + config_cmd_line.append(scope) + config_cmd_line.append('--format=json') + try: + return json.loads(subprocess.check_output(config_cmd_line)) + except ValueError: + return None + + +@cached +def relation_get(attribute=None, unit=None, rid=None): + _args = ['relation-get', '--format=json'] + if rid: + _args.append('-r') + _args.append(rid) + _args.append(attribute or '-') + if unit: + _args.append(unit) + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def relation_set(relation_id=None, relation_settings={}, **kwargs): + relation_cmd_line = ['relation-set'] + if relation_id is not None: + relation_cmd_line.extend(('-r', relation_id)) + for k, v in (relation_settings.items() + kwargs.items()): + if v is None: + relation_cmd_line.append('{}='.format(k)) + else: + relation_cmd_line.append('{}={}'.format(k, v)) + subprocess.check_call(relation_cmd_line) + # Flush cache of any relation-gets for local unit + flush(local_unit()) + + +@cached +def relation_ids(reltype=None): + "A list of relation_ids" + reltype = reltype or relation_type() + relid_cmd_line = ['relation-ids', '--format=json'] + if reltype is not None: + relid_cmd_line.append(reltype) + return json.loads(subprocess.check_output(relid_cmd_line)) or [] + return [] + + +@cached +def related_units(relid=None): + "A list of related units" + relid = relid or relation_id() + units_cmd_line = ['relation-list', '--format=json'] + if relid is not None: + units_cmd_line.extend(('-r', relid)) + return json.loads(subprocess.check_output(units_cmd_line)) or [] + + +@cached +def relation_for_unit(unit=None, rid=None): + "Get the json represenation of a unit's relation" + unit = unit or remote_unit() + relation = relation_get(unit=unit, rid=rid) + for key in relation: + if key.endswith('-list'): + relation[key] = relation[key].split() + relation['__unit__'] = unit + return relation + + +@cached +def relations_for_id(relid=None): + "Get relations of a specific relation ID" + relation_data = [] + relid = relid or relation_ids() + for unit in related_units(relid): + unit_data = relation_for_unit(unit, relid) + unit_data['__relid__'] = relid + 
relation_data.append(unit_data) + return relation_data + + +@cached +def relations_of_type(reltype=None): + "Get relations of a specific type" + relation_data = [] + reltype = reltype or relation_type() + for relid in relation_ids(reltype): + for relation in relations_for_id(relid): + relation['__relid__'] = relid + relation_data.append(relation) + return relation_data + + +@cached +def relation_types(): + "Get a list of relation types supported by this charm" + charmdir = os.environ.get('CHARM_DIR', '') + mdf = open(os.path.join(charmdir, 'metadata.yaml')) + md = yaml.safe_load(mdf) + rel_types = [] + for key in ('provides', 'requires', 'peers'): + section = md.get(key) + if section: + rel_types.extend(section.keys()) + mdf.close() + return rel_types + + +@cached +def relations(): + rels = {} + for reltype in relation_types(): + relids = {} + for relid in relation_ids(reltype): + units = {local_unit(): relation_get(unit=local_unit(), rid=relid)} + for unit in related_units(relid): + reldata = relation_get(unit=unit, rid=relid) + units[unit] = reldata + relids[relid] = units + rels[reltype] = relids + return rels + + +def open_port(port, protocol="TCP"): + "Open a service network port" + _args = ['open-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +def close_port(port, protocol="TCP"): + "Close a service network port" + _args = ['close-port'] + _args.append('{}/{}'.format(port, protocol)) + subprocess.check_call(_args) + + +@cached +def unit_get(attribute): + _args = ['unit-get', '--format=json', attribute] + try: + return json.loads(subprocess.check_output(_args)) + except ValueError: + return None + + +def unit_private_ip(): + return unit_get('private-address') + + +class UnregisteredHookError(Exception): + pass + + +class Hooks(object): + def __init__(self): + super(Hooks, self).__init__() + self._hooks = {} + + def register(self, name, function): + self._hooks[name] = function + + def execute(self, args): + hook_name = os.path.basename(args[0]) + if hook_name in self._hooks: + self._hooks[hook_name]() + else: + raise UnregisteredHookError(hook_name) + + def hook(self, *hook_names): + def wrapper(decorated): + for hook_name in hook_names: + self.register(hook_name, decorated) + else: + self.register(decorated.__name__, decorated) + if '_' in decorated.__name__: + self.register( + decorated.__name__.replace('_', '-'), decorated) + return decorated + return wrapper + + +def charm_dir(): + return os.environ.get('CHARM_DIR') diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py new file mode 100644 index 0000000..fee4216 --- /dev/null +++ b/hooks/charmhelpers/core/host.py @@ -0,0 +1,269 @@ +"""Tools for working with the host system""" +# Copyright 2012 Canonical Ltd. 
+# +# Authors: +# Nick Moffitt +# Matthew Wedgwood + +import apt_pkg +import os +import pwd +import grp +import subprocess +import hashlib + +from collections import OrderedDict + +from hookenv import log + + +def service_start(service_name): + service('start', service_name) + + +def service_stop(service_name): + service('stop', service_name) + + +def service_restart(service_name): + service('restart', service_name) + + +def service_reload(service_name, restart_on_failure=False): + if not service('reload', service_name) and restart_on_failure: + service('restart', service_name) + + +def service(action, service_name): + cmd = ['service', service_name, action] + return subprocess.call(cmd) == 0 + + +def service_running(service): + try: + output = subprocess.check_output(['service', service, 'status']) + except subprocess.CalledProcessError: + return False + else: + if ("start/running" in output or "is running" in output): + return True + else: + return False + + +def adduser(username, password=None, shell='/bin/bash', system_user=False): + """Add a user""" + try: + user_info = pwd.getpwnam(username) + log('user {0} already exists!'.format(username)) + except KeyError: + log('creating user {0}'.format(username)) + cmd = ['useradd'] + if system_user or password is None: + cmd.append('--system') + else: + cmd.extend([ + '--create-home', + '--shell', shell, + '--password', password, + ]) + cmd.append(username) + subprocess.check_call(cmd) + user_info = pwd.getpwnam(username) + return user_info + + +def add_user_to_group(username, group): + """Add a user to a group""" + cmd = [ + 'gpasswd', '-a', + username, + group + ] + log("Adding user {} to group {}".format(username, group)) + subprocess.check_call(cmd) + + +def rsync(from_path, to_path, flags='-r', options=None): + """Replicate the contents of a path""" + options = options or ['--delete', '--executability'] + cmd = ['/usr/bin/rsync', flags] + cmd.extend(options) + cmd.append(from_path) + cmd.append(to_path) + log(" ".join(cmd)) + return subprocess.check_output(cmd).strip() + + +def symlink(source, destination): + """Create a symbolic link""" + log("Symlinking {} as {}".format(source, destination)) + cmd = [ + 'ln', + '-sf', + source, + destination, + ] + subprocess.check_call(cmd) + + +def mkdir(path, owner='root', group='root', perms=0555, force=False): + """Create a directory""" + log("Making dir {} {}:{} {:o}".format(path, owner, group, + perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + realpath = os.path.abspath(path) + if os.path.exists(realpath): + if force and not os.path.isdir(realpath): + log("Removing non-directory file {} prior to mkdir()".format(path)) + os.unlink(realpath) + else: + os.makedirs(realpath, perms) + os.chown(realpath, uid, gid) + + +def write_file(path, content, owner='root', group='root', perms=0444): + """Create or overwrite a file with the contents of a string""" + log("Writing file {} {}:{} {:o}".format(path, owner, group, perms)) + uid = pwd.getpwnam(owner).pw_uid + gid = grp.getgrnam(group).gr_gid + with open(path, 'w') as target: + os.fchown(target.fileno(), uid, gid) + os.fchmod(target.fileno(), perms) + target.write(content) + + +def filter_installed_packages(packages): + """Returns a list of packages that require installation""" + apt_pkg.init() + cache = apt_pkg.Cache() + _pkgs = [] + for package in packages: + try: + p = cache[package] + p.current_ver or _pkgs.append(package) + except KeyError: + log('Package {} has no installation candidate.'.format(package), + 
+
+
+def filter_installed_packages(packages):
+    """Returns a list of packages that require installation"""
+    apt_pkg.init()
+    cache = apt_pkg.Cache()
+    _pkgs = []
+    for package in packages:
+        try:
+            p = cache[package]
+            p.current_ver or _pkgs.append(package)
+        except KeyError:
+            log('Package {} has no installation candidate.'.format(package),
+                level='WARNING')
+            _pkgs.append(package)
+    return _pkgs
+
+
+def apt_install(packages, options=None, fatal=False):
+    """Install one or more packages"""
+    options = options or []
+    cmd = ['apt-get', '-y']
+    cmd.extend(options)
+    cmd.append('install')
+    if isinstance(packages, basestring):
+        cmd.append(packages)
+    else:
+        cmd.extend(packages)
+    log("Installing {} with options: {}".format(packages,
+                                                options))
+    if fatal:
+        subprocess.check_call(cmd)
+    else:
+        subprocess.call(cmd)
+
+
+def apt_update(fatal=False):
+    """Update local apt cache"""
+    cmd = ['apt-get', 'update']
+    if fatal:
+        subprocess.check_call(cmd)
+    else:
+        subprocess.call(cmd)
+
+
+def mount(device, mountpoint, options=None, persist=False):
+    '''Mount a filesystem'''
+    cmd_args = ['mount']
+    if options is not None:
+        cmd_args.extend(['-o', options])
+    cmd_args.extend([device, mountpoint])
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError, e:
+        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
+        return False
+    if persist:
+        # TODO: update fstab
+        pass
+    return True
+
+
+def umount(mountpoint, persist=False):
+    '''Unmount a filesystem'''
+    cmd_args = ['umount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError, e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    if persist:
+        # TODO: update fstab
+        pass
+    return True
+
+
+def mounts():
+    '''List of all mounted volumes as [[mountpoint,device],[...]]'''
+    with open('/proc/mounts') as f:
+        # [['/mount/point','/dev/path'],[...]]
+        system_mounts = [m[1::-1] for m in [l.strip().split()
+                                            for l in f.readlines()]]
+    return system_mounts
+
+
+def file_hash(path):
+    '''Generate an md5 hash of the contents of 'path' or None if not found'''
+    if os.path.exists(path):
+        h = hashlib.md5()
+        with open(path, 'r') as source:
+            h.update(source.read())  # IGNORE:E1101 - it does have update
+        return h.hexdigest()
+    else:
+        return None
+
+
+def restart_on_change(restart_map):
+    '''Restart services based on configuration files changing
+
+    This function is used as a decorator, for example
+
+        @restart_on_change({
+            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
+        })
+        def ceph_client_changed():
+            ...
+
+    In this example, the cinder-api and cinder-volume services
+    would be restarted if /etc/ceph/ceph.conf is changed by the
+    ceph_client_changed function.
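+
+    A second, hypothetical example (the file paths and service names
+    here are illustrative, not part of this charm): a service mapped
+    under several files is still restarted at most once per call,
+    because the accumulated restart list is de-duplicated before any
+    restarts are issued:
+
+        @restart_on_change({
+            '/etc/nova/nova.conf': ['nova-compute'],
+            '/etc/libvirt/libvirtd.conf': ['libvirt-bin', 'nova-compute'],
+        })
+        def config_changed():
+            ...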
+    '''
+    def wrap(f):
+        def wrapped_f(*args):
+            checksums = {}
+            for path in restart_map:
+                checksums[path] = file_hash(path)
+            f(*args)
+            restarts = []
+            for path in restart_map:
+                if checksums[path] != file_hash(path):
+                    restarts += restart_map[path]
+            for service_name in list(OrderedDict.fromkeys(restarts)):
+                service('restart', service_name)
+        return wrapped_f
+    return wrap
+
+
+def lsb_release():
+    '''Return /etc/lsb-release in a dict'''
+    d = {}
+    with open('/etc/lsb-release', 'r') as lsb:
+        for l in lsb:
+            k, v = l.split('=')
+            d[k.strip()] = v.strip()
+    return d
diff --git a/hooks/cloud-compute-relation-changed b/hooks/cloud-compute-relation-changed
index 6f9ff4f..6eb6593 120000
--- a/hooks/cloud-compute-relation-changed
+++ b/hooks/cloud-compute-relation-changed
@@ -1 +1 @@
-nova-compute-relations
\ No newline at end of file
+nova_compute_relations.py
\ No newline at end of file
diff --git a/hooks/cloud-compute-relation-joined b/hooks/cloud-compute-relation-joined
index 6f9ff4f..6eb6593 120000
--- a/hooks/cloud-compute-relation-joined
+++ b/hooks/cloud-compute-relation-joined
@@ -1 +1 @@
-nova-compute-relations
\ No newline at end of file
+nova_compute_relations.py
\ No newline at end of file
diff --git a/hooks/config-changed b/hooks/config-changed
index 6f9ff4f..6eb6593 120000
--- a/hooks/config-changed
+++ b/hooks/config-changed
@@ -1 +1 @@
-nova-compute-relations
\ No newline at end of file
+nova_compute_relations.py
\ No newline at end of file
diff --git a/hooks/image-service-relation-changed b/hooks/image-service-relation-changed
index 6f9ff4f..6eb6593 120000
--- a/hooks/image-service-relation-changed
+++ b/hooks/image-service-relation-changed
@@ -1 +1 @@
-nova-compute-relations
\ No newline at end of file
+nova_compute_relations.py
\ No newline at end of file
diff --git a/hooks/image-service-relation-joined b/hooks/image-service-relation-joined
deleted file mode 120000
index 6f9ff4f..0000000
--- a/hooks/image-service-relation-joined
+++ /dev/null
@@ -1 +0,0 @@
-nova-compute-relations
\ No newline at end of file
diff --git a/hooks/install b/hooks/install
deleted file mode 120000
index 6f9ff4f..0000000
--- a/hooks/install
+++ /dev/null
@@ -1 +0,0 @@
-nova-compute-relations
\ No newline at end of file
diff --git a/hooks/lib/nova/essex b/hooks/lib/nova/essex
deleted file mode 100644
index a82bf29..0000000
--- a/hooks/lib/nova/essex
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash -e
-
-# Essex-specific functions
-
-nova_set_or_update() {
-    # Set a config option in nova.conf or api-paste.ini, depending
-    # Defaults to updating nova.conf
-    local key=$1
-    local value=$2
-    local conf_file=$3
-    local pattern=""
-
-    local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf}
-    local api_conf=${API_CONF:-/etc/nova/api-paste.ini}
-    local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf}
-    [[ -z $key ]] && juju-log "$CHARM set_or_update: value $value missing key" && exit 1
-    [[ -z $value ]] && juju-log "$CHARM set_or_update: key $key missing value" && exit 1
-    [[ -z "$conf_file" ]] && conf_file=$nova_conf
-
-    case "$conf_file" in
-        "$nova_conf") match="\-\-$key="
-                      pattern="--$key="
-                      out=$pattern
-                      ;;
-        "$api_conf"|"$libvirtd_conf") match="^$key = "
-                                      pattern="$match"
-                                      out="$key = "
-                                      ;;
-        *) error_out "ERROR: set_or_update: Invalid conf_file ($conf_file)"
-    esac
-
-    cat $conf_file | grep "$match$value" >/dev/null &&
-        juju-log "$CHARM: $key=$value already in set in $conf_file" \
-        && return 0
-    if cat $conf_file | grep "$match" >/dev/null ; then
-        juju-log "$CHARM: Updating $conf_file,
$key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - CONFIG_CHANGED=True -} diff --git a/hooks/lib/nova/folsom b/hooks/lib/nova/folsom deleted file mode 100644 index e8194d8..0000000 --- a/hooks/lib/nova/folsom +++ /dev/null @@ -1,135 +0,0 @@ -#!/bin/bash -e - -# Folsom-specific functions - -nova_set_or_update() { - # Set a config option in nova.conf or api-paste.ini, depending - # Defaults to updating nova.conf - local key="$1" - local value="$2" - local conf_file="$3" - local section="${4:-DEFAULT}" - - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} - local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} - local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} - local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} - - [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 - [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 - - [[ -z "$conf_file" ]] && conf_file=$nova_conf - - local pattern="" - case "$conf_file" in - "$nova_conf") match="^$key=" - pattern="$key=" - out=$pattern - ;; - "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ - "$libvirtd_conf") - match="^$key = " - pattern="$match" - out="$key = " - ;; - *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" - esac - - cat $conf_file | grep "$match$value" >/dev/null && - juju-log "$CHARM: $key=$value already in set in $conf_file" \ - && return 0 - - case $conf_file in - "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$conf_file') -config.set('$section','$key','$value') -with open('$conf_file', 'wb') as configfile: - config.write(configfile) -" - ;; - *) - if cat $conf_file | grep "$match" >/dev/null ; then - juju-log "$CHARM: Updating $conf_file, $key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - ;; - esac - CONFIG_CHANGED="True" -} - -# Upgrade Helpers -nova_pre_upgrade() { - # Pre-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - return 0 # Nothing to do here, yet. -} - -nova_post_upgrade() { - # Post-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - local upgrade_from="$1" - juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> folsom." - # We only support essex -> folsom, currently. - [[ "$upgrade_from" != "essex" ]] && - error_out "Unsupported upgrade: $upgrade_from -> folsom" - - # This may be dangerous, if we are upgrading a number of units at once - # and they all begin the same migration concurrently. Migrate only from - # the cloud controller(s). - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - juju-log "$CHARM: Migrating nova database." - /usr/bin/nova-manage db sync - - # Trigger a service restart on all other nova nodes. - trigger_remote_service_restarts - fi - - # Packaging currently takes care of converting the Essex gflags format - # to .ini, but we need to update the api-paste.ini manually. 
It can be - # updated directly from keystone, via the identity-service relation, - # if it exists. Only services that require keystone credentials will - # have modified api-paste.ini, and only those services will have a .dpkg-dist - # version present. - local r_id=$(relation-ids identity-service) - if [[ -n "$r_id" ]] && [[ -e "$CONF_DIR/api-paste.ini.dpkg-dist" ]] ; then - # Backup the last api config, update the stock packaged version - # with our current Keystone info. - mv $API_CONF $CONF_DIR/api-paste.ini.juju-last - mv $CONF_DIR/api-paste.ini.dpkg-dist $CONF_DIR/api-paste.ini - - unit=$(relation-list -r $r_id | head -n1) - # Note, this should never be called from an relation hook, only config-changed. - export JUJU_REMOTE_UNIT=$unit - service_port=$(relation-get -r $r_id service_port) - auth_port=$(relation-get -r $r_id auth_port) - service_username=$(relation-get -r $r_id service_username) - service_password=$(relation-get -r $r_id service_password) - service_tenant=$(relation-get -r $r_id service_tenant) - keystone_host=$(relation-get -r $r_id private-address) - unset JUJU_REMOTE_UNIT - - juju-log "$CHARM: Updating new api-paste.ini with keystone data from $unit:$r_id" - set_or_update "service_host" "$keystone_host" "$API_CONF" - set_or_update "service_port" "$service_port" "$API_CONF" - set_or_update "auth_host" "$keystone_host" "$API_CONF" - set_or_update "auth_port" "$auth_port" "$API_CONF" - set_or_update "auth_uri" "http://$keystone_host:$service_port/" "$API_CONF" - set_or_update "admin_tenant_name" "$service_tenant" "$API_CONF" - set_or_update "admin_user" "$service_username" "$API_CONF" - set_or_update "admin_password" "$service_password" "$API_CONF" - fi - - # TEMPORARY - # RC3 packaging in cloud archive doesn't have this in postinst. Do it here - sed -e "s,^root_helper=.\+,rootwrap_config=/etc/nova/rootwrap.conf," -i /etc/nova/nova.conf - - juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> folsom." -} diff --git a/hooks/lib/nova/grizzly b/hooks/lib/nova/grizzly deleted file mode 100644 index 6904f39..0000000 --- a/hooks/lib/nova/grizzly +++ /dev/null @@ -1,97 +0,0 @@ -#!/bin/bash -e - -# Folsom-specific functions - -nova_set_or_update() { - # TODO: This needs to be shared among folsom, grizzly and beyond. 
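Pending that TODO, note that both the folsom and grizzly versions of this helper shell out to an inline python -c block for the quantum .ini files; the same logic reads more clearly as a standalone helper. A Python 2 sketch (the function name is ours, not the charm's):

import ConfigParser

def ini_set(conf_file, section, key, value):
    # Read the file, set one option in the given section, and rewrite
    # it in place, exactly as the embedded python -c snippet does.
    config = ConfigParser.RawConfigParser()
    config.read(conf_file)
    config.set(section, key, value)
    with open(conf_file, 'wb') as configfile:
        config.write(configfile)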
- # Set a config option in nova.conf or api-paste.ini, depending - # Defaults to updating nova.conf - local key="$1" - local value="$2" - local conf_file="$3" - local section="${4:-DEFAULT}" - - local nova_conf=${NOVA_CONF:-/etc/nova/nova.conf} - local api_conf=${API_CONF:-/etc/nova/api-paste.ini} - local quantum_conf=${QUANTUM_CONF:-/etc/quantum/quantum.conf} - local quantum_api_conf=${QUANTUM_API_CONF:-/etc/quantum/api-paste.ini} - local quantum_plugin_conf=${QUANTUM_PLUGIN_CONF:-/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini} - local libvirtd_conf=${LIBVIRTD_CONF:-/etc/libvirt/libvirtd.conf} - - [[ -z $key ]] && juju-log "$CHARM: set_or_update: value $value missing key" && exit 1 - [[ -z $value ]] && juju-log "$CHARM: set_or_update: key $key missing value" && exit 1 - - [[ -z "$conf_file" ]] && conf_file=$nova_conf - - local pattern="" - case "$conf_file" in - "$nova_conf") match="^$key=" - pattern="$key=" - out=$pattern - ;; - "$api_conf"|"$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf"| \ - "$libvirtd_conf") - match="^$key = " - pattern="$match" - out="$key = " - ;; - *) juju-log "$CHARM ERROR: set_or_update: Invalid conf_file ($conf_file)" - esac - - cat $conf_file | grep "$match$value" >/dev/null && - juju-log "$CHARM: $key=$value already in set in $conf_file" \ - && return 0 - - case $conf_file in - "$quantum_conf"|"$quantum_api_conf"|"$quantum_plugin_conf") - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$conf_file') -config.set('$section','$key','$value') -with open('$conf_file', 'wb') as configfile: - config.write(configfile) -" - ;; - *) - if cat $conf_file | grep "$match" >/dev/null ; then - juju-log "$CHARM: Updating $conf_file, $key=$value" - sed -i "s|\($pattern\).*|\1$value|" $conf_file - else - juju-log "$CHARM: Setting new option $key=$value in $conf_file" - echo "$out$value" >>$conf_file - fi - ;; - esac - CONFIG_CHANGED="True" -} - -# Upgrade Helpers -nova_pre_upgrade() { - # Pre-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - return 0 # Nothing to do here, yet. -} - -nova_post_upgrade() { - # Post-upgrade helper. Caller should pass the version of OpenStack we are - # upgrading from. - local upgrade_from="$1" - juju-log "$CHARM: Running post-upgrade hook: $upgrade_from -> grizzly." - # We only support folsom -> grizzly, currently. - [[ "$upgrade_from" != "folsom" ]] && - error_out "Unsupported upgrade: $upgrade_from -> grizzly" - - # This may be dangerous, if we are upgrading a number of units at once - # and they all begin the same migration concurrently. Migrate only from - # the cloud controller(s). - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - juju-log "$CHARM: Migrating nova database." - /usr/bin/nova-manage db sync - - # Trigger a service restart on all other nova nodes. - trigger_remote_service_restarts - fi - - juju-log "$CHARM: Post-upgrade hook complete: $upgrade_from -> grizzly." -} diff --git a/hooks/lib/nova/nova-common b/hooks/lib/nova/nova-common deleted file mode 100644 index d212a50..0000000 --- a/hooks/lib/nova/nova-common +++ /dev/null @@ -1,148 +0,0 @@ -#!/bin/bash -e - -# Common utility functions used across all nova charms. - -CONFIG_CHANGED=False -HOOKS_DIR="$CHARM_DIR/hooks" - -# Load the common OpenStack helper library. -if [[ -e $HOOKS_DIR/lib/openstack-common ]] ; then - . $HOOKS_DIR/lib/openstack-common -else - juju-log "Couldn't load $HOOKS_DIR/lib/opentack-common." 
&& exit 1 -fi - -set_or_update() { - # Update config flags in nova.conf or api-paste.ini. - # Config layout changed in Folsom, so this is now OpenStack release specific. - local rel=$(get_os_codename_package "nova-common") - . $HOOKS_DIR/lib/nova/$rel - nova_set_or_update $@ -} - -function set_config_flags() { - # Set user-defined nova.conf flags from deployment config - juju-log "$CHARM: Processing config-flags." - flags=$(config-get config-flags) - if [[ "$flags" != "None" && -n "$flags" ]] ; then - for f in $(echo $flags | sed -e 's/,/ /g') ; do - k=$(echo $f | cut -d= -f1) - v=$(echo $f | cut -d= -f2) - set_or_update "$k" "$v" - done - fi -} - -configure_volume_service() { - local svc="$1" - local cur_vers="$(get_os_codename_package "nova-common")" - case "$svc" in - "cinder") - set_or_update "volume_api_class" "nova.volume.cinder.API" ;; - "nova-volume") - # nova-volume only supported before grizzly. - [[ "$cur_vers" == "essex" ]] || [[ "$cur_vers" == "folsom" ]] && - set_or_update "volume_api_class" "nova.volume.api.API" - ;; - *) juju-log "$CHARM ERROR - configure_volume_service: Invalid service $svc" - return 1 ;; - esac -} - -function configure_network_manager { - local manager="$1" - echo "$CHARM: configuring $manager network manager" - case $1 in - "FlatManager") - set_or_update "network_manager" "nova.network.manager.FlatManager" - ;; - "FlatDHCPManager") - set_or_update "network_manager" "nova.network.manager.FlatDHCPManager" - - if [[ "$CHARM" == "nova-compute" ]] ; then - local flat_interface=$(config-get flat-interface) - local ec2_host=$(relation-get ec2_host) - set_or_update flat_inteface "$flat_interface" - set_or_update ec2_dmz_host "$ec2_host" - - # Ensure flat_interface has link. - if ip link show $flat_interface >/dev/null 2>&1 ; then - ip link set $flat_interface up - fi - - # work around (LP: #1035172) - if [[ -e /dev/vhost-net ]] ; then - iptables -A POSTROUTING -t mangle -p udp --dport 68 -j CHECKSUM \ - --checksum-fill - fi - fi - - ;; - "Quantum") - local local_ip=$(get_ip `unit-get private-address`) - [[ -n $local_ip ]] || { - juju-log "Unable to resolve local IP address" - exit 1 - } - set_or_update "network_api_class" "nova.network.quantumv2.api.API" - set_or_update "quantum_auth_strategy" "keystone" - set_or_update "core_plugin" "$QUANTUM_CORE_PLUGIN" "$QUANTUM_CONF" - set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" - if [ "$QUANTUM_PLUGIN" == "ovs" ]; then - set_or_update "tenant_network_type" "gre" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "enable_tunneling" "True" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "tunnel_id_ranges" "1:1000" $QUANTUM_PLUGIN_CONF "OVS" - set_or_update "local_ip" "$local_ip" $QUANTUM_PLUGIN_CONF "OVS" - fi - ;; - *) juju-log "ERROR: Invalid network manager $1" && exit 1 ;; - esac -} - -function trigger_remote_service_restarts() { - # Trigger a service restart on all other nova nodes that have a relation - # via the cloud-controller interface. - - # possible relations to other nova services. - local relations="cloud-compute nova-volume-service" - - for rel in $relations; do - local r_ids=$(relation-ids $rel) - for r_id in $r_ids ; do - juju-log "$CHARM: Triggering a service restart on relation $r_id." - relation-set -r $r_id restart-trigger=$(uuid) - done - done -} - -do_openstack_upgrade() { - # update openstack components to those provided by a new installation source - # it is assumed the calling hook has confirmed that the upgrade is sane. 
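For orientation, the deleted function's sequence is: back up the config directory, run the target release's pre-upgrade hook, switch apt sources, upgrade packages with confold, then run the post-upgrade hook. A rough Python sketch of that pipeline; passing the hooks in as callables is our simplification here, the charm instead sources them from $HOOKS_DIR/lib/nova/<release>:

import subprocess
from datetime import datetime

def do_openstack_upgrade(configure_source, pre_upgrade, post_upgrade,
                         packages, conf_dir='/etc/nova'):
    # Back up the config directory before touching anything.
    stamp = datetime.now().strftime('%Y%m%d%M%S')
    subprocess.check_call(['tar', '-pcf',
                           '/var/lib/juju/backup-%s.tar' % stamp, conf_dir])
    pre_upgrade()
    configure_source()  # point apt at the new installation source
    subprocess.check_call(['apt-get', 'update'])
    subprocess.check_call(['apt-get', '--option',
                           'Dpkg::Options::=--force-confold', '-y',
                           'install', '--no-install-recommends'] + packages)
    post_upgrade()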
- local rel="$1" - shift - local packages=$@ - - orig_os_rel=$(get_os_codename_package "nova-common") - new_rel=$(get_os_codename_install_source "$rel") - - # Backup the config directory. - local stamp=$(date +"%Y%m%d%M%S") - tar -pcf /var/lib/juju/$CHARM-backup-$stamp.tar $CONF_DIR - - # load the release helper library for pre/post upgrade hooks specific to the - # release we are upgrading to. - . $HOOKS_DIR/lib/nova/$new_rel - - # new release specific pre-upgrade hook - nova_pre_upgrade "$orig_os_rel" - - # Setup apt repository access and kick off the actual package upgrade. - configure_install_source "$rel" - apt-get update - DEBIAN_FRONTEND=noninteractive apt-get --option Dpkg::Options::=--force-confold -y \ - install --no-install-recommends $packages - - # new release sepcific post-upgrade hook - nova_post_upgrade "$orig_os_rel" - -} diff --git a/hooks/lib/openstack-common b/hooks/lib/openstack-common deleted file mode 100644 index d5b19c5..0000000 --- a/hooks/lib/openstack-common +++ /dev/null @@ -1,781 +0,0 @@ -#!/bin/bash -e - -# Common utility functions used across all OpenStack charms. - -error_out() { - juju-log "$CHARM ERROR: $@" - exit 1 -} - -function service_ctl_status { - # Return 0 if a service is running, 1 otherwise. - local svc="$1" - local status=$(service $svc status | cut -d/ -f1 | awk '{ print $2 }') - case $status in - "start") return 0 ;; - "stop") return 1 ;; - *) error_out "Unexpected status of service $svc: $status" ;; - esac -} - -function service_ctl { - # control a specific service, or all (as defined by $SERVICES) - # service restarts will only occur depending on global $CONFIG_CHANGED, - # which should be updated in charm's set_or_update(). - local config_changed=${CONFIG_CHANGED:-True} - if [[ $1 == "all" ]] ; then - ctl="$SERVICES" - else - ctl="$1" - fi - action="$2" - if [[ -z "$ctl" ]] || [[ -z "$action" ]] ; then - error_out "ERROR service_ctl: Not enough arguments" - fi - - for i in $ctl ; do - case $action in - "start") - service_ctl_status $i || service $i start ;; - "stop") - service_ctl_status $i && service $i stop || return 0 ;; - "restart") - if [[ "$config_changed" == "True" ]] ; then - service_ctl_status $i && service $i restart || service $i start - fi - ;; - esac - if [[ $? != 0 ]] ; then - juju-log "$CHARM: service_ctl ERROR - Service $i failed to $action" - fi - done - # all configs should have been reloaded on restart of all services, reset - # flag if its being used. - if [[ "$action" == "restart" ]] && [[ -n "$CONFIG_CHANGED" ]] && - [[ "$ctl" == "all" ]]; then - CONFIG_CHANGED="False" - fi -} - -function configure_install_source { - # Setup and configure installation source based on a config flag. - local src="$1" - - # Default to installing from the main Ubuntu archive. - [[ $src == "distro" ]] || [[ -z "$src" ]] && return 0 - - . /etc/lsb-release - - # standard 'ppa:someppa/name' format. - if [[ "${src:0:4}" == "ppa:" ]] ; then - juju-log "$CHARM: Configuring installation from custom src ($src)" - add-apt-repository -y "$src" || error_out "Could not configure PPA access." - return 0 - fi - - # standard 'deb http://url/ubuntu main' entries. 
gpg key ids must - # be appended to the end of url after a |, ie: - # 'deb http://url/ubuntu main|$GPGKEYID' - if [[ "${src:0:3}" == "deb" ]] ; then - juju-log "$CHARM: Configuring installation from custom src URL ($src)" - if echo "$src" | grep -q "|" ; then - # gpg key id tagged to end of url folloed by a | - url=$(echo $src | cut -d'|' -f1) - key=$(echo $src | cut -d'|' -f2) - juju-log "$CHARM: Importing repository key: $key" - apt-key adv --keyserver keyserver.ubuntu.com --recv-keys "$key" || \ - juju-log "$CHARM WARN: Could not import key from keyserver: $key" - else - juju-log "$CHARM No repository key specified." - url="$src" - fi - echo "$url" > /etc/apt/sources.list.d/juju_deb.list - return 0 - fi - - # Cloud Archive - if [[ "${src:0:6}" == "cloud:" ]] ; then - - # current os releases supported by the UCA. - local cloud_archive_versions="folsom grizzly" - - local ca_rel=$(echo $src | cut -d: -f2) - local u_rel=$(echo $ca_rel | cut -d- -f1) - local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1) - - [[ "$u_rel" != "$DISTRIB_CODENAME" ]] && - error_out "Cannot install from Cloud Archive pocket $src " \ - "on this Ubuntu version ($DISTRIB_CODENAME)!" - - valid_release="" - for rel in $cloud_archive_versions ; do - if [[ "$os_rel" == "$rel" ]] ; then - valid_release=1 - juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive." - fi - done - if [[ -z "$valid_release" ]] ; then - error_out "OpenStack release ($os_rel) not supported by "\ - "the Ubuntu Cloud Archive." - fi - - # CA staging repos are standard PPAs. - if echo $ca_rel | grep -q "staging" ; then - add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging - return 0 - fi - - # the others are LP-external deb repos. - case "$ca_rel" in - "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;; - "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;; - *) error_out "Invalid Cloud Archive repo specified: $src" - esac - - apt-get -y install ubuntu-cloud-keyring - entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main" - echo "$entry" \ - >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list - return 0 - fi - - error_out "Invalid installation source specified in config: $src" - -} - -get_os_codename_install_source() { - # derive the openstack release provided by a supported installation source. - local rel="$1" - local codename="unknown" - . /etc/lsb-release - - # map ubuntu releases to the openstack version shipped with it. - if [[ "$rel" == "distro" ]] ; then - case "$DISTRIB_CODENAME" in - "oneiric") codename="diablo" ;; - "precise") codename="essex" ;; - "quantal") codename="folsom" ;; - "raring") codename="grizzly" ;; - esac - fi - - # derive version from cloud archive strings. 
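The cloud-archive parsing below unpacks strings of the form 'cloud:<ubuntu-series>-<openstack-release>[/pocket]'. The same derivation in Python, for reference (a sketch; the function name is ours):

def codename_from_cloud_archive(src, distrib_codename):
    # e.g. ('cloud:precise-folsom/updates', 'precise') -> 'folsom';
    # returns 'unknown' when the Ubuntu series does not match.
    rel = src.split(':', 1)[1]
    u_rel, _, ca_rel = rel.partition('-')
    if u_rel != distrib_codename:
        return 'unknown'
    return ca_rel.split('/', 1)[0]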
- if [[ "${rel:0:6}" == "cloud:" ]] ; then - rel=$(echo $rel | cut -d: -f2) - local u_rel=$(echo $rel | cut -d- -f1) - local ca_rel=$(echo $rel | cut -d- -f2) - if [[ "$u_rel" == "$DISTRIB_CODENAME" ]] ; then - case "$ca_rel" in - "folsom"|"folsom/updates"|"folsom/proposed"|"folsom/staging") - codename="folsom" ;; - "grizzly"|"grizzly/updates"|"grizzly/proposed"|"grizzly/staging") - codename="grizzly" ;; - esac - fi - fi - - # have a guess based on the deb string provided - if [[ "${rel:0:3}" == "deb" ]] || \ - [[ "${rel:0:3}" == "ppa" ]] ; then - CODENAMES="diablo essex folsom grizzly havana" - for cname in $CODENAMES; do - if echo $rel | grep -q $cname; then - codename=$cname - fi - done - fi - echo $codename -} - -get_os_codename_package() { - local pkg_vers=$(dpkg -l | grep "$1" | awk '{ print $3 }') || echo "none" - pkg_vers=$(echo $pkg_vers | cut -d: -f2) # epochs - case "${pkg_vers:0:6}" in - "2011.2") echo "diablo" ;; - "2012.1") echo "essex" ;; - "2012.2") echo "folsom" ;; - "2013.1") echo "grizzly" ;; - "2013.2") echo "havana" ;; - esac -} - -get_os_version_codename() { - case "$1" in - "diablo") echo "2011.2" ;; - "essex") echo "2012.1" ;; - "folsom") echo "2012.2" ;; - "grizzly") echo "2013.1" ;; - "havana") echo "2013.2" ;; - esac -} - -get_ip() { - dpkg -l | grep -q python-dnspython || { - apt-get -y install python-dnspython 2>&1 > /dev/null - } - hostname=$1 - python -c " -import dns.resolver -import socket -try: - # Test to see if already an IPv4 address - socket.inet_aton('$hostname') - print '$hostname' -except socket.error: - try: - answers = dns.resolver.query('$hostname', 'A') - if answers: - print answers[0].address - except dns.resolver.NXDOMAIN: - pass -" -} - -# Common storage routines used by cinder, nova-volume and swift-storage. -clean_storage() { - # if configured to overwrite existing storage, we unmount the block-dev - # if mounted and clear any previous pv signatures - local block_dev="$1" - juju-log "Cleaining storage '$block_dev'" - if grep -q "^$block_dev" /proc/mounts ; then - mp=$(grep "^$block_dev" /proc/mounts | awk '{ print $2 }') - juju-log "Unmounting $block_dev from $mp" - umount "$mp" || error_out "ERROR: Could not unmount storage from $mp" - fi - if pvdisplay "$block_dev" >/dev/null 2>&1 ; then - juju-log "Removing existing LVM PV signatures from $block_dev" - - # deactivate any volgroups that may be built on this dev - vg=$(pvdisplay $block_dev | grep "VG Name" | awk '{ print $3 }') - if [[ -n "$vg" ]] ; then - juju-log "Deactivating existing volume group: $vg" - vgchange -an "$vg" || - error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?" - fi - echo "yes" | pvremove -ff "$block_dev" || - error_out "Could not pvremove $block_dev" - else - juju-log "Zapping disk of all GPT and MBR structures" - sgdisk --zap-all $block_dev || - error_out "Unable to zap $block_dev" - fi -} - -function get_block_device() { - # given a string, return full path to the block device for that - # if input is not a block device, find a loopback device - local input="$1" - - case "$input" in - /dev/*) [[ ! -b "$input" ]] && error_out "$input does not exist." - echo "$input"; return 0;; - /*) :;; - *) [[ ! -b "/dev/$input" ]] && error_out "/dev/$input does not exist." - echo "/dev/$input"; return 0;; - esac - - # this represents a file - # support "/path/to/file|5G" - local fpath size oifs="$IFS" - if [ "${input#*|}" != "${input}" ]; then - size=${input##*|} - fpath=${input%|*} - else - fpath=${input} - size=5G - fi - - ## loop devices are not namespaced. 
This is bad for containers. - ## it means that the output of 'losetup' may have the given $fpath - ## in it, but that may not represent this containers $fpath, but - ## another containers. To address that, we really need to - ## allow some uniq container-id to be expanded within path. - ## TODO: find a unique container-id that will be consistent for - ## this container throughout its lifetime and expand it - ## in the fpath. - # fpath=${fpath//%{id}/$THAT_ID} - - local found="" - # parse through 'losetup -a' output, looking for this file - # output is expected to look like: - # /dev/loop0: [0807]:961814 (/tmp/my.img) - found=$(losetup -a | - awk 'BEGIN { found=0; } - $3 == f { sub(/:$/,"",$1); print $1; found=found+1; } - END { if( found == 0 || found == 1 ) { exit(0); }; exit(1); }' \ - f="($fpath)") - - if [ $? -ne 0 ]; then - echo "multiple devices found for $fpath: $found" 1>&2 - return 1; - fi - - [ -n "$found" -a -b "$found" ] && { echo "$found"; return 1; } - - if [ -n "$found" ]; then - echo "confused, $found is not a block device for $fpath"; - return 1; - fi - - # no existing device was found, create one - mkdir -p "${fpath%/*}" - truncate --size "$size" "$fpath" || - { echo "failed to create $fpath of size $size"; return 1; } - - found=$(losetup --find --show "$fpath") || - { echo "failed to setup loop device for $fpath" 1>&2; return 1; } - - echo "$found" - return 0 -} - -HAPROXY_CFG=/etc/haproxy/haproxy.cfg -HAPROXY_DEFAULT=/etc/default/haproxy -########################################################################## -# Description: Configures HAProxy services for Openstack API's -# Parameters: -# Space delimited list of service:port:mode combinations for which -# haproxy service configuration should be generated for. The function -# assumes the name of the peer relation is 'cluster' and that every -# service unit in the peer relation is running the same services. -# -# Services that do not specify :mode in parameter will default to http. 
-# -# Example -# configure_haproxy cinder_api:8776:8756:tcp nova_api:8774:8764:http -########################################################################## -configure_haproxy() { - local address=`unit-get private-address` - local name=${JUJU_UNIT_NAME////-} - cat > $HAPROXY_CFG << EOF -global - log 127.0.0.1 local0 - log 127.0.0.1 local1 notice - maxconn 20000 - user haproxy - group haproxy - spread-checks 0 - -defaults - log global - mode http - option httplog - option dontlognull - retries 3 - timeout queue 1000 - timeout connect 1000 - timeout client 30000 - timeout server 30000 - -listen stats :8888 - mode http - stats enable - stats hide-version - stats realm Haproxy\ Statistics - stats uri / - stats auth admin:password - -EOF - for service in $@; do - local service_name=$(echo $service | cut -d : -f 1) - local haproxy_listen_port=$(echo $service | cut -d : -f 2) - local api_listen_port=$(echo $service | cut -d : -f 3) - local mode=$(echo $service | cut -d : -f 4) - [[ -z "$mode" ]] && mode="http" - juju-log "Adding haproxy configuration entry for $service "\ - "($haproxy_listen_port -> $api_listen_port)" - cat >> $HAPROXY_CFG << EOF -listen $service_name 0.0.0.0:$haproxy_listen_port - balance roundrobin - mode $mode - option ${mode}log - server $name $address:$api_listen_port check -EOF - local r_id="" - local unit="" - for r_id in `relation-ids cluster`; do - for unit in `relation-list -r $r_id`; do - local unit_name=${unit////-} - local unit_address=`relation-get -r $r_id private-address $unit` - if [ -n "$unit_address" ]; then - echo " server $unit_name $unit_address:$api_listen_port check" \ - >> $HAPROXY_CFG - fi - done - done - done - echo "ENABLED=1" > $HAPROXY_DEFAULT - service haproxy restart -} - -########################################################################## -# Description: Query HA interface to determine is cluster is configured -# Returns: 0 if configured, 1 if not configured -########################################################################## -is_clustered() { - local r_id="" - local unit="" - for r_id in $(relation-ids ha); do - if [ -n "$r_id" ]; then - for unit in $(relation-list -r $r_id); do - clustered=$(relation-get -r $r_id clustered $unit) - if [ -n "$clustered" ]; then - juju-log "Unit is haclustered" - return 0 - fi - done - fi - done - juju-log "Unit is not haclustered" - return 1 -} - -########################################################################## -# Description: Return a list of all peers in cluster relations -########################################################################## -peer_units() { - local peers="" - local r_id="" - for r_id in $(relation-ids cluster); do - peers="$peers $(relation-list -r $r_id)" - done - echo $peers -} - -########################################################################## -# Description: Determines whether the current unit is the oldest of all -# its peers - supports partial leader election -# Returns: 0 if oldest, 1 if not -########################################################################## -oldest_peer() { - peers=$1 - local l_unit_no=$(echo $JUJU_UNIT_NAME | cut -d / -f 2) - for peer in $peers; do - echo "Comparing $JUJU_UNIT_NAME with peers: $peers" - local r_unit_no=$(echo $peer | cut -d / -f 2) - if (($r_unit_no<$l_unit_no)); then - juju-log "Not oldest peer; deferring" - return 1 - fi - done - juju-log "Oldest peer; might take charge?" 
- return 0 -} - -########################################################################## -# Description: Determines whether the current service units is the -# leader within a) a cluster of its peers or b) across a -# set of unclustered peers. -# Parameters: CRM resource to check ownership of if clustered -# Returns: 0 if leader, 1 if not -########################################################################## -eligible_leader() { - if is_clustered; then - if ! is_leader $1; then - juju-log 'Deferring action to CRM leader' - return 1 - fi - else - peers=$(peer_units) - if [ -n "$peers" ] && ! oldest_peer "$peers"; then - juju-log 'Deferring action to oldest service unit.' - return 1 - fi - fi - return 0 -} - -########################################################################## -# Description: Query Cluster peer interface to see if peered -# Returns: 0 if peered, 1 if not peered -########################################################################## -is_peered() { - local r_id=$(relation-ids cluster) - if [ -n "$r_id" ]; then - if [ -n "$(relation-list -r $r_id)" ]; then - juju-log "Unit peered" - return 0 - fi - fi - juju-log "Unit not peered" - return 1 -} - -########################################################################## -# Description: Determines whether host is owner of clustered services -# Parameters: Name of CRM resource to check ownership of -# Returns: 0 if leader, 1 if not leader -########################################################################## -is_leader() { - hostname=`hostname` - if [ -x /usr/sbin/crm ]; then - if crm resource show $1 | grep -q $hostname; then - juju-log "$hostname is cluster leader." - return 0 - fi - fi - juju-log "$hostname is not cluster leader." - return 1 -} - -########################################################################## -# Description: Determines whether enough data has been provided in -# configuration or relation data to configure HTTPS. -# Parameters: None -# Returns: 0 if HTTPS can be configured, 1 if not. -########################################################################## -https() { - local r_id="" - if [[ -n "$(config-get ssl_cert)" ]] && - [[ -n "$(config-get ssl_key)" ]] ; then - return 0 - fi - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - if [[ "$(relation-get -r $r_id https_keystone $unit)" == "True" ]] && - [[ -n "$(relation-get -r $r_id ssl_cert $unit)" ]] && - [[ -n "$(relation-get -r $r_id ssl_key $unit)" ]] && - [[ -n "$(relation-get -r $r_id ca_cert $unit)" ]] ; then - return 0 - fi - done - done - return 1 -} - -########################################################################## -# Description: For a given number of port mappings, configures apache2 -# HTTPs local reverse proxying using certficates and keys provided in -# either configuration data (preferred) or relation data. Assumes ports -# are not in use (calling charm should ensure that). -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy(s) have been configured, 0 if not. -########################################################################## -enable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Enabling HTTPS for port mappings: $port_maps." - - # allow overriding of keystone provided certs with those set manually - # in config. 
- local cert=$(config-get ssl_cert) - local key=$(config-get ssl_key) - local ca_cert="" - if [[ -z "$cert" ]] || [[ -z "$key" ]] ; then - juju-log "Inspecting identity-service relations for SSL certificate." - local r_id="" - cert="" - key="" - ca_cert="" - for r_id in $(relation-ids identity-service) ; do - for unit in $(relation-list -r $r_id) ; do - [[ -z "$cert" ]] && cert="$(relation-get -r $r_id ssl_cert $unit)" - [[ -z "$key" ]] && key="$(relation-get -r $r_id ssl_key $unit)" - [[ -z "$ca_cert" ]] && ca_cert="$(relation-get -r $r_id ca_cert $unit)" - done - done - [[ -n "$cert" ]] && cert=$(echo $cert | base64 -di) - [[ -n "$key" ]] && key=$(echo $key | base64 -di) - [[ -n "$ca_cert" ]] && ca_cert=$(echo $ca_cert | base64 -di) - else - juju-log "Using SSL certificate provided in service config." - fi - - [[ -z "$cert" ]] || [[ -z "$key" ]] && - juju-log "Expected but could not find SSL certificate data, not "\ - "configuring HTTPS!" && return 1 - - apt-get -y install apache2 - a2enmod ssl proxy proxy_http | grep -v "To activate the new configuration" && - http_restart=1 - - mkdir -p /etc/apache2/ssl/$CHARM - echo "$cert" >/etc/apache2/ssl/$CHARM/cert - echo "$key" >/etc/apache2/ssl/$CHARM/key - if [[ -n "$ca_cert" ]] ; then - juju-log "Installing Keystone supplied CA cert." - echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt - update-ca-certificates --fresh - - # XXX TODO: Find a better way of exporting this? - if [[ "$CHARM" == "nova-cloud-controller" ]] ; then - [[ -e /var/www/keystone_juju_ca_cert.crt ]] && - rm -rf /var/www/keystone_juju_ca_cert.crt - ln -s /usr/local/share/ca-certificates/keystone_juju_ca_cert.crt \ - /var/www/keystone_juju_ca_cert.crt - fi - - fi - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - juju-log "Creating apache2 reverse proxy vhost for $port_map." - cat >/etc/apache2/sites-available/${CHARM}_${ext_port} < - ServerName $(unit-get private-address) - SSLEngine on - SSLCertificateFile /etc/apache2/ssl/$CHARM/cert - SSLCertificateKeyFile /etc/apache2/ssl/$CHARM/key - ProxyPass / http://localhost:$int_port/ - ProxyPassReverse / http://localhost:$int_port/ - ProxyPreserveHost on - - - Order deny,allow - Allow from all - - - Order allow,deny - Allow from all - -END - a2ensite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - -########################################################################## -# Description: Ensure HTTPS reverse proxying is disabled for given port -# mappings. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if reverse proxy is not active for all portmaps, 1 on error. -########################################################################## -disable_https() { - local port_maps="$@" - local http_restart="" - juju-log "Ensuring HTTPS disabled for $port_maps." - ( [[ ! -d /etc/apache2 ]] || [[ ! -d /etc/apache2/ssl/$CHARM ]] ) && return 0 - for port_map in $port_maps ; do - local ext_port=$(echo $port_map | cut -d: -f1) - local int_port=$(echo $port_map | cut -d: -f2) - if [[ -e /etc/apache2/sites-available/${CHARM}_${ext_port} ]] ; then - juju-log "Disabling HTTPS reverse proxy for $CHARM $port_map." 
- a2dissite ${CHARM}_${ext_port} | grep -v "To activate the new configuration" && - http_restart=1 - fi - done - if [[ -n "$http_restart" ]] ; then - service apache2 restart - fi -} - - -########################################################################## -# Description: Ensures HTTPS is either enabled or disabled for given port -# mapping. -# Parameters: Variable number of proxy port mappings as -# $internal:$external. -# Returns: 0 if HTTPS reverse proxy is in place, 1 if it is not. -########################################################################## -setup_https() { - # configure https via apache reverse proxying either - # using certs provided by config or keystone. - [[ -z "$CHARM" ]] && - error_out "setup_https(): CHARM not set." - if ! https ; then - disable_https $@ - else - enable_https $@ - fi -} - -########################################################################## -# Description: Determine correct API server listening port based on -# existence of HTTPS reverse proxy and/or haproxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for API service. -########################################################################## -determine_api_port() { - local public_port="$1" - local i=0 - ( [[ -n "$(peer_units)" ]] || is_clustered >/dev/null 2>&1 ) && i=$[$i + 1] - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Determine correct proxy listening port based on public IP + -# existence of HTTPS reverse proxy. -# Paremeters: The standard public port for given service. -# Returns: The correct listening port for haproxy service public address. -########################################################################## -determine_haproxy_port() { - local public_port="$1" - local i=0 - https >/dev/null 2>&1 && i=$[$i + 1] - echo $[$public_port - $[$i * 10]] -} - -########################################################################## -# Description: Print the value for a given config option in an OpenStack -# .ini style configuration file. -# Parameters: File path, option to retrieve, optional -# section name (default=DEFAULT) -# Returns: Prints value if set, prints nothing otherwise. -########################################################################## -local_config_get() { - # return config values set in openstack .ini config files. - # default placeholders starting (eg, %AUTH_HOST%) treated as - # unset values. - local file="$1" - local option="$2" - local section="$3" - [[ -z "$section" ]] && section="DEFAULT" - python -c " -import ConfigParser -config = ConfigParser.RawConfigParser() -config.read('$file') -try: - value = config.get('$section', '$option') -except: - print '' - exit(0) -if value.startswith('%'): exit(0) -print value -" -} - -########################################################################## -# Description: Creates an rc file exporting environment variables to a -# script_path local to the charm's installed directory. -# Any charm scripts run outside the juju hook environment can source this -# scriptrc to obtain updated config information necessary to perform health -# checks or service changes -# -# Parameters: -# An array of '=' delimited ENV_VAR:value combinations to export. 
-# If optional script_path key is not provided in the array, script_path -# defaults to scripts/scriptrc -########################################################################## -function save_script_rc { - if [ ! -n "$JUJU_UNIT_NAME" ]; then - echo "Error: Missing JUJU_UNIT_NAME environment variable" - exit 1 - fi - # our default unit_path - unit_path="$CHARM_DIR/scripts/scriptrc" - echo $unit_path - tmp_rc="/tmp/${JUJU_UNIT_NAME/\//-}rc" - - echo "#!/bin/bash" > $tmp_rc - for env_var in "${@}" - do - if `echo $env_var | grep -q script_path`; then - # well then we need to reset the new unit-local script path - unit_path="$CHARM_DIR/${env_var/script_path=/}" - else - echo "export $env_var" >> $tmp_rc - fi - done - chmod 755 $tmp_rc - mv $tmp_rc $unit_path -} diff --git a/hooks/misc_utils.py b/hooks/misc_utils.py new file mode 100644 index 0000000..c5748f1 --- /dev/null +++ b/hooks/misc_utils.py @@ -0,0 +1,31 @@ +import subprocess + +from charmhelpers.core.hookenv import ( + relation_get, + relation_ids, + related_units, +) + +from charmhelpers.contrib.hahelpers.ceph import ( + create_keyring as ceph_create_keyring, + keyring_path as ceph_keyring_path, +) + + +# This was pulled from cinder redux. It should go somewhere common, charmhelpers.hahelpers.ceph? + +def ensure_ceph_keyring(service): + '''Ensures a ceph keyring exists. Returns True if so, False otherwise''' + # TODO: This can be shared between cinder + glance, find a home for it. + key = None + for rid in relation_ids('ceph'): + for unit in related_units(rid): + key = relation_get('key', rid=rid, unit=unit) + if key: + break + if not key: + return False + ceph_create_keyring(service=service, key=key) + keyring = ceph_keyring_path(service) + subprocess.check_call(['chown', 'cinder.cinder', keyring]) + return True diff --git a/hooks/nova-compute-common b/hooks/nova-compute-common deleted file mode 100755 index eaaa4dc..0000000 --- a/hooks/nova-compute-common +++ /dev/null @@ -1,309 +0,0 @@ -#!/bin/bash -e - -CHARM="nova-compute" -PACKAGES="nova-compute python-keystone genisoimage" -SERVICES="nova-compute" -CONF_DIR="/etc/nova" -NOVA_CONF=$(config-get nova-config) -API_CONF="/etc/nova/api-paste.ini" -QUANTUM_CONF="/etc/quantum/quantum.conf" -LIBVIRTD_CONF="/etc/libvirt/libvirtd.conf" -HOOKS_DIR="$CHARM_DIR/hooks" -MULTI_HOST=$(config-get multi-host) - -if [ -f /etc/nova/nm.conf ]; then - NET_MANAGER=$(cat /etc/nova/nm.conf) -fi -case $NET_MANAGER in - "Quantum") - QUANTUM_PLUGIN=$(cat /etc/nova/quantum_plugin.conf) - case $QUANTUM_PLUGIN in - "ovs") - SERVICES="$SERVICES quantum-plugin-openvswitch-agent" - QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" - ;; - "nvp") - QUANTUM_PLUGIN_CONF="/etc/quantum/plugins/nicira/nvp.ini" - ;; - *) - juju-log "Unrecognised plugin for quantum: $QUANTUM_PLUGIN" && exit 1 - ;; - esac - ;; - "FlatManager"|"FlatDHCPManager") - if [[ "$MULTI_HOST" == "yes" ]] ; then - SERVICES="$SERVICES nova-api nova-network" - fi - ;; -esac - -if [[ -e $HOOKS_DIR/lib/nova/nova-common ]] ; then - . $HOOKS_DIR/lib/nova/nova-common -else - juju-log "$CHARM: Couldn't load $HOOKS_DIR/lib/nova-common" && exit 1 -fi - -determine_compute_package() { - # determines the appropriate nova-compute package to install - # for the configured virt-type. 
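The case statement that follows is a straight lookup from virt-type to package; as a Python table, for reference (a sketch, not charm code):

COMPUTE_PACKAGES = {
    'kvm': 'nova-compute-kvm',
    'qemu': 'nova-compute-qemu',
    'xen': 'nova-compute-xen',
    'uml': 'nova-compute-uml',
    'lxc': 'nova-compute-lxc',
}

def determine_compute_package(virt_type):
    try:
        return COMPUTE_PACKAGES[virt_type]
    except KeyError:
        raise ValueError('Unsupported virt_type=%s' % virt_type)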
- local virt_type="$1" - local compute_pkg="" - case $virt_type in - "kvm") compute_pkg="nova-compute-kvm";; - "qemu") compute_pkg="nova-compute-qemu";; - "xen") compute_pkg="nova-compute-xen";; - "uml") compute_pkg="nova-compute-uml";; - "lxc") compute_pkg="nova-compute-lxc";; - *) error_out "ERROR: Unsupported virt_type=$virt_type";; - esac - echo "$compute_pkg" -} - -function setup_bridge { - # XXX This is required by nova-network and will likely move somewhere else - # once we can split these services up into seperate formulas. - br=$1 - ip=$2 - netmask=$3 - [[ -z $br ]] && br="br100" - [[ -z $ip ]] && ip="11.0.0.1" - [[ -z $netmask ]] && netmask="255.255.255.0" - - apt-get -y install bridge-utils augeas-lenses augeas-tools - echo "Configuring bridge $br ($ip $netmask)" - context="/files/etc/network/interfaces" - augtool < /etc/nova/nm.conf - ;;& - "FlatManager") - local bridge_ip=$(config-get bridge-ip) - local bridge_netmask=$(config-get bridge-netmask) - setup_bridge $network_bridge $bridge_ip $bridge_netmask - set_or_update network_manager nova.network.manager.FlatManager - set_or_update flat_network_bridge $network_bridge - ;; - "FlatDHCPManager") - local flat_interface=$(config-get flat-interface) - local ec2_host=$(relation-get ec2_host) - [[ -z $ec2_host ]] && juju-log "nova-compute: Missing ec2_host" \ - && exit 0 - set_or_update network_manager nova.network.manager.FlatDHCPManager - # the interface on which bridge is built - set_or_update flat_interface $flat_interface - # address of API server to forward requests - set_or_update ec2_dmz_host $ec2_host - ;; - "Quantum") - local keystone_host="$(relation-get keystone_host)" - local auth_port="$(relation-get auth_port)" - local quantum_url="$(relation-get quantum_url)" - local quantum_admin_tenant_name="$(relation-get service_tenant)" - local quantum_admin_username="$(relation-get service_username)" - local quantum_admin_password="$(relation-get service_password)" - local quantum_security_groups="$(relation-get quantum_security_groups)" - - # might end up here before nova-c-c has processed keystone hooks - [[ -z "$keystone_host" ]] || - [[ -z "$auth_port" ]] || - [[ -z "$quantum_url" ]] || - [[ -z "$quantum_admin_tenant_name" ]] || - [[ -z "$quantum_admin_username" ]] || - [[ -z "$quantum_admin_password" ]] && - juju-log "nova-compute: Missing required data for Quantum config." && - exit 0 - - local cur=$(get_os_codename_package "nova-common") - local vers=$(get_os_version_codename $cur) - - [[ "$quantum_security_groups" == "yes" ]] && - dpkg --compare-versions $vers lt '2013.1' && - juju-log "Unable to use quantum security groups with < grizzly" && - exit 1 - - set_or_update "network_api_class" "nova.network.quantumv2.api.API" - set_or_update "quantum_auth_strategy" "keystone" - set_or_update "quantum_url" "$quantum_url" - set_or_update "quantum_admin_tenant_name" "$quantum_admin_tenant_name" - set_or_update "quantum_admin_username" "$quantum_admin_username" - set_or_update "quantum_admin_password" "$quantum_admin_password" - set_or_update "quantum_admin_auth_url" \ - "http://$keystone_host:$auth_port/v2.0" - - if dpkg --compare-versions $vers gt '2012.2'; then - # Grizzly onwards supports metadata proxy so forcing use of config - # drive is not required. 
- set_or_update "force_config_drive" "False" - else - set_or_update "force_config_drive" "True" - fi - case $quantum_plugin in - "ovs") - apt-get -y install openvswitch-datapath-dkms - apt-get -y install quantum-plugin-openvswitch-agent - local quantum_plugin_conf="/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini" - set_or_update "core_plugin" "quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2" "$QUANTUM_CONF" - if dpkg --compare-versions $vers gt '2012.2'; then - set_or_update "libvirt_vif_driver" "nova.virt.libvirt.vif.LibvirtGenericVIFDriver" - else - set_or_update "libvirt_vif_driver" "nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver" - fi - set_or_update "libvirt_use_virtio_for_bridges" "True" - set_or_update "tenant_network_type" "gre" $quantum_plugin_conf "OVS" - set_or_update "enable_tunneling" "True" $quantum_plugin_conf "OVS" - set_or_update "tunnel_id_ranges" "1:1000" $quantum_plugin_conf "OVS" - set_or_update "local_ip" "$private_address" $quantum_plugin_conf "OVS" - if [ "$quantum_security_groups" == "yes" ]; then - set_or_update "security_group_api" "quantum" - set_or_update "firewall_driver" "nova.virt.firewall.NoopFirewallDriver" - set_or_update "firewall_driver" \ - "quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver" \ - $quantum_plugin_conf "SECURITYGROUP" - fi - SERVICES="$SERVICES quantum-plugin-openvswitch-agent" - ;; - esac - set_or_update "bind_host" "0.0.0.0" "$QUANTUM_CONF" - [[ -n $net_manager ]] && echo $net_manager > /etc/nova/nm.conf - [[ -n $quantum_plugin ]] && echo $quantum_plugin > /etc/nova/quantum_plugin.conf - ;; - *) echo "ERROR: Invalid network manager $1" && exit 1 ;; - esac -} - -BR_INT="br-int" - -function configure_quantum_bridge { - if ! ovs-vsctl show | grep -q "Bridge $BR_INT"; then - ovs-vsctl add-br $BR_INT - fi -} - -function initialize_ssh_keys { - # generate ssh keypair for root if one does not exist or - # the pari is not complete. - local pub="/root/.ssh/id_rsa" - local priv="/root/.ssh/id_rsa.pub" - if [[ -e $pub ]] && - [[ -e $priv ]] ; then - juju-log "$CHARM: SSH credentials already exist for root." - return 0 - fi - juju-log "$CHARM: Initializing new SSH key pair for live migration." - [[ -e $pub ]] && mv $pub $pub.$(date +"%s") - [[ -e $priv ]] && mv $priv $priv.$(date +"%s") - local keyname=$(echo $JUJU_UNIT_NAME | sed -e 's,/,-,g') - echo -e "\n" | ssh-keygen -C "$keyname" -N "" -} - -function libvirt_tcp_listening { - # toggle libvirtd's tcp listening in both /etc/default/libvirt-bin - # and /etc/libvirt/libvirtd.conf. - local toggle="$1" - juju-log "$CHARM: Configuring libvirt tcp listening: $toggle." - local cur_opts=$(grep "^libvirtd_opts" /etc/default/libvirt-bin | - cut -d= -f2 | sed -e 's/\"//g') - local new_opts="" - - if [[ "$toggle" == "on" ]] ; then - if [[ -z "$cur_opts" ]] ; then - echo "libvirtd_opts=\"-d -l\"" >>/etc/default/libvirt-bin - elif ! 
echo "$cur_opts" | grep -q "\-l" ; then - new_opts="$cur_opts -l" - sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin - fi - set_or_update "listen_tcp" 1 $LIBVIRTD_CONF - elif [[ "$toggle" == "off" ]] ; then - if echo "$cur_opts" | grep -q "\-l" ; then - new_opts=$(echo $cur_opts | sed -e 's/\-l//g') - fi - set_or_update "listen_tcp" 0 $LIBVIRTD_CONF - fi - - [[ -n "$new_opts" ]] && - sed -i "s|\(libvirtd_opts=\).*|\1\"$new_opts\"|" /etc/default/libvirt-bin - - return 0 -} - - -function configure_migration { - local enable_migration=$(config-get enable-live-migration) - - if [[ "$enable_migration" != "True" ]] && - [[ "$enable_migraiton" != "true" ]] ; then - libvirt_tcp_listening "off" - return $? - fi - - libvirt_tcp_listening "on" - - case "$(config-get migration-auth-type)" in - "none"|"None") - set_or_update "listen_tls" 0 $LIBVIRTD_CONF - set_or_update "auth_tcp" "\"none\"" $LIBVIRTD_CONF - ;; - "ssh") - set_or_update "listen_tls" 0 $LIBVIRTD_CONF - set_or_update "live_migration_uri" "qemu+ssh://%s/system" $NOVA_CONF - initialize_ssh_keys - # check in with nova-c-c and register our new key. - for id in $(relation-ids cloud-compute) ; do - compute_joined $id - done - service_ctl nova-compute restart ;; - "sasl") return 0 ;; - esac -} - -function configure_libvirt { - cat > /etc/libvirt/qemu.conf << EOF -# File installed by Juju nova-compute charm -cgroup_device_acl = [ - "/dev/null", "/dev/full", "/dev/zero", - "/dev/random", "/dev/urandom", - "/dev/ptmx", "/dev/kvm", "/dev/kqemu", - "/dev/rtc", "/dev/hpet", "/dev/net/tun", -] -EOF - configure_migration - service libvirt-bin restart -} - -function migration_enabled { - local migration="$(config-get enable-live-migration)" - [[ "$migration" == "true" ]] || [[ "$migration" == "True" ]] && return 0 - return 1 -} diff --git a/hooks/nova-compute-relations b/hooks/nova-compute-relations deleted file mode 100755 index 3fc2a8d..0000000 --- a/hooks/nova-compute-relations +++ /dev/null @@ -1,329 +0,0 @@ -#!/bin/bash -e -HOOKS_DIR="$CHARM_DIR/hooks" -ARG0=${0##*/} - -if [[ -e $HOOKS_DIR/nova-compute-common ]] ; then - . $HOOKS_DIR/nova-compute-common -else - juju-log "ERROR: Could not load nova-compute-common from $HOOKS_DIR" -fi - -function install_hook { - [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f";done ) - local virt_type=$(config-get virt-type) - local compute_pkg=$(determine_compute_package "$virt_type") - apt-get -y install python-software-properties || exit 1 - configure_install_source "$(config-get openstack-origin)" - apt-get update || exit 1 - apt-get -y install $compute_pkg $PACKAGES || exit 1 - service_ctl all stop - set_or_update "auth_strategy" "keystone" - configure_libvirt -} - -function upgrade_hook { - [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f";done ) -} - -function config_changed() { - - # Determine whether or not we should do an upgrade, based on whether or not - # the version offered in openstack-origin is greater than what is installed. - - local install_src=$(config-get openstack-origin) - local cur=$(get_os_codename_package "nova-common") - local available=$(get_os_codename_install_source "$install_src") - - if dpkg --compare-versions $(get_os_version_codename "$cur") lt \ - $(get_os_version_codename "$available") ; then - juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available." - do_openstack_upgrade "$install_src" $PACKAGES - fi - - # set this here until its fixed in grizzly packaging. 
diff --git a/hooks/nova-compute-relations b/hooks/nova-compute-relations
deleted file mode 100755
index 3fc2a8d..0000000
--- a/hooks/nova-compute-relations
+++ /dev/null
@@ -1,329 +0,0 @@
-#!/bin/bash -e
-HOOKS_DIR="$CHARM_DIR/hooks"
-ARG0=${0##*/}
-
-if [[ -e $HOOKS_DIR/nova-compute-common ]] ; then
-  . $HOOKS_DIR/nova-compute-common
-else
-  juju-log "ERROR: Could not load nova-compute-common from $HOOKS_DIR"
-fi
-
-function install_hook {
-  [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f"; done )
-  local virt_type=$(config-get virt-type)
-  local compute_pkg=$(determine_compute_package "$virt_type")
-  apt-get -y install python-software-properties || exit 1
-  configure_install_source "$(config-get openstack-origin)"
-  apt-get update || exit 1
-  apt-get -y install $compute_pkg $PACKAGES || exit 1
-  service_ctl all stop
-  set_or_update "auth_strategy" "keystone"
-  configure_libvirt
-}
-
-function upgrade_hook {
-  [ -d exec.d ] && ( for f in exec.d/*/charm-pre-install; do [ -x $f ] && /bin/sh -c "$f"; done )
-}
-
-function config_changed() {
-
-  # Determine whether or not we should do an upgrade, based on whether or not
-  # the version offered in openstack-origin is greater than what is installed.
-
-  local install_src=$(config-get openstack-origin)
-  local cur=$(get_os_codename_package "nova-common")
-  local available=$(get_os_codename_install_source "$install_src")
-
-  if dpkg --compare-versions $(get_os_version_codename "$cur") lt \
-     $(get_os_version_codename "$available") ; then
-    juju-log "$CHARM: Upgrading OpenStack release: $cur -> $available."
-    do_openstack_upgrade "$install_src" $PACKAGES
-  fi
-
-  # set this here until it's fixed in grizzly packaging. (adam_g)
-  [[ "$cur" == "grizzly" ]] &&
-    set_or_update "compute_driver" "libvirt.LibvirtDriver"
-
-  configure_libvirt
-  set_config_flags
-  service_ctl all restart
-}
-
-function amqp_joined {
-  # we request a username on the rabbit queue
-  # and store it in nova.conf. our response is its IP + PASSWD
-  # but we configure that in _changed
-  local rabbit_user=$(config-get rabbit-user)
-  local rabbit_vhost=$(config-get rabbit-vhost)
-  juju-log "$CHARM - amqp_joined: requesting credentials for $rabbit_user"
-  relation-set username=$rabbit_user
-  relation-set vhost=$rabbit_vhost
-}
-
-function amqp_changed {
-  # server creates our credentials and tells us where
-  # to connect. for now, using default vhost '/'
-  local rabbit_host=$(relation-get private-address)
-  local rabbit_password=$(relation-get password)
-
-  if [[ -z $rabbit_host ]] || \
-     [[ -z $rabbit_password ]] ; then
-    juju-log "$CHARM - amqp_changed: rabbit_host||rabbit_password not set."
-    exit 0
-  fi
-
-  # if the rabbitmq service is clustered among nodes with hacluster,
-  # point to its vip instead of its private-address.
-  local clustered=$(relation-get clustered)
-  if [[ -n "$clustered" ]] ; then
-    juju-log "$CHARM - amqp_changed: Configuring for "\
-             "access to haclustered rabbitmq service."
-    local vip=$(relation-get vip)
-    [[ -z "$vip" ]] && juju-log "$CHARM - amqp_changed: Clustered but no vip."\
-      && exit 0
-    rabbit_host="$vip"
-  fi
-
-  local rabbit_user=$(config-get rabbit-user)
-  local rabbit_vhost=$(config-get rabbit-vhost)
-  juju-log "$CHARM - amqp_changed: Setting rabbit config in nova.conf: " \
-           "$rabbit_user@$rabbit_host/$rabbit_vhost"
-  set_or_update rabbit_host $rabbit_host
-  set_or_update rabbit_userid $rabbit_user
-  set_or_update rabbit_password $rabbit_password
-  set_or_update rabbit_virtual_host $rabbit_vhost
-
-  if [ "$NET_MANAGER" == "Quantum" ]; then
-    set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
-    set_or_update rabbit_userid "$rabbit_user" "$QUANTUM_CONF"
-    set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
-    set_or_update rabbit_virtual_host "$rabbit_vhost" "$QUANTUM_CONF"
-  fi
-
-  service_ctl all restart
-}
-
-function db_joined {
-  # tell mysql provider which database we want. it will create it and give us
-  # credentials
-  local nova_db=$(config-get nova-db)
-  local db_user=$(config-get db-user)
-  local hostname=$(unit-get private-address)
-  juju-log "$CHARM - db_joined: requesting database access to $nova_db for "\
-           "$db_user@$hostname"
-  relation-set nova_database=$nova_db nova_username=$db_user nova_hostname=$hostname
-  if [ "$NET_MANAGER" == "Quantum" ]; then
-    relation-set quantum_database=quantum quantum_username=quantum quantum_hostname=$hostname
-  fi
-}
-
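The dpkg version comparison driving do_openstack_upgrade in config_changed above is what openstack_upgrade_available() encapsulates in the Python rewrite. Conceptually, using python-apt (a sketch of the idea, not charmhelpers' exact implementation):

# Sketch only: the idea behind openstack_upgrade_available(), using
# python-apt for a dpkg-style version comparison.
import apt_pkg


def upgrade_available(installed_version, available_version):
    apt_pkg.init()
    # > 0 means 'available' sorts after 'installed', i.e. an upgrade exists
    return apt_pkg.version_compare(available_version, installed_version) > 0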
-function db_changed {
-  local db_host=`relation-get db_host`
-  local db_password=`relation-get nova_password`
-
-  if [[ -z $db_host ]] || [[ -z $db_password ]] ; then
-    juju-log "$CHARM - db_changed: db_host||db_password not set, will retry."
-    exit 0
-  fi
-
-  local nova_db=$(config-get nova-db)
-  local db_user=$(config-get db-user)
-  juju-log "$CHARM - db_changed: Configuring nova.conf for access to $nova_db"
-
-  set_or_update sql_connection "mysql://$db_user:$db_password@$db_host/$nova_db"
-
-  if [ "$NET_MANAGER" == "Quantum" ]; then
-    local quantum_db_password=`relation-get quantum_password`
-    set_or_update sql_connection "mysql://quantum:$quantum_db_password@$db_host/quantum?charset=utf8" \
-      $QUANTUM_PLUGIN_CONF "DATABASE"
-  fi
-
-  service_ctl all restart
-}
-
-function image-service_changed {
-  local api_server=`relation-get glance-api-server`
-  if [[ -z $api_server ]] ; then
-    echo "image-service_changed: api_server not yet set. Exit 0 and retry"
-    exit 0
-  fi
-
-  if [[ "$(get_os_codename_package nova-common)" == "essex" ]] ; then
-    # essex needs glance_api_servers urls stripped of protocol.
-    api_server="$(echo $api_server | awk '{gsub(/http:\/\/|https:\/\//,"")}1')"
-  fi
-
-  set_or_update glance_api_servers $api_server
-  service_ctl all restart
-}
-
-function compute_joined {
-  migration_enabled || return 0
-  local relid="$1"
-  [[ -n "$relid" ]] && relid="-r $relid"
-  migration_auth="$(config-get migration-auth-type)"
-  case "$migration_auth" in
-    "none"|"None") return 0 ;;
-    "ssh") relation-set $relid ssh_public_key="$(cat /root/.ssh/id_rsa.pub)" ;;
-  esac
-  relation-set $relid migration_auth_type="$migration_auth"
-}
-
-function compute_changed {
-  # nova-c-c will inform us of the configured network manager. nova-compute
-  # needs to configure itself accordingly.
-  network_manager=`relation-get network_manager`
-  if [[ -n "$network_manager" ]] ; then
-    if [ "$network_manager" == "Quantum" ]; then
-      configure_network_manager "$network_manager" "$(relation-get quantum_plugin)"
-      configure_quantum_bridge
-      # Quantum also needs access to the quantum database
-      # depending on add-relation order, this relation
-      # may already be present so ask it for credentials if so
-      r_ids="$(relation-ids shared-db)"
-      for id in $r_ids ; do
-        relation-set -r $id \
-          quantum_database=quantum \
-          quantum_username=quantum \
-          quantum_hostname=$(unit-get private-address)
-      done
-      # Rabbit MQ relation may also already be in place
-      # shared vhost with nova so just grab settings and
-      # configure. need to be sure to use VIP if clustered.
-      local rabbit_clustered="" rabbit_vip="" rabbit_host="" rabbit_password=""
-      r_ids="$(relation-ids amqp)"
-      for id in $r_ids ; do
-        for unit in $(relation-list -r $id) ; do
-          [[ -z "$rabbit_clustered" ]] &&
-            rabbit_clustered=$(relation-get -r $id clustered $unit)
-          [[ -z "$rabbit_vip" ]] && rabbit_vip=$(relation-get -r $id vip $unit)
-          [[ -z "$rabbit_password" ]] &&
-            rabbit_password=$(relation-get -r $id password $unit)
-          rabbit_host=$(relation-get -r $id private-address $unit)
-        done
-      done
-      if [[ -n "$rabbit_clustered" ]] ; then
-        rabbit_host="$rabbit_vip"
-      fi
-      if [[ -n $rabbit_host ]] && \
-         [[ -n $rabbit_password ]]; then
-        set_or_update rabbit_host "$rabbit_host" "$QUANTUM_CONF"
-        set_or_update rabbit_userid "$(config-get rabbit-user)" "$QUANTUM_CONF"
-        set_or_update rabbit_password "$rabbit_password" "$QUANTUM_CONF"
-        set_or_update rabbit_virtual_host "$(config-get rabbit-vhost)" "$QUANTUM_CONF"
-      fi
-    else
-      configure_network_manager "$network_manager"
-    fi
-  fi
-
-  # nova-c-c informs us of what volume service has been deployed.
-  volume_service=`relation-get volume_service`
-  [[ -n "$volume_service" ]] && configure_volume_service "$volume_service"
-
-  if migration_enabled ; then
-    case "$(config-get migration-auth-type)" in
-      "ssh")
-        local known_hosts="$(relation-get known_hosts)"
-        local authorized_keys="$(relation-get authorized_keys)"
-        if [[ -n "$known_hosts" ]] &&
-           [[ -n "$authorized_keys" ]] ; then
-          juju-log "$CHARM: Saving new known_hosts+authorized_keys file."
-          echo "$known_hosts" | base64 -di >/root/.ssh/known_hosts
-          echo "$authorized_keys" | base64 -di >/root/.ssh/authorized_keys
-        fi
-        ;;
-    esac
-  fi
-
-  # If Keystone is configured to manage SSL certs, nova-compute needs a copy
-  # of its CA installed.
-  local ca_cert="$(relation-get ca_cert)"
-  if [[ -n "$ca_cert" ]] ; then
-    juju-log "Installing Keystone CA certificate."
-    ca_cert="$(echo $ca_cert | base64 -di)"
-    echo "$ca_cert" >/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt
-    update-ca-certificates
-  fi
-
-  # restart on all changed events. nova-c-c may send out a uuid to trigger
-  # remote restarts of services here (after db migrations, for instance)
-  service_ctl all restart
-}
-
-function ceph_joined {
-  mkdir -p /etc/ceph
-  apt-get -y install ceph-common || exit 1
-}
-
-function ceph_changed {
-  SERVICE_NAME=`echo $JUJU_UNIT_NAME | cut -d / -f 1`
-  KEYRING=/etc/ceph/ceph.client.$SERVICE_NAME.keyring
-  KEY=`relation-get key`
-  if [ -n "$KEY" ]; then
-    # But only once
-    if [ ! -f $KEYRING ]; then
-      ceph-authtool $KEYRING \
-        --create-keyring --name=client.$SERVICE_NAME \
-        --add-key="$KEY"
-      chmod +r $KEYRING
-    fi
-  else
-    # No key - bail for the time being
-    exit 0
-  fi
-
-  MONS=`relation-list`
-  mon_hosts=""
-  for mon in $MONS; do
-    mon_hosts="$mon_hosts $(get_ip $(relation-get private-address $mon)):6789"
-  done
-  cat > /etc/ceph/ceph.conf << EOF
-[global]
- auth supported = $(relation-get auth)
- keyring = /etc/ceph/\$cluster.\$name.keyring
- mon host = $mon_hosts
-EOF
-
-  if [ ! -f /etc/ceph/secret.xml ]; then
-    # This is just a label and it must be consistent across
-    # nova-compute nodes to support live migration.
-    UUID="514c9fca-8cbe-11e2-9c52-3bc8c7819472"
-    cat > /etc/ceph/secret.xml << EOF
-<secret ephemeral='no' private='no'>
-  <uuid>$UUID</uuid>
-  <usage type='ceph'>
-    <name>client.$SERVICE_NAME secret</name>
-  </usage>
-</secret>
-EOF
-    # Create secret for libvirt usage
-    # note that this does limit ceph usage to
-    # KVM only at this point in time.
-    virsh secret-define --file /etc/ceph/secret.xml
-    virsh secret-set-value --secret $UUID --base64 $KEY
-    set_or_update rbd_user $SERVICE_NAME
-    set_or_update rbd_secret_uuid $UUID
-    set_or_update rbd_pool nova
-    service_ctl all restart
-  fi
-}
-
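The Python ceph_changed hook delegates the secret.xml write to CONFIGS.write(), but the virsh plumbing from the deleted function above still has to happen somewhere. A sketch of how it could look with subprocess (the helper name and idempotency guard are assumptions; the UUID matches the constant introduced in nova_compute_utils.py):

# Sketch only: the virsh secret handling from the bash ceph_changed,
# expressed with subprocess. The helper name is an assumption.
from subprocess import check_call, check_output

CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472'


def define_libvirt_secret(key, secret_xml='/etc/ceph/secret.xml'):
    # idempotency guard: skip if the secret is already defined
    if CEPH_SECRET_UUID in check_output(['virsh', 'secret-list']):
        return
    check_call(['virsh', 'secret-define', '--file', secret_xml])
    check_call(['virsh', 'secret-set-value', '--secret', CEPH_SECRET_UUID,
                '--base64', key])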
-case $ARG0 in
-  "install") install_hook ;;
-  "upgrade-charm") upgrade_hook ;;
-  "start"|"stop") exit 0 ;;
-  "config-changed") config_changed ;;
-  "amqp-relation-joined") amqp_joined ;;
-  "amqp-relation-changed") amqp_changed ;;
-  "shared-db-relation-joined") db_joined ;;
-  "shared-db-relation-changed") db_changed ;;
-  "image-service-relation-joined") exit 0 ;;
-  "image-service-relation-changed") image-service_changed ;;
-  "identity-service-relation-joined") keystone_joined ;;
-  "identity-service-relation-changed") exit 0 ;;
-  "ceph-relation-joined") ceph_joined ;;
-  "ceph-relation-changed") ceph_changed ;;
-  "cloud-compute-relation-joined") compute_joined ;;
-  "cloud-compute-relation-changed") compute_changed ;;
-esac
diff --git a/hooks/nova_compute_relations.py b/hooks/nova_compute_relations.py
new file mode 100755
index 0000000..7f35255
--- /dev/null
+++ b/hooks/nova_compute_relations.py
@@ -0,0 +1,155 @@
+#!/usr/bin/python
+
+import os
+import sys
+
+from charmhelpers.core.hookenv import (
+    Hooks,
+    UnregisteredHookError,
+    config,
+    log,
+    relation_ids,
+    relation_set,
+    service_name,
+    unit_get,
+)
+
+from charmhelpers.core.host import (
+    apt_install,
+    apt_update,
+    restart_on_change,
+)
+
+from charmhelpers.contrib.openstack.utils import (
+    configure_installation_source,
+    openstack_upgrade_available,
+)
+
+from nova_compute_utils import (
+    PACKAGES,
+    RESTART_MAP,
+    import_authorized_keys,
+    import_keystone_ca_cert,
+    migration_enabled,
+    configure_live_migration,
+    configure_network_service,
+    configure_volume_service,
+    do_openstack_upgrade,
+    quantum_enabled,
+    quantum_plugin_config,
+    public_ssh_key,
+    register_configs,
+)
+
+from misc_utils import (
+    ensure_ceph_keyring,
+)
+
+hooks = Hooks()
+CONFIGS = register_configs()
+
+
+@hooks.hook()
+def install():
+    configure_installation_source(config('openstack-origin'))
+    apt_update()
+    apt_install(PACKAGES, fatal=True)
+
+
+@hooks.hook('config-changed')
+@restart_on_change(RESTART_MAP)
+def config_changed():
+    if openstack_upgrade_available('nova-common'):
+        do_openstack_upgrade()
+
+    configure_live_migration()
+    if migration_enabled() and config('migration-auth-type') == 'ssh':
+        # Check in with nova-c-c and register the new ssh key, if it has
+        # just been generated.
+        [compute_joined(rid) for rid in relation_ids('cloud-compute')]
+
+
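restart_on_change(RESTART_MAP), applied to most hooks in this file, restarts the mapped services when the wrapped hook changes one of the watched files. A simplified sketch of the pattern (charmhelpers' real decorator differs in detail, e.g. in ordering and logging):

# Sketch only: simplified idea behind charmhelpers' restart_on_change.
import hashlib
import os
from functools import wraps
from subprocess import check_call


def file_hash(path):
    if not os.path.exists(path):
        return None
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


def restart_on_change(restart_map):
    def wrap(f):
        @wraps(f)
        def wrapped(*args, **kwargs):
            before = {p: file_hash(p) for p in restart_map}
            f(*args, **kwargs)
            for path, services in restart_map.items():
                if file_hash(path) != before[path]:
                    for svc in services:
                        check_call(['service', svc, 'restart'])
        return wrapped
    return wrap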
+@hooks.hook('amqp-relation-joined')
+@restart_on_change(RESTART_MAP)
+def amqp_joined():
+    relation_set(username=config('rabbit-user'), vhost=config('rabbit-vhost'))
+
+
+@hooks.hook('amqp-relation-changed')
+@restart_on_change(RESTART_MAP)
+def amqp_changed():
+    if 'amqp' not in CONFIGS.complete_contexts():
+        log('amqp relation incomplete. Peer not ready?')
+        return
+    CONFIGS.write('/etc/nova/nova.conf')
+    if quantum_enabled():
+        CONFIGS.write('/etc/quantum/quantum.conf')
+
+
+@hooks.hook('shared-db-relation-joined')
+def db_joined():
+    relation_set(database=config('database'), username=config('database-user'),
+                 hostname=unit_get('private-address'))
+
+
+@hooks.hook('shared-db-relation-changed')
+@restart_on_change(RESTART_MAP)
+def db_changed():
+    if 'shared-db' not in CONFIGS.complete_contexts():
+        log('shared-db relation incomplete. Peer not ready?')
+        return
+    CONFIGS.write('/etc/nova/nova.conf')
+    if quantum_enabled():
+        CONFIGS.write(quantum_plugin_config())
+
+
+@hooks.hook('image-service-relation-changed')
+@restart_on_change(RESTART_MAP)
+def image_service_changed():
+    if 'image-service' not in CONFIGS.complete_contexts():
+        log('image-service relation incomplete. Peer not ready?')
+        return
+    CONFIGS.write('/etc/nova/nova.conf')
+
+
+@hooks.hook('cloud-compute-relation-joined')
+def compute_joined(rid=None):
+    if not migration_enabled():
+        return
+    auth_type = config('migration-auth-type')
+    settings = {
+        'migration_auth_type': auth_type
+    }
+    if auth_type == 'ssh':
+        settings['ssh_public_key'] = public_ssh_key()
+    relation_set(relation_id=rid, **settings)
+
+
+@hooks.hook('cloud-compute-relation-changed')
+@restart_on_change(RESTART_MAP)
+def compute_changed():
+    configure_network_service()
+    configure_volume_service()
+    import_authorized_keys()
+    import_keystone_ca_cert()
+
+
+@hooks.hook('ceph-relation-joined')
+@restart_on_change(RESTART_MAP)
+def ceph_joined():
+    if not os.path.isdir('/etc/ceph'):
+        os.mkdir('/etc/ceph')
+    apt_install('ceph-common')
+
+
+@hooks.hook('ceph-relation-changed')
+@restart_on_change(RESTART_MAP)
+def ceph_changed():
+    if 'ceph' not in CONFIGS.complete_contexts():
+        log('ceph relation incomplete. Peer not ready?')
+        return
+    svc = service_name()
+    if not ensure_ceph_keyring(service=svc):
+        log('Could not create ceph keyring: peer not ready?')
+        return
+    CONFIGS.write('/etc/ceph/ceph.conf')
+    CONFIGS.write('/etc/ceph/secret.xml')
+    CONFIGS.write('/etc/nova/nova.conf')
+
+
+def main():
+    try:
+        hooks.execute(sys.argv)
+    except UnregisteredHookError as e:
+        log('Unknown hook {} - skipping.'.format(e))
+
+
+if __name__ == '__main__':
+    main()
diff --git a/hooks/nova_compute_utils.py b/hooks/nova_compute_utils.py
new file mode 100644
index 0000000..86d567e
--- /dev/null
+++ b/hooks/nova_compute_utils.py
@@ -0,0 +1,75 @@
+from charmhelpers.core.hookenv import (
+    config,
+)
+
+PACKAGES = []
+
+RESTART_MAP = {
+    '/etc/libvirt/qemu.conf': ['libvirt-bin'],
+    '/etc/default/libvirt-bin': ['libvirt-bin']
+}
+
+# This is just a label and it must be consistent across
+# nova-compute nodes to support live migration.
+CEPH_SECRET_UUID = '514c9fca-8cbe-11e2-9c52-3bc8c7819472'
+
+
+def migration_enabled():
+    return config('enable-live-migration').lower() == 'true'
+
+
+def quantum_enabled():
+    return config('network-manager').lower() == 'quantum'
+
+
+def quantum_plugin_config():
+    pass
+
+
+def public_ssh_key(user='root'):
+    pass
+
+
+def initialize_ssh_keys():
+    pass
+
+
+def import_authorized_keys():
+    pass
+
+
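quantum_plugin_config() above is still a stub. One plausible fill-in maps the plugin name to its ini file; note the 'quantum-plugin' config key is an assumption (the bash hooks read the plugin from the cloud-compute relation), and only the ovs path is taken from the deleted bash code:

# Sketch only: a possible quantum_plugin_config() body. The
# 'quantum-plugin' config key is an assumption; the ovs path comes from
# the bash code this diff deletes.
from charmhelpers.core.hookenv import config


def quantum_plugin_config():
    plugin = config('quantum-plugin')
    plugin_configs = {
        'ovs': '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini',
    }
    return plugin_configs.get(plugin)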
+ """ + configs = configs or register_configs() + configs.write('/etc/libvirt/libvirtd.conf') + configs.write('/etc/default/libvirt-bin') + configs.write('/etc/nova/nova.conf') + + if not migration_enabled(): + return + + if config('migration-auth-type') == 'ssh': + initialize_ssh_keys() + + +def do_openstack_upgrade(): + pass + + +def register_configs(): + pass + + +def import_keystone_ca_cert(): + pass + + +def configure_network_service(): + pass + + +def configure_volume_service(): + pass diff --git a/hooks/shared-db-relation-changed b/hooks/shared-db-relation-changed index 6f9ff4f..6eb6593 120000 --- a/hooks/shared-db-relation-changed +++ b/hooks/shared-db-relation-changed @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/shared-db-relation-joined b/hooks/shared-db-relation-joined index 6f9ff4f..6eb6593 120000 --- a/hooks/shared-db-relation-joined +++ b/hooks/shared-db-relation-joined @@ -1 +1 @@ -nova-compute-relations \ No newline at end of file +nova_compute_relations.py \ No newline at end of file diff --git a/hooks/start b/hooks/start deleted file mode 120000 index 6f9ff4f..0000000 --- a/hooks/start +++ /dev/null @@ -1 +0,0 @@ -nova-compute-relations \ No newline at end of file diff --git a/hooks/stop b/hooks/stop deleted file mode 120000 index 6f9ff4f..0000000 --- a/hooks/stop +++ /dev/null @@ -1 +0,0 @@ -nova-compute-relations \ No newline at end of file