Resync helpers

James Page 2014-07-28 12:33:12 +01:00
parent efb7afbca4
commit 3b53a2fb31
10 changed files with 84 additions and 131 deletions

View File

@@ -146,12 +146,12 @@ def get_hacluster_config():
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:
-        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+        ha-bindiface, ha-mcastport, vip
     returns: dict: A dict containing settings keyed by setting name.
     raises: HAIncompleteConfig if settings are missing.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
     conf = {}
     for setting in settings:
         conf[setting] = config_get(setting)
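
For context, a charm's HA relation hook would consume this helper roughly as follows. This is a minimal sketch; the hook and resource names are hypothetical, only get_hacluster_config() and HAIncompleteConfig come from the helper above:

    from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config

    def ha_relation_joined():
        # Raises HAIncompleteConfig if any of the three settings is unset.
        cluster_config = get_hacluster_config()
        resources = {'res_example_vip': 'ocf:heartbeat:IPaddr2'}
        resource_params = {
            'res_example_vip': 'params ip="{}"'.format(cluster_config['vip']),
        }
        # Corosync ring settings come from the same dict.
        corosync_bindiface = cluster_config['ha-bindiface']
        corosync_mcastport = cluster_config['ha-mcastport']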

View File

@@ -21,12 +21,16 @@ def del_bridge(name):
     subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])


-def add_bridge_port(name, port):
+def add_bridge_port(name, port, promisc=False):
     ''' Add a port to the named openvswitch bridge '''
     log('Adding port {} to bridge {}'.format(port, name))
     subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                            name, port])
     subprocess.check_call(["ip", "link", "set", port, "up"])
+    if promisc:
+        subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
+    else:
+        subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


 def del_bridge_port(name, port):
@@ -35,6 +39,7 @@ def del_bridge_port(name, port):
     subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                            name, port])
     subprocess.check_call(["ip", "link", "set", port, "down"])
+    subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


 def set_manager(manager):
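
A sketch of how a charm might use the new flag, for example to attach a data-plane NIC in promiscuous mode; the bridge and interface names are illustrative:

    from charmhelpers.contrib.network.ovs import add_bridge, add_bridge_port

    add_bridge('br-data')  # idempotent: ovs-vsctl is invoked with --may-exist
    add_bridge_port('br-data', 'eth1', promisc=True)  # e.g. for mirrored traffic
    add_bridge_port('br-data', 'eth2')  # promisc is now explicitly switched off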

View File

@@ -7,7 +7,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
     """This class inherits from AmuletDeployment and has additional support
        that is specifically for use by OpenStack charms."""

-    def __init__(self, series, openstack=None, source=None):
+    def __init__(self, series=None, openstack=None, source=None):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
         self.openstack = openstack
@@ -24,15 +24,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         if self.openstack:
             for svc in services:
-                charm_name = self._get_charm_name(svc[name])
-                if charm_name not in use_source:
+                if svc[name] not in use_source:
                     config = {'openstack-origin': self.openstack}
                     self.d.configure(svc[name], config)

         if self.source:
             for svc in services:
-                charm_name = self._get_charm_name(svc[name])
-                if charm_name in use_source:
+                if svc[name] in use_source:
                     config = {'source': self.source}
                     self.d.configure(svc[name], config)
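
With the -service# aliasing helper gone, configuration is keyed directly off the deployed service names. A minimal usage sketch; the series, origin string, and service names are illustrative:

    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )

    # Services listed in use_source get {'source': ...} (e.g. ceph);
    # everything else gets {'openstack-origin': ...}.
    deployment = OpenStackAmuletDeployment(series='precise',
                                           openstack='cloud:precise-icehouse')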

View File

@@ -177,40 +177,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         image = glance.images.create(name=image_name, is_public=True,
                                      disk_format='qcow2',
                                      container_format='bare', data=f)
-        count = 1
-        status = image.status
-        while status != 'active' and count < 10:
-            time.sleep(3)
-            image = glance.images.get(image.id)
-            status = image.status
-            self.log.debug('image status: {}'.format(status))
-            count += 1
-
-        if status != 'active':
-            self.log.error('image creation timed out')
-            return None
-
         return image

     def delete_image(self, glance, image):
         """Delete the specified image."""
-        num_before = len(list(glance.images.list()))
         glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True

     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
         image = nova.images.find(name=image_name)
@@ -227,27 +199,11 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('instance status: {}'.format(status))
             count += 1

-        if status != 'ACTIVE':
-            self.log.error('instance creation timed out')
+        if status == 'BUILD':
             return None

         return instance

     def delete_instance(self, nova, instance):
         """Delete the specified instance."""
-        num_before = len(list(nova.servers.list()))
         nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True
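
All of the deleted blocks are instances of the same poll-until-done pattern. As a generic reference, the shape is roughly this; a standalone sketch, not part of the charm-helpers API:

    import time

    def wait_for(check, attempts=10, interval=3):
        """Poll check() until it returns True or attempts run out."""
        for _ in range(attempts):
            if check():
                return True
            time.sleep(interval)
        return False

    # e.g. wait_for(lambda: glance.images.get(image.id).status == 'active')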

View File

@@ -21,9 +21,11 @@ from charmhelpers.core.hookenv import (
     relation_get,
     relation_ids,
     related_units,
+    relation_set,
     unit_get,
     unit_private_ip,
     ERROR,
+    INFO
 )

 from charmhelpers.contrib.hahelpers.cluster import (
@@ -42,6 +44,8 @@ from charmhelpers.contrib.openstack.neutron import (
     neutron_plugin_attribute,
 )

+from charmhelpers.contrib.network.ip import get_address_in_network
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -134,8 +138,26 @@ class SharedDBContext(OSContextGenerator):
                 'Missing required charm config options. '
                 '(database name and user)')
             raise OSContextError
+        ctxt = {}
+
+        # NOTE(jamespage) if mysql charm provides a network upon which
+        # access to the database should be made, reconfigure relation
+        # with the service units local address and defer execution
+        access_network = relation_get('access-network')
+        if access_network is not None:
+            if self.relation_prefix is not None:
+                hostname_key = "{}_hostname".format(self.relation_prefix)
+            else:
+                hostname_key = "hostname"
+            access_hostname = get_address_in_network(access_network,
+                                                     unit_get('private-address'))
+            set_hostname = relation_get(attribute=hostname_key,
+                                        unit=local_unit())
+            if set_hostname != access_hostname:
+                relation_set(relation_settings={hostname_key: access_hostname})
+                return ctxt  # Defer any further hook execution for now....

         password_setting = 'password'
         if self.relation_prefix:
             password_setting = self.relation_prefix + '_password'
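
The deferral works because relation_set re-triggers the relation hook once the mysql charm has processed the new hostname, at which point network-scoped grants exist and the context can render fully. get_address_in_network(network, fallback) returns a local address inside the given CIDR, falling back otherwise; a rough standalone sketch of that contract, a hypothetical simplified equivalent of charmhelpers.contrib.network.ip using netifaces and netaddr:

    import netifaces
    from netaddr import IPAddress, IPNetwork

    def get_address_in_network(network, fallback=None):
        # Scan local interfaces for an IPv4 address inside `network`.
        if network:
            net = IPNetwork(network)
            for iface in netifaces.interfaces():
                addrs = netifaces.ifaddresses(iface).get(netifaces.AF_INET, [])
                for addr in addrs:
                    if IPAddress(addr['addr']) in net:
                        return addr['addr']
        return fallback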
@@ -340,10 +362,12 @@ class CephContext(OSContextGenerator):
         use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
-                mon_hosts.append(relation_get('private-address', rid=rid,
-                                              unit=unit))
                 auth = relation_get('auth', rid=rid, unit=unit)
                 key = relation_get('key', rid=rid, unit=unit)
+                ceph_addr = \
+                    relation_get('ceph-public-address', rid=rid, unit=unit) or \
+                    relation_get('private-address', rid=rid, unit=unit)
+                mon_hosts.append(ceph_addr)

         ctxt = {
             'mon_hosts': ' '.join(mon_hosts),
@@ -377,7 +401,9 @@ class HAProxyContext(OSContextGenerator):
         cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = unit_get('private-address')
+        cluster_hosts[l_unit] = \
+            get_address_in_network(config('os-internal-network'),
+                                   unit_get('private-address'))

         for rid in relation_ids('cluster'):
             for unit in related_units(rid):
@@ -689,7 +715,7 @@ class SubordinateConfigContext(OSContextGenerator):
         self.interface = interface

     def __call__(self):
-        ctxt = {}
+        ctxt = {'sections': {}}
         for rid in relation_ids(self.interface):
             for unit in related_units(rid):
                 sub_config = relation_get('subordinate_configuration',
@@ -715,14 +741,29 @@ class SubordinateConfigContext(OSContextGenerator):
                     sub_config = sub_config[self.config_file]

                     for k, v in sub_config.iteritems():
-                        ctxt[k] = v
+                        if k == 'sections':
+                            for section, config_dict in v.iteritems():
+                                log("adding section '%s'" % (section))
+                                ctxt[k][section] = config_dict
+                        else:
+                            ctxt[k] = v

-        if not ctxt:
-            ctxt['sections'] = {}
-
+        log("%d section(s) found" % (len(ctxt['sections'])), level=INFO)
         return ctxt


+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+
+        return ctxt
+
+
 class SyslogContext(OSContextGenerator):

     def __call__(self):
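
For reference, the subordinate_configuration payload that SubordinateConfigContext parses is JSON of roughly the following shape; a subordinate charm might publish it like this (the service name, file path, and keys are illustrative):

    import json
    from charmhelpers.core.hookenv import relation_set

    sub_config = {
        'nova': {                       # principal service the config targets
            '/etc/nova/nova.conf': {    # config file to extend
                'sections': {
                    'DEFAULT': [('key1', 'value1'), ('key2', 'value2')],
                }
            }
        }
    }
    relation_set(relation_settings={
        'subordinate_configuration': json.dumps(sub_config)})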

View File

@@ -322,6 +322,10 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     import apt_pkg
     if not pkgcache:
         apt_pkg.init()
+        # Force Apt to build its cache in memory. That way we avoid race
+        # conditions with other applications building the cache in the same
+        # place.
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
         pkgcache = apt_pkg.Cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
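
version_compare follows dpkg semantics: positive if the installed version is newer than revno, zero if equal, negative if older. A usage sketch; the package name, threshold, and called helpers are illustrative:

    from charmhelpers.core.host import cmp_pkgrevno

    if cmp_pkgrevno('openvswitch-switch', '2.0.0') >= 0:
        enable_new_feature()   # hypothetical charm-specific helper
    else:
        use_legacy_path()      # hypothetical fallback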

View File

@@ -1,40 +1,35 @@
 import amulet
-import re


 class AmuletDeployment(object):
     """This class provides generic Amulet deployment and test runner
        methods."""

-    def __init__(self, series):
+    def __init__(self, series=None):
         """Initialize the deployment environment."""
-        self.series = series
-        self.d = amulet.Deployment(series=self.series)
+        self.series = None
+
-    def _get_charm_name(self, service_name):
-        """Gets the charm name from the service name. Unique service names can
-           be specified with a '-service#' suffix (e.g. mysql-service1)."""
-        if re.match(r"^.*-service\d{1,3}$", service_name):
-            charm_name = re.sub('\-service\d{1,3}$', '', service_name)
+        if series:
+            self.series = series
+            self.d = amulet.Deployment(series=self.series)
         else:
-            charm_name = service_name
-        return charm_name
+            self.d = amulet.Deployment()

     def _add_services(self, this_service, other_services):
         """Add services to the deployment where this_service is the local charm
            that we're focused on testing and other_services are the other
            charms that come from the charm store."""
         name, units = range(2)
-        charm_name = self._get_charm_name(this_service[name])
-        self.d.add(this_service[name],
-                   units=this_service[units])
         self.this_service = this_service[name]
+        self.d.add(this_service[name], units=this_service[units])

         for svc in other_services:
-            charm_name = self._get_charm_name(svc[name])
-            self.d.add(svc[name],
-                       charm='cs:{}/{}'.format(self.series, charm_name),
-                       units=svc[units])
+            if self.series:
+                self.d.add(svc[name],
+                           charm='cs:{}/{}'.format(self.series, svc[name]),
+                           units=svc[units])
+            else:
+                self.d.add(svc[name], units=svc[units])

     def _add_relations(self, relations):
         """Add all of the relations for the services."""

View File

@@ -139,11 +139,11 @@ class AmuletUtils(object):
         return self._get_dir_mtime(sentry_unit, proc_dir)

     def service_restarted(self, sentry_unit, service, filename,
-                          pgrep_full=False, sleep_time=20):
+                          pgrep_full=False):
         """Compare a service's start time vs a file's last modification time
            (such as a config file for that service) to determine if the service
            has been restarted."""
-        sleep(sleep_time)
+        sleep(10)
         if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
                 self._get_file_mtime(sentry_unit, filename):
             return True
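
A usage sketch; the unit key and paths are illustrative, and `d` is an AmuletDeployment as in the sketch above:

    utils = AmuletUtils()
    restarted = utils.service_restarted(d.sentry.unit['keystone/0'],
                                        'keystone',
                                        '/etc/keystone/keystone.conf',
                                        pgrep_full=True)
    assert restarted, 'keystone was not restarted after config change'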

View File

@@ -7,7 +7,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
     """This class inherits from AmuletDeployment and has additional support
        that is specifically for use by OpenStack charms."""

-    def __init__(self, series, openstack=None, source=None):
+    def __init__(self, series=None, openstack=None, source=None):
         """Initialize the deployment environment."""
         super(OpenStackAmuletDeployment, self).__init__(series)
         self.openstack = openstack
@@ -24,15 +24,13 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         if self.openstack:
             for svc in services:
-                charm_name = self._get_charm_name(svc[name])
-                if charm_name not in use_source:
+                if svc[name] not in use_source:
                     config = {'openstack-origin': self.openstack}
                     self.d.configure(svc[name], config)

         if self.source:
             for svc in services:
-                charm_name = self._get_charm_name(svc[name])
-                if charm_name in use_source:
+                if svc[name] in use_source:
                     config = {'source': self.source}
                     self.d.configure(svc[name], config)

View File

@@ -177,40 +177,12 @@ class OpenStackAmuletUtils(AmuletUtils):
         image = glance.images.create(name=image_name, is_public=True,
                                      disk_format='qcow2',
                                      container_format='bare', data=f)
-        count = 1
-        status = image.status
-        while status != 'active' and count < 10:
-            time.sleep(3)
-            image = glance.images.get(image.id)
-            status = image.status
-            self.log.debug('image status: {}'.format(status))
-            count += 1
-
-        if status != 'active':
-            self.log.error('image creation timed out')
-            return None
-
         return image

     def delete_image(self, glance, image):
         """Delete the specified image."""
-        num_before = len(list(glance.images.list()))
         glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True

     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
         image = nova.images.find(name=image_name)
@@ -227,27 +199,11 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('instance status: {}'.format(status))
             count += 1

-        if status != 'ACTIVE':
-            self.log.error('instance creation timed out')
+        if status == 'BUILD':
             return None

         return instance

     def delete_instance(self, nova, instance):
         """Delete the specified instance."""
-        num_before = len(list(nova.servers.list()))
         nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True