Add 0mq support

parent 02b115c3b5
commit 642bd51cd9
@@ -1,4 +1,4 @@
-branch: lp:charm-helpers
+branch: lp:~openstack-charmers/charm-helpers/0mq
 destination: hooks/charmhelpers
 include:
     - core
@@ -6,6 +6,11 @@
 # Adam Gandelman <adamg@ubuntu.com>
 #
 
+"""
+Helpers for clustering and determining "cluster leadership" and other
+clustering-related helpers.
+"""
+
 import subprocess
 import os
 
@@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import (
     config as config_get,
     INFO,
     ERROR,
+    WARNING,
     unit_get,
 )
 
@@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception):
     pass
 
 
+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on two mechanisms to determine leadership:
+        1. If the charm is part of a corosync cluster, call corosync to
+        determine leadership.
+        2. If the charm is not part of a corosync cluster, the leader is
+        determined as being "the alive unit with the lowest unit number". In
+        other words, the oldest surviving unit.
+    """
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
 def is_clustered():
     for r_id in (relation_ids('ha') or []):
         for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@ def is_clustered():
     return False
 
 
-def is_leader(resource):
+def is_crm_leader(resource):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+    """
     cmd = [
         "crm", "resource",
         "show", resource
@@ -54,15 +87,31 @@ def is_leader(resource):
     return False
 
 
-def peer_units():
+def is_leader(resource):
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
     peers = []
-    for r_id in (relation_ids('cluster') or []):
+    for r_id in (relation_ids(peer_relation) or []):
         for unit in (relation_list(r_id) or []):
             peers.append(unit)
     return peers
 
 
+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers
+
+
 def oldest_peer(peers):
+    """Determines who the oldest peer is by comparing unit numbers."""
     local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
     for peer in peers:
         remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +121,9 @@ def oldest_peer(peers):
 
 
 def eligible_leader(resource):
-    if is_clustered():
-        if not is_leader(resource):
-            log('Deferring action to CRM leader.', level=INFO)
-            return False
-    else:
-        peers = peer_units()
-        if peers and not oldest_peer(peers):
-            log('Deferring action to oldest service unit.', level=INFO)
-            return False
-    return True
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)
 
 
 def https():
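Note (illustrative, not part of the commit): a charm hook built on the renamed helpers might look like the sketch below; the resource and relation names are assumptions.

    from charmhelpers.contrib.hahelpers.cluster import (
        is_elected_leader,
        peer_ips,
    )


    def cluster_changed():
        # Only the elected leader acts: the CRM leader when corosync-clustered,
        # otherwise the oldest surviving peer unit.
        if not is_elected_leader('res_example_vip'):
            return
        # peer_ips() maps each peer unit to its private-address.
        for unit, addr in peer_ips(peer_relation='cluster').items():
            configure_peer(unit, addr)  # hypothetical charm-specific helper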
@@ -4,7 +4,7 @@ from functools import partial
 
 from charmhelpers.fetch import apt_install
 from charmhelpers.core.hookenv import (
-    ERROR, log,
+    ERROR, log, config,
 )
 
 try:
@@ -154,3 +154,21 @@ def _get_for_address(address, key):
 get_iface_for_address = partial(_get_for_address, key='iface')
 
 get_netmask_for_address = partial(_get_for_address, key='netmask')
+
+
+def get_ipv6_addr(iface="eth0"):
+    try:
+        iface_addrs = netifaces.ifaddresses(iface)
+        if netifaces.AF_INET6 not in iface_addrs:
+            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
+
+        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
+        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
+                     and config('vip') != a['addr']]
+        if not ipv6_addr:
+            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
+
+        return ipv6_addr[0]
+
+    except ValueError:
+        raise ValueError("Invalid interface '%s'" % iface)
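Note (usage sketch, not part of the commit): get_ipv6_addr() returns the first global IPv6 address on the interface, skipping link-local fe80:: addresses and the configured vip, and raises if none is found.

    from charmhelpers.core import hookenv
    from charmhelpers.contrib.network.ip import get_ipv6_addr

    # Assumes eth0 carries a global IPv6 address; otherwise an Exception is raised.
    addr = get_ipv6_addr(iface='eth0')
    hookenv.log('using ipv6 address %s' % addr)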
@@ -4,8 +4,11 @@ from charmhelpers.contrib.amulet.deployment import (
 
 
 class OpenStackAmuletDeployment(AmuletDeployment):
-    """This class inherits from AmuletDeployment and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet deployment.
+
+    This class inherits from AmuletDeployment and has additional support
+    that is specifically for use by OpenStack charms.
+    """
 
     def __init__(self, series=None, openstack=None, source=None):
         """Initialize the deployment environment."""
@@ -40,11 +43,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             self.d.configure(service, config)
 
     def _get_openstack_release(self):
-        """Return an integer representing the enum value of the openstack
-        release."""
-        self.precise_essex, self.precise_folsom, self.precise_grizzly, \
-            self.precise_havana, self.precise_icehouse, \
-            self.trusty_icehouse = range(6)
+        """Get openstack release.
+
+        Return an integer representing the enum value of the openstack
+        release.
+        """
+        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
+         self.precise_havana, self.precise_icehouse,
+         self.trusty_icehouse) = range(6)
         releases = {
             ('precise', None): self.precise_essex,
             ('precise', 'cloud:precise-folsom'): self.precise_folsom,
@@ -16,8 +16,11 @@ ERROR = logging.ERROR
 
 
 class OpenStackAmuletUtils(AmuletUtils):
-    """This class inherits from AmuletUtils and has additional support
-    that is specifically for use by OpenStack charms."""
+    """OpenStack amulet utilities.
+
+    This class inherits from AmuletUtils and has additional support
+    that is specifically for use by OpenStack charms.
+    """
 
     def __init__(self, log_level=ERROR):
         """Initialize the deployment environment."""
@@ -25,13 +28,17 @@ class OpenStackAmuletUtils(AmuletUtils):
 
     def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                                public_port, expected):
-        """Validate actual endpoint data vs expected endpoint data. The ports
-        are used to find the matching endpoint."""
+        """Validate endpoint data.
+
+        Validate actual endpoint data vs expected endpoint data. The ports
+        are used to find the matching endpoint.
+        """
         found = False
         for ep in endpoints:
             self.log.debug('endpoint: {}'.format(repr(ep)))
-            if admin_port in ep.adminurl and internal_port in ep.internalurl \
-                    and public_port in ep.publicurl:
+            if (admin_port in ep.adminurl and
+                    internal_port in ep.internalurl and
+                    public_port in ep.publicurl):
                 found = True
                 actual = {'id': ep.id,
                           'region': ep.region,
@@ -47,8 +54,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return 'endpoint not found'
 
     def validate_svc_catalog_endpoint_data(self, expected, actual):
-        """Validate a list of actual service catalog endpoints vs a list of
-        expected service catalog endpoints."""
+        """Validate service catalog endpoint data.
+
+        Validate a list of actual service catalog endpoints vs a list of
+        expected service catalog endpoints.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for k, v in expected.iteritems():
             if k in actual:
@@ -60,8 +70,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_tenant_data(self, expected, actual):
-        """Validate a list of actual tenant data vs list of expected tenant
-        data."""
+        """Validate tenant data.
+
+        Validate a list of actual tenant data vs list of expected tenant
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -78,8 +91,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_role_data(self, expected, actual):
-        """Validate a list of actual role data vs a list of expected role
-        data."""
+        """Validate role data.
+
+        Validate a list of actual role data vs a list of expected role
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -95,8 +111,11 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_user_data(self, expected, actual):
-        """Validate a list of actual user data vs a list of expected user
-        data."""
+        """Validate user data.
+
+        Validate a list of actual user data vs a list of expected user
+        data.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         for e in expected:
             found = False
@@ -114,20 +133,23 @@ class OpenStackAmuletUtils(AmuletUtils):
         return ret
 
     def validate_flavor_data(self, expected, actual):
-        """Validate a list of actual flavors vs a list of expected flavors."""
+        """Validate flavor data.
+
+        Validate a list of actual flavors vs a list of expected flavors.
+        """
         self.log.debug('actual: {}'.format(repr(actual)))
         act = [a.name for a in actual]
         return self._validate_list_data(expected, act)
 
     def tenant_exists(self, keystone, tenant):
-        """Return True if tenant exists"""
+        """Return True if tenant exists."""
         return tenant in [t.name for t in keystone.tenants.list()]
 
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
-        service_ip = \
-            keystone_sentry.relation('shared-db',
+        unit = keystone_sentry
+        service_ip = unit.relation('shared-db',
                                    'mysql:shared-db')['private-address']
         ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
         return keystone_client.Client(username=user, password=password,
@@ -177,12 +199,40 @@ class OpenStackAmuletUtils(AmuletUtils):
         image = glance.images.create(name=image_name, is_public=True,
                                      disk_format='qcow2',
                                      container_format='bare', data=f)
+        count = 1
+        status = image.status
+        while status != 'active' and count < 10:
+            time.sleep(3)
+            image = glance.images.get(image.id)
+            status = image.status
+            self.log.debug('image status: {}'.format(status))
+            count += 1
+
+        if status != 'active':
+            self.log.error('image creation timed out')
+            return None
+
         return image
 
     def delete_image(self, glance, image):
         """Delete the specified image."""
+        num_before = len(list(glance.images.list()))
         glance.images.delete(image)
+
+        count = 1
+        num_after = len(list(glance.images.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(glance.images.list()))
+            self.log.debug('number of images: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('image deletion timed out')
+            return False
+
+        return True
 
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
         image = nova.images.find(name=image_name)
@@ -199,11 +249,27 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('instance status: {}'.format(status))
             count += 1
 
-        if status == 'BUILD':
+        if status != 'ACTIVE':
+            self.log.error('instance creation timed out')
             return None
 
         return instance
 
     def delete_instance(self, nova, instance):
         """Delete the specified instance."""
+        num_before = len(list(nova.servers.list()))
         nova.servers.delete(instance)
+
+        count = 1
+        num_after = len(list(nova.servers.list()))
+        while num_after != (num_before - 1) and count < 10:
+            time.sleep(3)
+            num_after = len(list(nova.servers.list()))
+            self.log.debug('number of instances: {}'.format(num_after))
+            count += 1
+
+        if num_after != (num_before - 1):
+            self.log.error('instance deletion timed out')
+            return False
+
+        return True
@@ -21,6 +21,7 @@ from charmhelpers.core.hookenv import (
     relation_get,
     relation_ids,
     related_units,
+    is_relation_made,
     relation_set,
     unit_get,
     unit_private_ip,
@@ -44,7 +45,10 @@ from charmhelpers.contrib.openstack.neutron import (
     neutron_plugin_attribute,
 )
 
-from charmhelpers.contrib.network.ip import get_address_in_network
+from charmhelpers.contrib.network.ip import (
+    get_address_in_network,
+    get_ipv6_addr,
+)
 
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
@@ -401,9 +405,12 @@ class HAProxyContext(OSContextGenerator):
 
         cluster_hosts = {}
         l_unit = local_unit().replace('/', '-')
-        cluster_hosts[l_unit] = \
-            get_address_in_network(config('os-internal-network'),
-                                   unit_get('private-address'))
+        if config('prefer-ipv6'):
+            addr = get_ipv6_addr()
+        else:
+            addr = unit_get('private-address')
+        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
+                                                       addr)
 
         for rid in relation_ids('cluster'):
             for unit in related_units(rid):
@@ -414,6 +421,16 @@ class HAProxyContext(OSContextGenerator):
         ctxt = {
             'units': cluster_hosts,
         }
+
+        if config('prefer-ipv6'):
+            ctxt['local_host'] = 'ip6-localhost'
+            ctxt['haproxy_host'] = '::'
+            ctxt['stat_port'] = ':::8888'
+        else:
+            ctxt['local_host'] = '127.0.0.1'
+            ctxt['haproxy_host'] = '0.0.0.0'
+            ctxt['stat_port'] = ':8888'
+
         if len(cluster_hosts.keys()) > 1:
             # Enable haproxy when we have enough peers.
             log('Ensuring haproxy enabled in /etc/default/haproxy.')
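Note (assumption derived from the keys set above, not text from the commit): with prefer-ipv6 enabled the HAProxy context resolves to roughly the following, and these keys feed the {{ local_host }} and {{ stat_port }} variables in the haproxy.cfg template changed later in this diff.

    ctxt = {
        'units': {'example-unit-0': 'fd00::10'},  # illustrative unit/address
        'local_host': 'ip6-localhost',
        'haproxy_host': '::',
        'stat_port': ':::8888',
    }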
@@ -753,6 +770,17 @@ class SubordinateConfigContext(OSContextGenerator):
         return ctxt
 
 
+class LogLevelContext(OSContextGenerator):
+
+    def __call__(self):
+        ctxt = {}
+        ctxt['debug'] = \
+            False if config('debug') is None else config('debug')
+        ctxt['verbose'] = \
+            False if config('verbose') is None else config('verbose')
+        return ctxt
+
+
 class SyslogContext(OSContextGenerator):
 
     def __call__(self):
@@ -760,3 +788,16 @@ class SyslogContext(OSContextGenerator):
             'use_syslog': config('use-syslog')
         }
         return ctxt
+
+
+class ZeroMQContext(OSContextGenerator):
+    interfaces = ['zeromq-configuration']
+
+    def __call__(self):
+        ctxt = {}
+        if is_relation_made('zeromq-configuration', 'host'):
+            for rid in relation_ids('zeromq-configuration'):
+                for unit in related_units(rid):
+                    ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+                    ctxt['zmq_host'] = relation_get('host', unit, rid)
+        return ctxt
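Note (sketch only; assumes the usual OSConfigRenderer wiring from charmhelpers.contrib.openstack.templating, which is not part of this diff): a charm would register the new 0mq context like any other context generator.

    from charmhelpers.contrib.openstack import context, templating

    # ZeroMQContext stays empty until the zeromq-configuration relation has
    # provided 'host' (and 'nonce'); templates can test for zmq_host.
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    configs.register('/etc/example/example.conf',
                     [context.ZeroMQContext(),
                      context.LogLevelContext()])
    configs.write_all()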
@@ -7,6 +7,7 @@ from charmhelpers.contrib.network.ip import (
     get_address_in_network,
     is_address_in_network,
     is_ipv6,
+    get_ipv6_addr,
 )
 
 from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -64,10 +65,13 @@ def resolve_address(endpoint_type=PUBLIC):
                 vip):
             resolved_address = vip
     else:
+        if config('prefer-ipv6'):
+            fallback_addr = get_ipv6_addr()
+        else:
+            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
         resolved_address = get_address_in_network(
-            config(_address_map[endpoint_type]['config']),
-            unit_get(_address_map[endpoint_type]['fallback'])
-        )
+            config(_address_map[endpoint_type]['config']), fallback_addr)
 
     if resolved_address is None:
         raise ValueError('Unable to resolve a suitable IP address'
                          ' based on charm state and configuration')
@@ -1,6 +1,6 @@
 global
-    log 127.0.0.1 local0
-    log 127.0.0.1 local1 notice
+    log {{ local_host }} local0
+    log {{ local_host }} local1 notice
     maxconn 20000
     user haproxy
     group haproxy
@@ -17,7 +17,7 @@ defaults
     timeout client 30000
     timeout server 30000
 
-listen stats :8888
+listen stats {{ stat_port }}
     mode http
     stats enable
     stats hide-version
@@ -23,7 +23,7 @@ from charmhelpers.contrib.storage.linux.lvm import (
 )
 
 from charmhelpers.core.host import lsb_release, mounts, umount
-from charmhelpers.fetch import apt_install
+from charmhelpers.fetch import apt_install, apt_cache
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 
@@ -70,6 +70,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('1.13.0', 'icehouse'),
     ('1.12.0', 'icehouse'),
     ('1.11.0', 'icehouse'),
+    ('2.0.0', 'juno'),
 ])
 
 DEFAULT_LOOPBACK_SIZE = '5G'
@@ -134,13 +135,8 @@ def get_os_version_codename(codename):
 def get_os_codename_package(package, fatal=True):
     '''Derive OpenStack release codename from an installed package.'''
     import apt_pkg as apt
-    apt.init()
-
-    # Tell apt to build an in-memory cache to prevent race conditions (if
-    # another process is already building the cache).
-    apt.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt.Cache()
+
+    cache = apt_cache()
 
     try:
         pkg = cache[package]
@@ -46,5 +46,8 @@ def is_device_mounted(device):
     :returns: boolean: True if the path represents a mounted device, False if
         it doesn't.
     '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
     out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
     return bool(re.search(device + r"[0-9]+\b", out))
@@ -156,12 +156,15 @@ def hook_name():
 
 
 class Config(dict):
-    """A Juju charm config dictionary that can write itself to
-    disk (as json) and track which values have changed since
-    the previous hook invocation.
-
-    Do not instantiate this object directly - instead call
-    ``hookenv.config()``
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
+
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
 
     Example usage::
 
@@ -170,8 +173,8 @@ class Config(dict):
         >>> config = hookenv.config()
         >>> config['foo']
         'bar'
+        >>> # store a new key/value for later use
         >>> config['mykey'] = 'myval'
-        >>> config.save()
 
 
         >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,23 @@ class Config(dict):
         >>> # keys/values that we add are preserved across hooks
         >>> config['mykey']
         'myval'
-        >>> # don't forget to save at the end of hook!
-        >>> config.save()
 
     """
     CONFIG_FILE_NAME = '.juju-persistent-config'
 
     def __init__(self, *args, **kw):
         super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
         self._prev_dict = None
         self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
         if os.path.exists(self.path):
            self.load_previous()
 
     def load_previous(self, path=None):
-        """Load previous copy of config from disk so that current values
-        can be compared to previous values.
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
 
         :param path:
 
@@ -218,8 +222,8 @@ class Config(dict):
             self._prev_dict = json.load(f)
 
     def changed(self, key):
-        """Return true if the value for this key has changed since
-        the last save.
+        """Return True if the current value for this key is different from
+        the previous value.
 
         """
         if self._prev_dict is None:
@@ -228,7 +232,7 @@ class Config(dict):
 
     def previous(self, key):
         """Return previous value for this key, or None if there
-        is no "previous" value.
+        is no previous value.
 
         """
         if self._prev_dict:
@@ -238,7 +242,13 @@ class Config(dict):
     def save(self):
         """Save this config to disk.
 
-        Preserves items in _prev_dict that do not exist in self.
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:'@hook <Hooks.hook>' decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
 
         """
         if self._prev_dict:
@@ -285,8 +295,9 @@ def relation_get(attribute=None, unit=None, rid=None):
         raise
 
 
-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
     """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
     relation_cmd_line = ['relation-set']
     if relation_id is not None:
         relation_cmd_line.extend(('-r', relation_id))
@@ -477,6 +488,9 @@ class Hooks(object):
         hook_name = os.path.basename(args[0])
         if hook_name in self._hooks:
             self._hooks[hook_name]()
+            cfg = config()
+            if cfg.implicit_save:
+                cfg.save()
         else:
             raise UnregisteredHookError(hook_name)
 
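Note (illustrative, not part of the commit): with implicit_save, a hook registered through Hooks.execute() no longer needs a trailing config.save(), and the behaviour can be disabled per instance.

    from charmhelpers.core import hookenv

    hooks = hookenv.Hooks()


    @hooks.hook('config-changed')
    def config_changed():
        cfg = hookenv.config()
        if cfg.changed('source'):           # compared against the previous hook run
            update_sources(cfg['source'])   # hypothetical charm-specific helper
        # No cfg.save() here: Hooks.execute() saves automatically unless
        # cfg.implicit_save is set to False.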
@@ -12,6 +12,8 @@ import random
 import string
 import subprocess
 import hashlib
+import shutil
+from contextlib import contextmanager
 
 from collections import OrderedDict
 
@@ -52,7 +54,7 @@ def service(action, service_name):
 def service_running(service):
     """Determine whether a system service is running"""
     try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError:
         return False
     else:
@@ -62,6 +64,16 @@ def service_running(service):
         return False
 
 
+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError:
+        return False
+    else:
+        return True
+
+
 def adduser(username, password=None, shell='/bin/bash', system_user=False):
     """Add a user to the system"""
     try:
@@ -320,12 +332,29 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
 
     '''
     import apt_pkg
+    from charmhelpers.fetch import apt_cache
     if not pkgcache:
-        apt_pkg.init()
-        # Force Apt to build its cache in memory. That way we avoid race
-        # conditions with other applications building the cache in the same
-        # place.
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        pkgcache = apt_pkg.Cache()
+        pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
 
 
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)
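Note (usage sketch with illustrative paths, not part of the commit): the two new host helpers.

    import subprocess

    from charmhelpers.core.host import chdir, chownr

    # Recursive chown that skips broken symlinks.
    chownr('/var/lib/example', owner='example', group='example')

    # Context manager that restores the previous working directory afterwards.
    with chdir('/var/lib/example'):
        subprocess.check_call(['tar', 'xzf', '/tmp/payload.tar.gz'])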
hooks/charmhelpers/core/services/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .base import *
from .helpers import *
hooks/charmhelpers/core/services/base.py (new file, 313 lines)
@@ -0,0 +1,313 @@
import os
import re
import json
from collections import Iterable

from charmhelpers.core import host
from charmhelpers.core import hookenv


__all__ = ['ServiceManager', 'ManagerCallback',
           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
           'service_restart', 'service_stop']


class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`. Each callback will be called with the service name as the
        only parameter. After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete. Each callback will be called with the service
        name as the only parameter. This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service. If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage. The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready


class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service it's being triggered for
        * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()


class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)


def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)


def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)


# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()
hooks/charmhelpers/core/services/helpers.py (new file, 125 lines)
@@ -0,0 +1,125 @@
from charmhelpers.core import hookenv
from charmhelpers.core import templating

from charmhelpers.core.services.base import ManagerCallback


__all__ = ['RelationContext', 'TemplateCallback',
           'render_template', 'template']


class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete. The data for all interfaces matching
    the `name` attribute that are complete will be used to populate the
    dictionary values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to prevent
    potential naming conflicts.
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`. This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexicographically first by the service ID, then by
        the unit ID. Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`. However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}


class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)


# Convenience aliases for templates
render_template = template = TemplateCallback
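Note (compact sketch with illustrative names, not part of the commit): a RelationContext subclass supplies the required data and ServiceManager drives template rendering, port management and restarts.

    from charmhelpers.core import services
    from charmhelpers.core.services import RelationContext


    class DatabaseRelation(RelationContext):
        name = 'database'
        interface = 'mysql'
        required_keys = ['host', 'user', 'password']


    manager = services.ServiceManager([{
        'service': 'example-api',
        'ports': [8080],
        'required_data': [DatabaseRelation()],
        'data_ready': [services.template(source='api.conf',
                                         target='/etc/example/api.conf')],
    }])
    manager.manage()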
hooks/charmhelpers/core/templating.py (new file, 51 lines)
@@ -0,0 +1,51 @@
import os

from charmhelpers.core import host
from charmhelpers.core import hookenv


def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        source = source
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
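Note (usage sketch with illustrative file names, not part of the commit): the standalone render() helper.

    from charmhelpers.core.templating import render

    # Renders templates/haproxy.cfg with jinja2 (installing python-jinja2 on
    # demand) and writes the result with the given ownership and mode.
    render(source='haproxy.cfg',
           target='/etc/haproxy/haproxy.cfg',
           context={'local_host': '127.0.0.1', 'stat_port': ':8888'},
           perms=0444)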
@@ -1,4 +1,5 @@
 import importlib
+from tempfile import NamedTemporaryFile
 import time
 from yaml import safe_load
 from charmhelpers.core.host import (
@@ -116,14 +117,7 @@ class BaseFetchHandler(object):
 
 def filter_installed_packages(packages):
     """Returns a list of packages that require installation"""
-    import apt_pkg
-    apt_pkg.init()
-
-    # Tell apt to build an in-memory cache to prevent race conditions (if
-    # another process is already building the cache).
-    apt_pkg.config.set("Dir::Cache::pkgcache", "")
-
-    cache = apt_pkg.Cache()
+    cache = apt_cache()
     _pkgs = []
     for package in packages:
         try:
@ -136,6 +130,16 @@ def filter_installed_packages(packages):
    return _pkgs


+def apt_cache(in_memory=True):
+    """Build and return an apt cache"""
+    import apt_pkg
+    apt_pkg.init()
+    if in_memory:
+        apt_pkg.config.set("Dir::Cache::pkgcache", "")
+        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
+    return apt_pkg.Cache()
+
+
def apt_install(packages, options=None, fatal=False):
    """Install one or more packages"""
    if options is None:
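A short sketch of how the refactored helpers fit together (package names are illustrative, not part of this commit):

    # filter_installed_packages() now builds its cache via apt_cache(), which
    # defaults to an in-memory cache so concurrent hooks do not race on the
    # on-disk pkgcache files.
    from charmhelpers.fetch import filter_installed_packages, apt_install

    missing = filter_installed_packages(['openvswitch-switch',
                                         'neutron-plugin-openvswitch-agent'])
    if missing:
        apt_install(missing, fatal=True)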
@ -201,6 +205,27 @@ def apt_hold(packages, fatal=False):


def add_source(source, key=None):
+    """Add a package source to this system.
+
+    @param source: a URL or sources.list entry, as supported by
+    add-apt-repository(1). Examples:
+        ppa:charmers/example
+        deb https://stub:key@private.example.com/ubuntu trusty main
+
+    In addition:
+        'proposed:' may be used to enable the standard 'proposed'
+        pocket for the release.
+        'cloud:' may be used to activate official cloud archive pockets,
+        such as 'cloud:icehouse'
+
+    @param key: A key to be added to the system's APT keyring and used
+    to verify the signatures on packages. Ideally, this should be an
+    ASCII-format GPG public key including the block headers. A GPG key
+    id may also be used, but be aware that only insecure protocols are
+    available to retrieve the actual public key from a public keyserver,
+    placing your Juju environment at risk. PPA and cloud archive keys
+    are securely added automatically, so should not be provided.
+    """
    if source is None:
        log('Source is not present. Skipping')
        return
@ -225,7 +250,20 @@ def add_source(source, key=None):
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
+    else:
+        raise SourceConfigError("Unknown source: {!r}".format(source))
+
    if key:
+        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
+            with NamedTemporaryFile() as key_file:
+                key_file.write(key)
+                key_file.flush()
+                key_file.seek(0)
+                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
+        else:
+            # Note that hkp: is in no way a secure protocol. Using a
+            # GPG key id is pointless from a security POV unless you
+            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])
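For illustration, typical calls to add_source() look like this (the PPA name and key id are hypothetical):

    from charmhelpers.fetch import add_source, apt_update

    # Cloud archive pocket: the archive key is added automatically.
    add_source('cloud:icehouse')

    # PPA plus an explicit GPG key id, fetched over hkp (see the caveat above).
    add_source('ppa:charmers/example', key='A1B2C3D4')

    apt_update(fatal=True)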
@ -238,7 +276,8 @@ def configure_sources(update=False,
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
-    The frament needs to be included as a string.
+    The fragment needs to be included as a string. Sources and their
+    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
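A sketch of how a charm would normally drive this (the config values in the comment are illustrative; install_sources and install_keys are the default option names used by configure_sources):

    # configure_sources() reads YAML-encoded lists from charm config, passes
    # each entry with its matching key to add_source(), and can then run
    # apt_update().
    from charmhelpers.fetch import configure_sources

    # Example charm config:
    #   install_sources: |
    #     - ppa:charmers/example
    #     - cloud:icehouse
    #   install_keys: |
    #     - null
    #     - null
    configure_sources(update=True)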
@ -8,6 +8,7 @@ from charmhelpers.core.hookenv import (
    config,
    log,
    relation_set,
+    relation_ids,
)

from charmhelpers.core.host import (
@ -20,6 +21,7 @@ from charmhelpers.fetch import (

from neutron_ovs_utils import (
    determine_packages,
+    get_topics,
    register_configs,
    restart_map,
)
@ -42,6 +44,8 @@ def install():
@restart_on_change(restart_map())
def config_changed():
    CONFIGS.write_all()
+    for rid in relation_ids('zeromq-configuration'):
+        zeromq_configuration_relation_joined(rid)


@hooks.hook('amqp-relation-joined')
@ -61,6 +65,19 @@ def amqp_changed():
    CONFIGS.write_all()


+@hooks.hook('zeromq-configuration-relation-joined')
+def zeromq_configuration_relation_joined(relid=None):
+    relation_set(relation_id=relid,
+                 topics=" ".join(get_topics()),
+                 users="nova")
+
+
+@hooks.hook('zeromq-configuration-relation-changed')
+@restart_on_change(restart_map(), stopstart=True)
+def zeromq_configuration_relation_changed():
+    CONFIGS.write_all()
+
+
def main():
    try:
        hooks.execute(sys.argv)
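On the other side of the relation, the zeromq-configuration provider would see roughly the following data after the joined hook fires (a sketch, not part of this commit):

    from charmhelpers.core.hookenv import relation_get

    # Settings published by zeromq_configuration_relation_joined() above.
    topics = (relation_get('topics') or '').split()  # e.g. ['q-plugin']
    users = relation_get('users')                    # e.g. 'nova'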
@ -18,7 +18,8 @@ BASE_RESOURCE_MAP = OrderedDict([
    (NEUTRON_CONF, {
        'services': ['neutron-plugin-openvswitch-agent'],
        'contexts': [neutron_ovs_context.OVSPluginContext(),
-                     context.AMQPContext()],
+                     context.AMQPContext(),
+                     context.ZeroMQContext()],
    }),
    (ML2_CONF, {
        'services': ['neutron-plugin-openvswitch-agent'],
@ -56,3 +57,8 @@ def restart_map():
    state.
    '''
    return {k: v['services'] for k, v in resource_map().iteritems()}
+
+
+def get_topics():
+    return ['q-plugin']
1
hooks/zeromq-configuration-relation-changed
Symbolic link
@ -0,0 +1 @@
neutron_ovs_hooks.py

1
hooks/zeromq-configuration-relation-joined
Symbolic link
@ -0,0 +1 @@
neutron_ovs_hooks.py
@ -28,3 +28,7 @@ requires:
    scope: container
  neutron-plugin-api:
    interface: neutron-plugin-api
+  zeromq-configuration:
+    interface: zeromq-configuration
+    scope: container
@ -25,6 +25,8 @@ notification_topics = notifications

{% include "parts/rabbitmq" %}

+{% include "parts/zeromq" %}
+
[QUOTAS]

[DEFAULT_SERVICETYPE]
6
templates/parts/zeromq
Normal file
6
templates/parts/zeromq
Normal file
@ -0,0 +1,6 @@
{% if zmq_host -%}
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
rpc_backend = zmq
rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing
rpc_zmq_host = {{ zmq_host }}
{% endif -%}
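To see what this fragment produces, it can be rendered on its own with jinja2 (a sketch; the host and nonce values are illustrative and would normally come from the charm's ZeroMQ context):

    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader('templates'))
    part = env.get_template('parts/zeromq')
    print(part.render(zmq_host='neutron-ovs-0', zmq_nonce='abc123'))
    # Roughly:
    #   # ZeroMQ configuration (restart-nonce: abc123)
    #   rpc_backend = zmq
    #   rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing
    #   rpc_zmq_host = neutron-ovs-0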