diff --git a/Makefile b/Makefile index 8dc88152..e7b1bce1 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,11 @@ PYTHON := /usr/bin/env python lint: - @flake8 --exclude hooks/charmhelpers hooks unit_tests + @flake8 --exclude hooks/charmhelpers hooks unit_tests tests @charm proof -test: +unit_test: + @echo Starting unit tests... @$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests bin/charm_helpers_sync.py: @@ -14,8 +15,16 @@ bin/charm_helpers_sync.py: > bin/charm_helpers_sync.py sync: bin/charm_helpers_sync.py - @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml + @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml -publish: lint test +test: + @echo Starting Amulet tests... + # coreycb note: The -v should only be temporary until Amulet sends + # raise_status() messages to stderr: + # https://bugs.launchpad.net/amulet/+bug/1320357 + @juju test -v -p AMULET_HTTP_PROXY + +publish: lint unit_test bzr push lp:charms/quantum-gateway bzr push lp:charms/trusty/quantum-gateway diff --git a/charm-helpers.yaml b/charm-helpers-hooks.yaml similarity index 78% rename from charm-helpers.yaml rename to charm-helpers-hooks.yaml index 81810093..1a98c81c 100644 --- a/charm-helpers.yaml +++ b/charm-helpers-hooks.yaml @@ -5,7 +5,6 @@ include: - fetch - contrib.openstack - contrib.hahelpers - - contrib.network.ovs + - contrib.network - contrib.storage.linux - payload.execd - - contrib.network.ip diff --git a/charm-helpers-tests.yaml b/charm-helpers-tests.yaml new file mode 100644 index 00000000..48b12f6f --- /dev/null +++ b/charm-helpers-tests.yaml @@ -0,0 +1,5 @@ +branch: lp:charm-helpers +destination: tests/charmhelpers +include: + - contrib.amulet + - contrib.openstack.amulet diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 92c41b23..ea9cf1b6 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ 
b/hooks/charmhelpers/contrib/openstack/context.py @@ -753,6 +753,17 @@ class SubordinateConfigContext(OSContextGenerator): return ctxt +class LogLevelContext(OSContextGenerator): + + def __call__(self): + ctxt = {} + ctxt['debug'] = \ + False if config('debug') is None else config('debug') + ctxt['verbose'] = \ + False if config('verbose') is None else config('verbose') + return ctxt + + class SyslogContext(OSContextGenerator): def __call__(self): diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py new file mode 100644 index 00000000..7e7a536f --- /dev/null +++ b/hooks/charmhelpers/contrib/openstack/ip.py @@ -0,0 +1,75 @@ +from charmhelpers.core.hookenv import ( + config, + unit_get, +) + +from charmhelpers.contrib.network.ip import ( + get_address_in_network, + is_address_in_network, + is_ipv6, +) + +from charmhelpers.contrib.hahelpers.cluster import is_clustered + +PUBLIC = 'public' +INTERNAL = 'int' +ADMIN = 'admin' + +_address_map = { + PUBLIC: { + 'config': 'os-public-network', + 'fallback': 'public-address' + }, + INTERNAL: { + 'config': 'os-internal-network', + 'fallback': 'private-address' + }, + ADMIN: { + 'config': 'os-admin-network', + 'fallback': 'private-address' + } +} + + +def canonical_url(configs, endpoint_type=PUBLIC): + ''' + Returns the correct HTTP URL to this host given the state of HTTPS + configuration, hacluster and charm configuration. + + :configs OSTemplateRenderer: A config tempating object to inspect for + a complete https context. + :endpoint_type str: The endpoint type to resolve. + + :returns str: Base URL for services on the current service unit. 
+ ''' + scheme = 'http' + if 'https' in configs.complete_contexts(): + scheme = 'https' + address = resolve_address(endpoint_type) + if is_ipv6(address): + address = "[{}]".format(address) + return '%s://%s' % (scheme, address) + + +def resolve_address(endpoint_type=PUBLIC): + resolved_address = None + if is_clustered(): + if config(_address_map[endpoint_type]['config']) is None: + # Assume vip is simple and pass back directly + resolved_address = config('vip') + else: + for vip in config('vip').split(): + if is_address_in_network( + config(_address_map[endpoint_type]['config']), + vip): + resolved_address = vip + else: + resolved_address = get_address_in_network( + config(_address_map[endpoint_type]['config']), + unit_get(_address_map[endpoint_type]['fallback']) + ) + if resolved_address is None: + raise ValueError('Unable to resolve a suitable IP address' + ' based on charm state and configuration') + else: + return resolved_address diff --git a/templates/havana/metadata_agent.ini b/templates/havana/metadata_agent.ini index 4ce68537..b9141fdf 100644 --- a/templates/havana/metadata_agent.ini +++ b/templates/havana/metadata_agent.ini @@ -2,6 +2,8 @@ # [ WARNING ] # Configuration file maintained by Juju. Local changes may be overwritten. 
############################################################################### +# Metadata service seems to cache neutron api url from keystone so trigger +# restart if it changes: {{ quantum_url }} [DEFAULT] auth_url = {{ service_protocol }}://{{ keystone_host }}:{{ service_port }}/v2.0 auth_region = {{ region }} diff --git a/tests/00-setup b/tests/00-setup new file mode 100755 index 00000000..f12fd16a --- /dev/null +++ b/tests/00-setup @@ -0,0 +1,10 @@ +#!/bin/bash + +set -ex + +sudo add-apt-repository --yes ppa:juju/stable +sudo apt-get update --yes +sudo apt-get install --yes python-amulet +sudo apt-get install --yes python-neutronclient +sudo apt-get install --yes python-keystoneclient +sudo apt-get install --yes python-novaclient diff --git a/tests/12-basic-precise-grizzly b/tests/12-basic-precise-grizzly new file mode 100755 index 00000000..29c358b9 --- /dev/null +++ b/tests/12-basic-precise-grizzly @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic quantum-gateway deployment on precise-grizzly.""" + +from basic_deployment import QuantumGatewayBasicDeployment + +if __name__ == '__main__': + deployment = QuantumGatewayBasicDeployment(series='precise', + openstack='cloud:precise-grizzly', + source='cloud:precise-updates/grizzly') + deployment.run_tests() diff --git a/tests/13-basic-precise-havana b/tests/13-basic-precise-havana new file mode 100755 index 00000000..285fc333 --- /dev/null +++ b/tests/13-basic-precise-havana @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic quantum-gateway deployment on precise-havana.""" + +from basic_deployment import QuantumGatewayBasicDeployment + +if __name__ == '__main__': + deployment = QuantumGatewayBasicDeployment(series='precise', + openstack='cloud:precise-havana', + source='cloud:precise-updates/havana') + deployment.run_tests() diff --git a/tests/14-basic-precise-icehouse b/tests/14-basic-precise-icehouse new file mode 100755 index 00000000..a5a59bf7 --- /dev/null +++ 
b/tests/14-basic-precise-icehouse @@ -0,0 +1,11 @@ +#!/usr/bin/python + +"""Amulet tests on a basic quantum-gateway deployment on precise-icehouse.""" + +from basic_deployment import QuantumGatewayBasicDeployment + +if __name__ == '__main__': + deployment = QuantumGatewayBasicDeployment(series='precise', + openstack='cloud:precise-icehouse', + source='cloud:precise-updates/icehouse') + deployment.run_tests() diff --git a/tests/15-basic-trusty-icehouse b/tests/15-basic-trusty-icehouse new file mode 100755 index 00000000..81f3ea12 --- /dev/null +++ b/tests/15-basic-trusty-icehouse @@ -0,0 +1,9 @@ +#!/usr/bin/python + +"""Amulet tests on a basic quantum-gateway deployment on trusty-icehouse.""" + +from basic_deployment import QuantumGatewayBasicDeployment + +if __name__ == '__main__': + deployment = QuantumGatewayBasicDeployment(series='trusty') + deployment.run_tests() diff --git a/tests/README b/tests/README new file mode 100644 index 00000000..15f04bf4 --- /dev/null +++ b/tests/README @@ -0,0 +1,47 @@ +This directory provides Amulet tests that focus on verification of +quantum-gateway deployments. + +If you use a web proxy server to access the web, you'll need to set the +AMULET_HTTP_PROXY environment variable to the http URL of the proxy server. + +The following examples demonstrate different ways that tests can be executed. +All examples are run from the charm's root directory. 
+ + * To run all tests (starting with 00-setup): + + make test + + * To run a specific test module (or modules): + + juju test -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To run a specific test module (or modules), and keep the environment + deployed after a failure: + + juju test --set-e -v -p AMULET_HTTP_PROXY 15-basic-trusty-icehouse + + * To re-run a test module against an already deployed environment (one + that was deployed by a previous call to 'juju test --set-e'): + + ./tests/15-basic-trusty-icehouse + +For debugging and test development purposes, all code should be idempotent. +In other words, the code should have the ability to be re-run without changing +the results beyond the initial run. This enables editing and re-running of a +test module against an already deployed environment, as described above. + +Manual debugging tips: + + * Set the following env vars before using the OpenStack CLI as admin: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=admin + export OS_USERNAME=admin + export OS_PASSWORD=openstack + export OS_REGION_NAME=RegionOne + + * Set the following env vars before using the OpenStack CLI as demoUser: + export OS_AUTH_URL=http://`juju-deployer -f keystone 2>&1 | tail -n 1`:5000/v2.0 + export OS_TENANT_NAME=demoTenant + export OS_USERNAME=demoUser + export OS_PASSWORD=password + export OS_REGION_NAME=RegionOne diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py new file mode 100644 index 00000000..e8a482d5 --- /dev/null +++ b/tests/basic_deployment.py @@ -0,0 +1,834 @@ +#!/usr/bin/python + +import amulet +try: + from quantumclient.v2_0 import client as neutronclient +except ImportError: + from neutronclient.v2_0 import client as neutronclient + +from charmhelpers.contrib.openstack.amulet.deployment import ( + OpenStackAmuletDeployment +) + +from charmhelpers.contrib.openstack.amulet.utils import ( + OpenStackAmuletUtils, + DEBUG, # flake8: noqa + ERROR 
+) + +# Use DEBUG to turn on debug logging +u = OpenStackAmuletUtils(ERROR) + + +class QuantumGatewayBasicDeployment(OpenStackAmuletDeployment): + """Amulet tests on a basic quantum-gateway deployment.""" + + def __init__(self, series, openstack=None, source=None): + """Deploy the entire test environment.""" + super(QuantumGatewayBasicDeployment, self).__init__(series, openstack, + source) + self._add_services() + self._add_relations() + self._configure_services() + self._deploy() + self._initialize_tests() + + def _add_services(self): + """Add the service that we're testing, including the number of units, + where quantum-gateway is local, and the other charms are from + the charm store.""" + this_service = ('quantum-gateway', 1) + other_services = [('mysql', 1), + ('rabbitmq-server', 1), ('keystone', 1), + ('nova-cloud-controller', 1)] + super(QuantumGatewayBasicDeployment, self)._add_services(this_service, + other_services) + + def _add_relations(self): + """Add all of the relations for the services.""" + relations = { + 'keystone:shared-db': 'mysql:shared-db', + 'quantum-gateway:shared-db': 'mysql:shared-db', + 'quantum-gateway:amqp': 'rabbitmq-server:amqp', + 'nova-cloud-controller:quantum-network-service': \ + 'quantum-gateway:quantum-network-service', + 'nova-cloud-controller:shared-db': 'mysql:shared-db', + 'nova-cloud-controller:identity-service': 'keystone:identity-service', + 'nova-cloud-controller:amqp': 'rabbitmq-server:amqp' + } + super(QuantumGatewayBasicDeployment, self)._add_relations(relations) + + def _configure_services(self): + """Configure all of the services.""" + keystone_config = {'admin-password': 'openstack', + 'admin-token': 'ubuntutesting'} + nova_cc_config = {'network-manager': 'Quantum', + 'quantum-security-groups': 'yes'} + configs = {'keystone': keystone_config, + 'nova-cloud-controller': nova_cc_config} + super(QuantumGatewayBasicDeployment, self)._configure_services(configs) + + def _initialize_tests(self): + """Perform final 
initialization before tests get run.""" + # Access the sentries for inspecting service units + self.mysql_sentry = self.d.sentry.unit['mysql/0'] + self.keystone_sentry = self.d.sentry.unit['keystone/0'] + self.rabbitmq_sentry = self.d.sentry.unit['rabbitmq-server/0'] + self.nova_cc_sentry = self.d.sentry.unit['nova-cloud-controller/0'] + self.quantum_gateway_sentry = self.d.sentry.unit['quantum-gateway/0'] + + # Authenticate admin with keystone + self.keystone = u.authenticate_keystone_admin(self.keystone_sentry, + user='admin', + password='openstack', + tenant='admin') + + + # Authenticate admin with neutron + ep = self.keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + self.neutron = neutronclient.Client(auth_url=ep, + username='admin', + password='openstack', + tenant_name='admin', + region_name='RegionOne') + + def test_services(self): + """Verify the expected services are running on the corresponding + service units.""" + if self._get_openstack_release() >= self.precise_havana: + neutron_services = ['status neutron-dhcp-agent', + 'status neutron-lbaas-agent', + 'status neutron-metadata-agent', + 'status neutron-plugin-openvswitch-agent'] + if self._get_openstack_release() == self.precise_havana: + neutron_services.append('status neutron-l3-agent') + else: + neutron_services.append('status neutron-vpn-agent') + neutron_services.append('status neutron-metering-agent') + neutron_services.append('status neutron-ovs-cleanup') + else: + neutron_services = ['status quantum-dhcp-agent', + 'status quantum-l3-agent', + 'status quantum-metadata-agent', + 'status quantum-plugin-openvswitch-agent'] + + nova_cc_services = ['status nova-api-ec2', + 'status nova-api-os-compute', + 'status nova-objectstore', + 'status nova-cert', + 'status nova-scheduler'] + if self._get_openstack_release() >= self.precise_grizzly: + nova_cc_services.append('status nova-conductor') + + commands = { + self.mysql_sentry: ['status mysql'], + 
self.keystone_sentry: ['status keystone'], + self.nova_cc_sentry: nova_cc_services, + self.quantum_gateway_sentry: neutron_services + } + + ret = u.validate_services(commands) + if ret: + amulet.raise_status(amulet.FAIL, msg=ret) + + def test_quantum_gateway_shared_db_relation(self): + """Verify the quantum-gateway to mysql shared-db relation data""" + unit = self.quantum_gateway_sentry + relation = ['shared-db', 'mysql:shared-db'] + expected = { + 'private-address': u.valid_ip, + 'database': 'nova', + 'username': 'nova', + 'hostname': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('quantum-gateway shared-db', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_mysql_shared_db_relation(self): + """Verify the mysql to quantum-gateway shared-db relation data""" + unit = self.mysql_sentry + relation = ['shared-db', 'quantum-gateway:shared-db'] + expected = { + 'private-address': u.valid_ip, + 'password': u.not_null, + 'db_host': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('mysql shared-db', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_quantum_gateway_amqp_relation(self): + """Verify the quantum-gateway to rabbitmq-server amqp relation data""" + unit = self.quantum_gateway_sentry + relation = ['amqp', 'rabbitmq-server:amqp'] + expected = { + 'username': 'neutron', + 'private-address': u.valid_ip, + 'vhost': 'openstack' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('quantum-gateway amqp', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_rabbitmq_amqp_relation(self): + """Verify the rabbitmq-server to quantum-gateway amqp relation data""" + unit = self.rabbitmq_sentry + relation = ['amqp', 'quantum-gateway:amqp'] + expected = { + 'private-address': u.valid_ip, + 'password': u.not_null, + 'hostname': u.valid_ip + } + + ret = 
u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('rabbitmq amqp', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_quantum_gateway_network_service_relation(self): + """Verify the quantum-gateway to nova-cc quantum-network-service + relation data""" + unit = self.quantum_gateway_sentry + relation = ['quantum-network-service', + 'nova-cloud-controller:quantum-network-service'] + expected = { + 'private-address': u.valid_ip + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('quantum-gateway network-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_nova_cc_network_service_relation(self): + """Verify the nova-cc to quantum-gateway quantum-network-service + relation data""" + unit = self.nova_cc_sentry + relation = ['quantum-network-service', + 'quantum-gateway:quantum-network-service'] + expected = { + 'service_protocol': 'http', + 'service_tenant': 'services', + 'quantum_url': u.valid_url, + 'quantum_port': '9696', + 'service_port': '5000', + 'region': 'RegionOne', + 'service_password': u.not_null, + 'quantum_host': u.valid_ip, + 'auth_port': '35357', + 'auth_protocol': 'http', + 'private-address': u.valid_ip, + 'keystone_host': u.valid_ip, + 'quantum_plugin': 'ovs', + 'auth_host': u.valid_ip, + 'service_username': 'quantum_s3_ec2_nova', + 'service_tenant_name': 'services' + } + + ret = u.validate_relation_data(unit, relation, expected) + if ret: + message = u.relation_error('nova-cc network-service', ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_restart_on_config_change(self): + """Verify that the specified services are restarted when the config + is changed.""" + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/neutron.conf' + services = ['neutron-dhcp-agent', 'neutron-openvswitch-agent', + 'neutron-metering-agent', 'neutron-lbaas-agent', + 'neutron-metadata-agent'] + if 
self._get_openstack_release() == self.precise_havana: + services.append('neutron-l3-agent') + else: + services.append('neutron-vpn-agent') + else: + conf = '/etc/quantum/quantum.conf' + services = ['quantum-dhcp-agent', 'quantum-openvswitch-agent', + 'quantum-metadata-agent', 'quantum-l3-agent'] + + self.d.configure('quantum-gateway', {'debug': 'True'}) + + time = 20 + for s in services: + if not u.service_restarted(self.quantum_gateway_sentry, s, conf, + pgrep_full=True, sleep_time=time): + msg = "service {} didn't restart after config change".format(s) + amulet.raise_status(amulet.FAIL, msg=msg) + time = 0 + + self.d.configure('quantum-gateway', {'debug': 'False'}) + + def test_neutron_config(self): + """Verify the data in the neutron config file.""" + unit = self.quantum_gateway_sentry + rabbitmq_relation = self.rabbitmq_sentry.relation('amqp', + 'quantum-gateway:amqp') + + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/neutron.conf' + expected = { + 'DEFAULT': { + 'verbose': 'False', + 'debug': 'False', + 'lock_path': '/var/lock/neutron', + 'rabbit_userid': 'neutron', + 'rabbit_virtual_host': 'openstack', + 'rabbit_password': rabbitmq_relation['password'], + 'rabbit_host': rabbitmq_relation['hostname'], + 'control_exchange': 'neutron', + 'notification_driver': 'neutron.openstack.common.notifier.' + 'list_notifier', + 'list_notifier_drivers': 'neutron.openstack.common.' + 'notifier.rabbit_notifier' + }, + 'agent': { + 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' + '/etc/neutron/rootwrap.conf' + } + } + else: + conf = '/etc/quantum/quantum.conf' + expected = { + 'DEFAULT': { + 'verbose': 'False', + 'debug': 'False', + 'lock_path': '/var/lock/quantum', + 'rabbit_userid': 'neutron', + 'rabbit_virtual_host': 'openstack', + 'rabbit_password': rabbitmq_relation['password'], + 'rabbit_host': rabbitmq_relation['hostname'], + 'control_exchange': 'quantum', + 'notification_driver': 'quantum.openstack.common.notifier.' 
+ 'list_notifier', + 'list_notifier_drivers': 'quantum.openstack.common.' + 'notifier.rabbit_notifier' + }, + 'AGENT': { + 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' + '/etc/quantum/rootwrap.conf' + } + } + + if self._get_openstack_release() >= self.precise_icehouse: + expected['DEFAULT']['core_plugin'] = \ + 'neutron.plugins.ml2.plugin.Ml2Plugin' + elif self._get_openstack_release() >= self.precise_havana: + expected['DEFAULT']['core_plugin'] = \ + 'neutron.plugins.openvswitch.ovs_neutron_plugin.OVSNeutronPluginV2' + else: + expected['DEFAULT']['core_plugin'] = \ + 'quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2' + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "neutron config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_ml2_config(self): + """Verify the data in the ml2 config file. This is only available + since icehouse.""" + if self._get_openstack_release() < self.precise_icehouse: + return + + unit = self.quantum_gateway_sentry + conf = '/etc/neutron/plugins/ml2/ml2_conf.ini' + quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db') + expected = { + 'ml2': { + 'type_drivers': 'gre,vxlan', + 'tenant_network_types': 'gre,vxlan', + 'mechanism_drivers': 'openvswitch' + }, + 'ml2_type_gre': { + 'tunnel_id_ranges': '1:1000' + }, + 'ml2_type_vxlan': { + 'vni_ranges': '1001:2000' + }, + 'ovs': { + 'enable_tunneling': 'True', + 'local_ip': quantum_gateway_relation['private-address'] + }, + 'agent': { + 'tunnel_types': 'gre' + }, + 'securitygroup': { + 'firewall_driver': 'neutron.agent.linux.iptables_firewall.' 
+ 'OVSHybridIptablesFirewallDriver' + } + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "ml2 config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_api_paste_config(self): + """Verify the data in the api paste config file.""" + unit = self.quantum_gateway_sentry + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/api-paste.ini' + expected = { + 'composite:neutron': { + 'use': 'egg:Paste#urlmap', + '/': 'neutronversions', + '/v2.0': 'neutronapi_v2_0' + }, + 'filter:keystonecontext': { + 'paste.filter_factory': 'neutron.auth:' + 'NeutronKeystoneContext.factory' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystoneclient.middleware.' + 'auth_token:filter_factory' + }, + 'filter:extensions': { + 'paste.filter_factory': 'neutron.api.extensions:' + 'plugin_aware_extension_middleware_' + 'factory' + }, + 'app:neutronversions': { + 'paste.app_factory': 'neutron.api.versions:Versions.factory' + }, + 'app:neutronapiapp_v2_0': { + 'paste.app_factory': 'neutron.api.v2.router:APIRouter.' 
+ 'factory' + } + } + if self._get_openstack_release() == self.precise_havana: + expected_additional = { + 'composite:neutronapi_v2_0': { + 'use': 'call:neutron.auth:pipeline_factory', + 'noauth': 'extensions neutronapiapp_v2_0', + 'keystone': 'authtoken keystonecontext extensions ' + 'neutronapiapp_v2_0' + } + } + else: + expected_additional = { + 'composite:neutronapi_v2_0': { + 'use': 'call:neutron.auth:pipeline_factory', + 'noauth': 'request_id catch_errors extensions ' + 'neutronapiapp_v2_0', + 'keystone': 'request_id catch_errors authtoken ' + 'keystonecontext extensions ' + 'neutronapiapp_v2_0' + } + } + expected = dict(expected.items() + expected_additional.items()) + else: + conf = '/etc/quantum/api-paste.ini' + expected = { + 'composite:quantum': { + 'use': 'egg:Paste#urlmap', + '/': 'quantumversions', + '/v2.0': 'quantumapi_v2_0' + }, + 'composite:quantumapi_v2_0': { + 'use': 'call:quantum.auth:pipeline_factory', + 'noauth': 'extensions quantumapiapp_v2_0', + 'keystone': 'authtoken keystonecontext extensions ' + 'quantumapiapp_v2_0', + }, + 'filter:keystonecontext': { + 'paste.filter_factory': 'quantum.auth:' + 'QuantumKeystoneContext.factory' + }, + 'filter:authtoken': { + 'paste.filter_factory': 'keystoneclient.middleware.' + 'auth_token:filter_factory' + }, + 'filter:extensions': { + 'paste.filter_factory': 'quantum.api.extensions:' + 'plugin_aware_extension_middleware_' + 'factory' + }, + 'app:quantumversions': { + 'paste.app_factory': 'quantum.api.versions:Versions.factory' + }, + 'app:quantumapiapp_v2_0': { + 'paste.app_factory': 'quantum.api.v2.router:APIRouter.' 
+ 'factory' + } + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "api paste config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_dhcp_agent_config(self): + """Verify the data in the dhcp agent config file.""" + unit = self.quantum_gateway_sentry + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/dhcp_agent.ini' + expected = { + 'state_path': '/var/lib/neutron', + 'interface_driver': 'neutron.agent.linux.interface.' + 'OVSInterfaceDriver', + 'dhcp_driver': 'neutron.agent.linux.dhcp.Dnsmasq', + 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' + '/etc/neutron/rootwrap.conf', + 'ovs_use_veth': 'True' + } + else: + conf = '/etc/quantum/dhcp_agent.ini' + expected = { + 'state_path': '/var/lib/quantum', + 'interface_driver': 'quantum.agent.linux.interface.' + 'OVSInterfaceDriver', + 'dhcp_driver': 'quantum.agent.linux.dhcp.Dnsmasq', + 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' + '/etc/quantum/rootwrap.conf' + } + + ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) + if ret: + message = "dhcp agent config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_fwaas_driver_config(self): + """Verify the data in the fwaas driver config file. This is only + available since havana.""" + if self._get_openstack_release() < self.precise_havana: + return + + unit = self.quantum_gateway_sentry + conf = '/etc/neutron/fwaas_driver.ini' + expected = { + 'driver': 'neutron.services.firewall.drivers.linux.' 
+ 'iptables_fwaas.IptablesFwaasDriver', + 'enabled': 'True' + } + + ret = u.validate_config_data(unit, conf, 'fwaas', expected) + if ret: + message = "fwaas driver config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_l3_agent_config(self): + """Verify the data in the l3 agent config file.""" + unit = self.quantum_gateway_sentry + nova_cc_relation = self.nova_cc_sentry.relation(\ + 'quantum-network-service', + 'quantum-gateway:quantum-network-service') + ep = self.keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/l3_agent.ini' + expected = { + 'interface_driver': 'neutron.agent.linux.interface.' + 'OVSInterfaceDriver', + 'auth_url': ep, + 'auth_region': 'RegionOne', + 'admin_tenant_name': 'services', + 'admin_user': 'quantum_s3_ec2_nova', + 'admin_password': nova_cc_relation['service_password'], + 'root_helper': 'sudo /usr/bin/neutron-rootwrap ' + '/etc/neutron/rootwrap.conf', + 'ovs_use_veth': 'True', + 'handle_internal_only_routers': 'True' + } + else: + conf = '/etc/quantum/l3_agent.ini' + expected = { + 'interface_driver': 'quantum.agent.linux.interface.' + 'OVSInterfaceDriver', + 'auth_url': ep, + 'auth_region': 'RegionOne', + 'admin_tenant_name': 'services', + 'admin_user': 'quantum_s3_ec2_nova', + 'admin_password': nova_cc_relation['service_password'], + 'root_helper': 'sudo /usr/bin/quantum-rootwrap ' + '/etc/quantum/rootwrap.conf' + } + + ret = u.validate_config_data(unit, conf, 'DEFAULT', expected) + if ret: + message = "l3 agent config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_lbaas_agent_config(self): + """Verify the data in the lbaas agent config file. 
This is only + available since havana.""" + if self._get_openstack_release() < self.precise_havana: + return + + unit = self.quantum_gateway_sentry + conf = '/etc/neutron/lbaas_agent.ini' + expected = { + 'DEFAULT': { + 'periodic_interval': '10', + 'interface_driver': 'neutron.agent.linux.interface.' + 'OVSInterfaceDriver', + 'ovs_use_veth': 'False', + 'device_driver': 'neutron.services.loadbalancer.drivers.' + 'haproxy.namespace_driver.HaproxyNSDriver' + }, + 'haproxy': { + 'loadbalancer_state_path': '$state_path/lbaas', + 'user_group': 'nogroup' + } + } + + for section, pairs in expected.iteritems(): + ret = u.validate_config_data(unit, conf, section, pairs) + if ret: + message = "lbaas agent config error: {}".format(ret) + amulet.raise_status(amulet.FAIL, msg=message) + + def test_metadata_agent_config(self): + """Verify the data in the metadata agent config file.""" + unit = self.quantum_gateway_sentry + ep = self.keystone.service_catalog.url_for(service_type='identity', + endpoint_type='publicURL') + quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db') + nova_cc_relation = self.nova_cc_sentry.relation(\ + 'quantum-network-service', + 'quantum-gateway:quantum-network-service') + + if self._get_openstack_release() >= self.precise_havana: + conf = '/etc/neutron/metadata_agent.ini' + expected = { + 'auth_url': ep, + 'auth_region': 'RegionOne', + 'admin_tenant_name': 'services', + 'admin_user': 'quantum_s3_ec2_nova', + 'admin_password': nova_cc_relation['service_password'], + 'root_helper': 'sudo neutron-rootwrap ' + '/etc/neutron/rootwrap.conf', + 'state_path': '/var/lib/neutron', + 'nova_metadata_ip': quantum_gateway_relation['private-address'], + 'nova_metadata_port': '8775' + } + else: + conf = '/etc/quantum/metadata_agent.ini' + expected = { + 'auth_url': ep, + 'auth_region': 'RegionOne', + 'admin_tenant_name': 'services', + 'admin_user': 'quantum_s3_ec2_nova', + 'admin_password': nova_cc_relation['service_password'], + 'root_helper': 'sudo 
def test_metering_agent_config(self):
    """Verify the data in the metering agent config file. This is only
       available since havana."""
    if self._get_openstack_release() < self.precise_havana:
        return

    unit = self.quantum_gateway_sentry
    conf = '/etc/neutron/metering_agent.ini'
    expected = {
        'driver': 'neutron.services.metering.drivers.iptables.'
                  'iptables_driver.IptablesMeteringDriver',
        'measure_interval': '30',
        'report_interval': '300',
        'interface_driver': 'neutron.agent.linux.interface.'
                            'OVSInterfaceDriver',
        'use_namespaces': 'True'
    }

    ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
    if ret:
        message = "metering agent config error: {}".format(ret)
        # Bug fix: the failure message was previously computed but never
        # reported, so this test could not actually fail.
        amulet.raise_status(amulet.FAIL, msg=message)

def test_nova_config(self):
    """Verify the data in the nova config file."""
    unit = self.quantum_gateway_sentry
    conf = '/etc/nova/nova.conf'
    mysql_relation = self.mysql_sentry.relation('shared-db',
                                                'quantum-gateway:shared-db')
    db_uri = "mysql://{}:{}@{}/{}".format('nova',
                                          mysql_relation['password'],
                                          mysql_relation['db_host'],
                                          'nova')
    rabbitmq_relation = self.rabbitmq_sentry.relation('amqp',
                                                      'quantum-gateway:amqp')
    # Style fix: dropped a needless backslash continuation inside the
    # already-open parentheses (flake8 would flag it).
    nova_cc_relation = self.nova_cc_sentry.relation(
        'quantum-network-service',
        'quantum-gateway:quantum-network-service')
    ep = self.keystone.service_catalog.url_for(service_type='identity',
                                               endpoint_type='publicURL')

    if self._get_openstack_release() >= self.precise_havana:
        # havana and later use the neutron_* option names
        expected = {
            'logdir': '/var/log/nova',
            'state_path': '/var/lib/nova',
            'lock_path': '/var/lock/nova',
            'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
            'verbose': 'False',
            'use_syslog': 'False',
            'api_paste_config': '/etc/nova/api-paste.ini',
            'enabled_apis': 'metadata',
            'multi_host': 'True',
            'sql_connection': db_uri,
            'service_neutron_metadata_proxy': 'True',
            'rabbit_userid': 'neutron',
            'rabbit_virtual_host': 'openstack',
            'rabbit_password': rabbitmq_relation['password'],
            'rabbit_host': rabbitmq_relation['hostname'],
            'network_api_class': 'nova.network.neutronv2.api.API',
            'neutron_auth_strategy': 'keystone',
            'neutron_url': nova_cc_relation['quantum_url'],
            'neutron_admin_tenant_name': 'services',
            'neutron_admin_username': 'quantum_s3_ec2_nova',
            'neutron_admin_password': nova_cc_relation['service_password'],
            'neutron_admin_auth_url': ep
        }
    else:
        # pre-havana uses the quantum_* option names
        expected = {
            'logdir': '/var/log/nova',
            'state_path': '/var/lib/nova',
            'lock_path': '/var/lock/nova',
            'root_helper': 'sudo nova-rootwrap /etc/nova/rootwrap.conf',
            'verbose': 'True',
            'api_paste_config': '/etc/nova/api-paste.ini',
            'enabled_apis': 'metadata',
            'multi_host': 'True',
            'sql_connection': db_uri,
            'service_quantum_metadata_proxy': 'True',
            'rabbit_userid': 'neutron',
            'rabbit_virtual_host': 'openstack',
            'rabbit_password': rabbitmq_relation['password'],
            'rabbit_host': rabbitmq_relation['hostname'],
            'network_api_class': 'nova.network.quantumv2.api.API',
            'quantum_auth_strategy': 'keystone',
            'quantum_url': nova_cc_relation['quantum_url'],
            'quantum_admin_tenant_name': 'services',
            'quantum_admin_username': 'quantum_s3_ec2_nova',
            'quantum_admin_password': nova_cc_relation['service_password'],
            'quantum_admin_auth_url': ep
        }

    ret = u.validate_config_data(unit, conf, 'DEFAULT', expected)
    if ret:
        message = "nova config error: {}".format(ret)
        amulet.raise_status(amulet.FAIL, msg=message)

def test_ovs_neutron_plugin_config(self):
    """Verify the data in the ovs neutron plugin config file. The ovs
       plugin is not used by default since icehouse."""
    if self._get_openstack_release() >= self.precise_icehouse:
        return

    unit = self.quantum_gateway_sentry
    quantum_gateway_relation = unit.relation('shared-db', 'mysql:shared-db')

    if self._get_openstack_release() >= self.precise_havana:
        conf = '/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini'
        expected = {
            'ovs': {
                'local_ip': quantum_gateway_relation['private-address'],
                'tenant_network_type': 'gre',
                'enable_tunneling': 'True',
                'tunnel_id_ranges': '1:1000'
            }
        }
        if self._get_openstack_release() > self.precise_havana:
            expected_additional = {
                'agent': {
                    'polling_interval': '10',
                    'root_helper': 'sudo /usr/bin/neutron-rootwrap '
                                   '/etc/neutron/rootwrap.conf'
                }
            }
            # NOTE: py2-only merge (dict.items() returns lists there).
            expected = dict(expected.items() + expected_additional.items())
    else:
        conf = '/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini'
        expected = {
            'OVS': {
                'local_ip': quantum_gateway_relation['private-address'],
                'tenant_network_type': 'gre',
                'enable_tunneling': 'True',
                'tunnel_id_ranges': '1:1000'
            },
            'AGENT': {
                'polling_interval': '10',
                'root_helper': 'sudo /usr/bin/quantum-rootwrap '
                               '/etc/quantum/rootwrap.conf'
            }
        }

    for section, pairs in expected.iteritems():
        ret = u.validate_config_data(unit, conf, section, pairs)
        if ret:
            message = "ovs neutron plugin config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

def test_vpn_agent_config(self):
    """Verify the data in the vpn agent config file. This isn't available
       prior to havana."""
    if self._get_openstack_release() < self.precise_havana:
        return

    unit = self.quantum_gateway_sentry
    conf = '/etc/neutron/vpn_agent.ini'
    expected = {
        'vpnagent': {
            'vpn_device_driver': 'neutron.services.vpn.device_drivers.'
                                 'ipsec.OpenSwanDriver'
        },
        'ipsec': {
            'ipsec_status_check_interval': '60'
        }
    }

    for section, pairs in expected.iteritems():
        ret = u.validate_config_data(unit, conf, section, pairs)
        if ret:
            message = "vpn agent config error: {}".format(ret)
            amulet.raise_status(amulet.FAIL, msg=message)

def test_create_network(self):
    """Create a network, verify that it exists, and then delete it."""
    self.neutron.format = 'json'
    net_name = 'ext_net'

    # Verify that the network doesn't exist
    networks = self.neutron.list_networks(name=net_name)
    net_count = len(networks['networks'])
    if net_count != 0:
        msg = "Expected zero networks, found {}".format(net_count)
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Create a network and verify that it exists
    network = {'name': net_name}
    self.neutron.create_network({'network': network})

    networks = self.neutron.list_networks(name=net_name)
    net_len = len(networks['networks'])
    if net_len != 1:
        msg = "Expected 1 network, found {}".format(net_len)
        amulet.raise_status(amulet.FAIL, msg=msg)

    network = networks['networks'][0]
    if network['name'] != net_name:
        amulet.raise_status(amulet.FAIL, msg="network ext_net not found")

    # Cleanup
    self.neutron.delete_network(network['id'])
provides generic Amulet deployment and test runner + methods.""" + + def __init__(self, series=None): + """Initialize the deployment environment.""" + self.series = None + + if series: + self.series = series + self.d = amulet.Deployment(series=self.series) + else: + self.d = amulet.Deployment() + + def _add_services(self, this_service, other_services): + """Add services to the deployment where this_service is the local charm + that we're focused on testing and other_services are the other + charms that come from the charm store.""" + name, units = range(2) + self.this_service = this_service[name] + self.d.add(this_service[name], units=this_service[units]) + + for svc in other_services: + if self.series: + self.d.add(svc[name], + charm='cs:{}/{}'.format(self.series, svc[name]), + units=svc[units]) + else: + self.d.add(svc[name], units=svc[units]) + + def _add_relations(self, relations): + """Add all of the relations for the services.""" + for k, v in relations.iteritems(): + self.d.relate(k, v) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _deploy(self): + """Deploy environment and wait for all hooks to finish executing.""" + try: + self.d.setup() + self.d.sentry.wait() + except amulet.helpers.TimeoutError: + amulet.raise_status(amulet.FAIL, msg="Deployment timed out") + except: + raise + + def run_tests(self): + """Run all of the methods that are prefixed with 'test_'.""" + for test in dir(self): + if test.startswith('test_'): + getattr(self, test)() diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py new file mode 100644 index 00000000..31764568 --- /dev/null +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -0,0 +1,157 @@ +import ConfigParser +import io +import logging +import re +import sys +from time import sleep + + +class AmuletUtils(object): + """This class provides common utility 
class AmuletUtils(object):
    """This class provides common utility functions that are used by
       Amulet tests."""

    def __init__(self, log_level=logging.ERROR):
        self.log = self.get_logger(level=log_level)

    def get_logger(self, name="amulet-logger", level=logging.DEBUG):
        """Get a logger object that will log to stdout."""
        # Fix: use the logging module directly instead of the confusing
        # `log = logging` alias.
        logger = logging.getLogger(name)
        fmt = logging.Formatter(
            "%(asctime)s %(funcName)s %(levelname)s: %(message)s")

        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setLevel(level)
        handler.setFormatter(fmt)

        logger.addHandler(handler)
        logger.setLevel(level)

        return logger

    def valid_ip(self, ip):
        """Return True if ip looks like a dotted-quad IPv4 address.

        NOTE: only checks the shape, not that octets are in 0-255.
        """
        if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
            return True
        else:
            return False

    def valid_url(self, url):
        """Return True if url looks like an http/https/ftp/ftps URL."""
        p = re.compile(
            r'^(?:http|ftp)s?://'
            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # flake8: noqa
            r'localhost|'
            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
            r'(?::\d+)?'
            r'(?:/?|[/?]\S+)$',
            re.IGNORECASE)
        if p.match(url):
            return True
        else:
            return False

    def validate_services(self, commands):
        """Verify the specified services are running on the corresponding
           service units."""
        for k, v in commands.iteritems():
            for cmd in v:
                output, code = k.run(cmd)
                if code != 0:
                    return "command `{}` returned {}".format(cmd, str(code))
        return None

    def _get_config(self, unit, filename):
        """Get a ConfigParser object for parsing a unit's config file."""
        file_contents = unit.file_contents(filename)
        config = ConfigParser.ConfigParser()
        config.readfp(io.StringIO(file_contents))
        return config

    def validate_config_data(self, sentry_unit, config_file, section,
                             expected):
        """Verify that the specified section of the config file contains
           the expected option key:value pairs."""
        config = self._get_config(sentry_unit, config_file)

        if section != 'DEFAULT' and not config.has_section(section):
            return "section [{}] does not exist".format(section)

        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:
                return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
        return None

    def _validate_dict_data(self, expected, actual):
        """Compare expected dictionary data vs actual dictionary data.
           The values in the 'expected' dictionary can be strings, bools,
           ints, longs, or can be a function that evaluates a variable and
           returns a bool."""
        for k, v in expected.iteritems():
            if k in actual:
                if isinstance(v, basestring) or \
                        isinstance(v, bool) or \
                        isinstance(v, (int, long)):
                    if v != actual[k]:
                        return "{}:{}".format(k, actual[k])
                elif not v(actual[k]):
                    # v is a predicate callable; failing it is a mismatch.
                    return "{}:{}".format(k, actual[k])
            else:
                return "key '{}' does not exist".format(k)
        return None

    def validate_relation_data(self, sentry_unit, relation, expected):
        """Validate actual relation data based on expected relation data."""
        actual = sentry_unit.relation(relation[0], relation[1])
        self.log.debug('actual: {}'.format(repr(actual)))
        return self._validate_dict_data(expected, actual)

    def _validate_list_data(self, expected, actual):
        """Compare expected list vs actual list data."""
        for e in expected:
            if e not in actual:
                return "expected item {} not found in actual list".format(e)
        return None

    def not_null(self, string):
        """Return True if string is not None."""
        # Fix: identity comparison with None (`is not`), not `!= None`.
        return string is not None

    def _get_file_mtime(self, sentry_unit, filename):
        """Get last modification time of file."""
        return sentry_unit.file_stat(filename)['mtime']

    def _get_dir_mtime(self, sentry_unit, directory):
        """Get last modification time of directory."""
        return sentry_unit.directory_stat(directory)['mtime']

    def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False):
        """Determine start time of the process based on the last
           modification time of the /proc/pid directory. If pgrep_full is
           True, the process name is matched against the full command
           line."""
        if pgrep_full:
            cmd = 'pgrep -o -f {}'.format(service)
        else:
            cmd = 'pgrep -o {}'.format(service)
        proc_dir = '/proc/{}'.format(sentry_unit.run(cmd)[0].strip())
        return self._get_dir_mtime(sentry_unit, proc_dir)

    def service_restarted(self, sentry_unit, service, filename,
                          pgrep_full=False):
        """Compare a service's start time vs a file's last modification
           time (such as a config file for that service) to determine if
           the service has been restarted."""
        # Give hooks time to finish restarting the service.
        sleep(10)
        if self._get_proc_start_time(sentry_unit, service, pgrep_full) >= \
                self._get_file_mtime(sentry_unit, filename):
            return True
        else:
            return False

    def relation_error(self, name, data):
        return 'unexpected relation data in {} - {}'.format(name, data)

    def endpoint_error(self, name, data):
        return 'unexpected endpoint data in {} - {}'.format(name, data)
openstack + self.source = source + + def _add_services(self, this_service, other_services): + """Add services to the deployment and set openstack-origin.""" + super(OpenStackAmuletDeployment, self)._add_services(this_service, + other_services) + name = 0 + services = other_services + services.append(this_service) + use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph'] + + if self.openstack: + for svc in services: + if svc[name] not in use_source: + config = {'openstack-origin': self.openstack} + self.d.configure(svc[name], config) + + if self.source: + for svc in services: + if svc[name] in use_source: + config = {'source': self.source} + self.d.configure(svc[name], config) + + def _configure_services(self, configs): + """Configure all of the services.""" + for service, config in configs.iteritems(): + self.d.configure(service, config) + + def _get_openstack_release(self): + """Return an integer representing the enum value of the openstack + release.""" + self.precise_essex, self.precise_folsom, self.precise_grizzly, \ + self.precise_havana, self.precise_icehouse, \ + self.trusty_icehouse = range(6) + releases = { + ('precise', None): self.precise_essex, + ('precise', 'cloud:precise-folsom'): self.precise_folsom, + ('precise', 'cloud:precise-grizzly'): self.precise_grizzly, + ('precise', 'cloud:precise-havana'): self.precise_havana, + ('precise', 'cloud:precise-icehouse'): self.precise_icehouse, + ('trusty', None): self.trusty_icehouse} + return releases[(self.series, self.openstack)] diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py new file mode 100644 index 00000000..222281e3 --- /dev/null +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -0,0 +1,209 @@ +import logging +import os +import time +import urllib + +import glanceclient.v1.client as glance_client +import keystoneclient.v2_0 as keystone_client +import novaclient.v1_1.client as nova_client + +from 
DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """This class inherits from AmuletUtils and has additional support
       that is specifically for use by OpenStack charms."""

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate actual endpoint data vs expected endpoint data. The
           ports are used to find the matching endpoint."""
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if admin_port in ep.adminurl and internal_port in ep.internalurl \
                    and public_port in ep.publicurl:
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate a list of actual service catalog endpoints vs a list of
           expected service catalog endpoints."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret was unbound (NameError) when expected was empty.
        ret = None
        for k, v in expected.iteritems():
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate a list of actual tenant data vs list of expected tenant
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret was unbound (NameError) when expected was empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate a list of actual role data vs a list of expected role
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret was unbound (NameError) when expected was empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate a list of actual user data vs a list of expected user
           data."""
        self.log.debug('actual: {}'.format(repr(actual)))
        # Bug fix: ret was unbound (NameError) when expected was empty.
        ret = None
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate a list of actual flavors vs a list of expected
           flavors."""
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public
           endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)

        if not os.path.exists(cirros_img):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
        f.close()

        with open(cirros_img) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        glance.images.delete(image)

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance; return it once ACTIVE, or None
           if it is still building after the polling window."""
        image = nova.images.find(name=image_name)
        flavor = nova.flavors.find(name=flavor)
        instance = nova.servers.create(name=instance_name, image=image,
                                       flavor=flavor)

        count = 1
        status = instance.status
        while status != 'ACTIVE' and count < 60:
            time.sleep(3)
            instance = nova.servers.get(instance.id)
            status = instance.status
            self.log.debug('instance status: {}'.format(status))
            count += 1

        # NOTE(review): only 'BUILD' maps to None here; an instance that
        # ends in e.g. 'ERROR' is still returned — confirm this is intended.
        if status == 'BUILD':
            return None

        return instance

    def delete_instance(self, nova, instance):
        """Delete the specified instance."""
        nova.servers.delete(instance)