Add cleanup action and OVS to OVN migration test

Add OVS to OVN migration at the end of the regular gate test. This
adds only 5-10 minutes to each job and we want to confirm the migration
works from focal-ussuri and onwards, as that is the point from which we
recommend our end users migrate from OVS to OVN.

Do a charm-helpers sync (ch-sync).

Merge after https://github.com/juju/charm-helpers/pull/511

Func-Test-Pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/365
Depends-On: Ifa99988612eaaeb9d60a0d99db172f97e27cfc93
Change-Id: Ia4b1d3a9e642b540d1e04adc0363f9b3e11f37cd
Frode Nordahl 2020-07-14 14:32:54 +02:00
parent a7805ec346
commit 916f109e2f
14 changed files with 1790 additions and 448 deletions


@ -1,3 +1,23 @@
cleanup:
description: |
Clean up after the Neutron agents.
params:
i-really-mean-it:
type: boolean
default: false
description: |
The default of false will not run the action; set to true to perform
the cleanup.
.
WARNING: Running this action will interrupt instance connectivity and
it will not be restored until either Neutron agents or a different
SDN reprograms connectivity on the hypervisor.
.
NOTE: The application must be configured with `firewall-driver`
'openvswitch' and the unit must be paused prior to running this
action.
required:
- i-really-mean-it
pause:
  description: Pause the neutron-openvswitch unit. This action will stop neutron-openvswitch services.
resume:
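For reference, a hypothetical invocation of the new action could look as follows (the unit name neutron-openvswitch/0 is assumed for illustration); the unit has to be paused first and the safety flag given explicitly:

    juju run-action --wait neutron-openvswitch/0 pause
    juju run-action --wait neutron-openvswitch/0 cleanup i-really-mean-it=true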

actions/cleanup Symbolic link

@ -0,0 +1 @@
cleanup.py

actions/cleanup.py Executable file

@ -0,0 +1,173 @@
#!/usr/bin/env python3
#
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import traceback
sys.path.append('hooks/')
import charmhelpers.core as ch_core
import charmhelpers.contrib.openstack.utils as ch_openstack_utils
import charmhelpers.contrib.network.ovs as ch_ovs
import charmhelpers.contrib.network.ovs.ovsdb as ch_ovsdb
class BaseDocException(Exception):
"""Use docstring as default message for exception."""
def __init__(self, message=None):
self.message = message or self.__doc__
def __repr__(self):
return self.message
def __str__(self):
return self.message
class UnitNotPaused(BaseDocException):
"""Action requires unit to be paused but it was not paused."""
pass
class MandatoryConfigurationNotSet(BaseDocException):
"""Action requires certain configuration to be set to operate correctly."""
pass
def remove_patch_ports(bridge):
"""Remove patch ports from both ends starting with named bridge.
:param bridge: Name of bridge to look for patch ports to remove.
:type bridge: str
"""
# NOTE: We need to consume all output from the `patch_ports_on_bridge`
# generator prior to removing anything otherwise it will raise an error.
for patch in list(ch_ovs.patch_ports_on_bridge(bridge)):
ch_ovs.del_bridge_port(
patch.this_end.bridge,
patch.this_end.port,
linkdown=False)
ch_ovs.del_bridge_port(
patch.other_end.bridge,
patch.other_end.port,
linkdown=False)
def remove_per_bridge_controllers():
"""Remove per bridge controllers."""
bridges = ch_ovsdb.SimpleOVSDB('ovs-vsctl').bridge
for bridge in bridges:
if bridge['controller']:
bridges.clear(str(bridge['_uuid']), 'controller')
def neutron_ipset_cleanup():
"""Perform Neutron ipset cleanup."""
subprocess.check_call(
(
'neutron-ipset-cleanup',
'--config-file=/etc/neutron/neutron.conf',
'--config-file=/etc/neutron/plugins/ml2/openvswitch_agent.ini',
))
def neutron_netns_cleanup():
"""Perform Neutron netns cleanup."""
# FIXME: remove once package dependencies have been backported LP: #1881852
subprocess.check_call(('apt', '-y', 'install', 'net-tools'))
_tmp_filters = '/etc/neutron/rootwrap.d/charm-n-ovs.filters'
with open(_tmp_filters, 'w') as fp:
fp.write(
'[Filters]\nneutron.cmd.netns_cleanup: CommandFilter, ip, root\n')
subprocess.check_call(
(
'neutron-netns-cleanup',
'--force',
*[
# Existence of these files depends on our configuration.
'--config-file={}'.format(cfg) for cfg in (
'/etc/neutron/neutron.conf',
'/etc/neutron/l3_agent.ini',
'/etc/neutron/fwaas_driver.ini',
'/etc/neutron/dhcp_agent.ini',
) if os.path.exists(cfg)]
))
os.unlink(_tmp_filters)
def cleanup(args):
"""Clean up after Neutron agents."""
# Check that prerequisites for operation are met
if not ch_openstack_utils.is_unit_paused_set():
raise UnitNotPaused()
if ch_core.hookenv.config('firewall-driver') != 'openvswitch':
raise MandatoryConfigurationNotSet(
'Action requires configuration option `firewall-driver` to be set '
'to "openvswitch" for succesfull operation.')
if not ch_core.hookenv.action_get('i-really-mean-it'):
raise MandatoryConfigurationNotSet(
'Action requires the `i-really-mean-it` parameter to be set to '
'"true".')
# The names used for the integration and tunnel bridges are
# configurable, but this configuration is not exposed in the charm.
#
# Assume default names are used.
remove_patch_ports('br-int')
ch_ovs.del_bridge('br-tun')
# The Neutron Open vSwitch agent configures each Open vSwitch bridge to
# establish an active OVSDB connection back to the Neutron Agent.
#
# Remove these
remove_per_bridge_controllers()
# Remove namespaces set up by Neutron
neutron_netns_cleanup()
# Remove ipsets set up by Neutron
neutron_ipset_cleanup()
# A dictionary of all the defined actions to callables (which take
# parsed arguments).
ACTIONS = {'cleanup': cleanup}
def main(args):
action_name = os.path.basename(args[0])
try:
action = ACTIONS[action_name]
except KeyError:
msg = 'Action "{}" undefined'.format(action_name)
ch_core.hookenv.action_fail(msg)
return msg
else:
try:
action(args)
except Exception as e:
msg = 'Action "{}" failed: "{}"'.format(action_name, str(e))
ch_core.hookenv.log(
'{} "{}"'.format(msg, traceback.format_exc()),
level=ch_core.hookenv.ERROR)
ch_core.hookenv.action_fail(msg)
if __name__ == "__main__":
sys.exit(main(sys.argv))


@ -13,6 +13,7 @@
# limitations under the License.
''' Helpers for interacting with OpenvSwitch '''
import collections
import hashlib
import os
import re
@ -20,9 +21,9 @@ import six
import subprocess
from charmhelpers import deprecate
from charmhelpers.contrib.network.ovs import ovsdb as ch_ovsdb
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
log, WARNING, INFO, DEBUG
)
@ -592,3 +593,76 @@ def ovs_appctl(target, args):
cmd = ['ovs-appctl', '-t', target]
cmd.extend(args)
return subprocess.check_output(cmd, universal_newlines=True)
def uuid_for_port(port_name):
"""Get UUID of named port.
:param port_name: Name of port.
:type port_name: str
:returns: Port UUID.
:rtype: Optional[uuid.UUID]
"""
for port in ch_ovsdb.SimpleOVSDB(
'ovs-vsctl').port.find('name={}'.format(port_name)):
return port['_uuid']
def bridge_for_port(port_uuid):
"""Find which bridge a port is on.
:param port_uuid: UUID of port.
:type port_uuid: uuid.UUID
:returns: Name of bridge or None.
:rtype: Optional[str]
"""
for bridge in ch_ovsdb.SimpleOVSDB(
'ovs-vsctl').bridge:
# If there is a single port on a bridge the ports property will not be
# a list. ref: juju/charm-helpers#510
if (isinstance(bridge['ports'], list) and
port_uuid in bridge['ports'] or
port_uuid == bridge['ports']):
return bridge['name']
PatchPort = collections.namedtuple('PatchPort', ('bridge', 'port'))
Patch = collections.namedtuple('Patch', ('this_end', 'other_end'))
def patch_ports_on_bridge(bridge):
"""Find patch ports on a bridge.
:param bridge: Name of bridge
:type bridge: str
:returns: Iterator with bridge and port name for both ends of a patch.
:rtype: Iterator[Patch[PatchPort[str,str],PatchPort[str,str]]]
:raises: ValueError
"""
# On any given vSwitch there will be a small number of patch ports, so we
# start by iterating over ports with type `patch` then look up which bridge
# they belong to and act on any ports that match the criteria.
for interface in ch_ovsdb.SimpleOVSDB(
'ovs-vsctl').interface.find('type=patch'):
for port in ch_ovsdb.SimpleOVSDB(
'ovs-vsctl').port.find('name={}'.format(interface['name'])):
if bridge_for_port(port['_uuid']) == bridge:
this_end = PatchPort(bridge, port['name'])
other_end = PatchPort(bridge_for_port(
uuid_for_port(
interface['options']['peer'])),
interface['options']['peer'])
yield(Patch(this_end, other_end))
# We expect one result and it is ok if it turns out to be a port
# for a different bridge. However we need a break here to satisfy
# the for/else check which is in place to detect an interface referring
# to a non-existent port.
break
else:
raise ValueError('Port for interface named "{}" unexpectedly does '
'not exist.'.format(interface['name']))
else:
# Allow our caller to handle no patch ports found gracefully; per
# PEP 479, simply returning here provides an empty iterator
# and not None.
return
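A minimal sketch of how the new helpers chain together (the bridge and port names below are assumed for illustration, not taken from the charm):

from charmhelpers.contrib.network.ovs import (
    bridge_for_port,
    patch_ports_on_bridge,
    uuid_for_port,
)

# Resolve which bridge a named port is attached to.
port_uuid = uuid_for_port('patch-br-int-to-br-provider')
if port_uuid:
    print(bridge_for_port(port_uuid))

# Enumerate both ends of every patch port attached to br-int.
for patch in patch_ports_on_bridge('br-int'):
    print('{}:{} <-> {}:{}'.format(
        patch.this_end.bridge, patch.this_end.port,
        patch.other_end.bridge, patch.other_end.port))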


@ -36,6 +36,11 @@ class SimpleOVSDB(object):
for br in ovsdb.bridge:
if br['name'] == 'br-test':
ovsdb.bridge.set(br['uuid'], 'external_ids:charm', 'managed')
WARNING: If a list type field has only one item `ovs-vsctl` will present
it as a single item. Since we do not know the schema we have no way of
knowing which fields should be de-serialized as lists, so the caller must
check the type of values returned from this library.
""" """
# For validation we keep a complete map of currently known good tool and # For validation we keep a complete map of currently known good tool and
@ -157,6 +162,51 @@ class SimpleOVSDB(object):
self._tool = tool
self._table = table
def _deserialize_ovsdb(self, data):
"""Deserialize OVSDB RFC7047 section 5.1 data.
:param data: Multidimensional list where first row contains RFC7047
type information
:type data: List[str,any]
:returns: Deserialized data.
:rtype: any
"""
# When using json formatted output to OVS commands Internal OVSDB
# notation may occur that require further deserializing.
# Reference: https://tools.ietf.org/html/rfc7047#section-5.1
ovs_type_cb_map = {
'uuid': uuid.UUID,
# NOTE: OVSDB sets have overloaded type
# see special handling below
'set': list,
'map': dict,
}
assert len(data) > 1, ('Invalid data provided, expecting list '
'with at least two elements.')
if data[0] == 'set':
# special handling for set
#
# it is either a list of strings or a list of typed lists.
# taste first element to see which it is
for el in data[1]:
# NOTE: We lock this handling down to the `uuid` type as
# that is the only one we have a practical example of.
# We could potentially just handle this generally based on
# the types listed in `ovs_type_cb_map`, but let's hold off on
# that until we have a concrete example to validate against.
if isinstance(
el, list) and len(el) and el[0] == 'uuid':
decoded_set = []
for el in data[1]:
decoded_set.append(self._deserialize_ovsdb(el))
return(decoded_set)
# fall back to normal processing below
break
# Use map to deserialize data with fallback to `str`
f = ovs_type_cb_map.get(data[0], str)
return f(data[1])
def _find_tbl(self, condition=None):
"""Run and parse output of OVSDB `find` command.
@ -165,15 +215,6 @@ class SimpleOVSDB(object):
:returns: Dictionary with data
:rtype: Dict[str, any]
"""
# When using json formatted output to OVS commands Internal OVSDB
# notation may occur that require further deserializing.
# Reference: https://tools.ietf.org/html/rfc7047#section-5.1
ovs_type_cb_map = {
'uuid': uuid.UUID,
# FIXME sets also appear to sometimes contain type/value tuples
'set': list,
'map': dict,
}
cmd = [self._tool, '-f', 'json', 'find', self._table]
if condition:
cmd.append(condition)
@ -182,9 +223,8 @@ class SimpleOVSDB(object):
for row in data['data']:
values = []
for col in row:
if isinstance(col, list): if isinstance(col, list) and len(col) > 1:
f = ovs_type_cb_map.get(col[0], str) values.append(self._deserialize_ovsdb(col))
values.append(f(col[1]))
else:
values.append(col)
yield dict(zip(data['headings'], values))
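To illustrate what the deserializer hands back to callers, a minimal sketch (output handling is assumed; the tool and table are the same as used elsewhere in this change):

from charmhelpers.contrib.network.ovs import ovsdb as ch_ovsdb

for bridge in ch_ovsdb.SimpleOVSDB('ovs-vsctl').bridge:
    # '_uuid' is deserialized to uuid.UUID and 'external_ids' to a dict,
    # while 'ports' is either a single uuid.UUID or a list of them
    # depending on how many ports the bridge has (see the WARNING above).
    ports = bridge['ports']
    if not isinstance(ports, list):
        ports = [ports]
    print(bridge['name'], bridge['_uuid'], ports)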


@ -29,6 +29,8 @@ from subprocess import check_call, CalledProcessError
import six
import charmhelpers.contrib.storage.linux.ceph as ch_ceph
from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
_config_ini as config_ini
)
@ -56,6 +58,7 @@ from charmhelpers.core.hookenv import (
status_set,
network_get_primary_address,
WARNING,
service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -808,6 +811,12 @@ class CephContext(OSContextGenerator):
ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts))
if config('pool-type') and config('pool-type') == 'erasure-coded':
base_pool_name = config('rbd-pool') or config('rbd-pool-name')
if not base_pool_name:
base_pool_name = service_name()
ctxt['rbd_default_data_pool'] = base_pool_name
if not os.path.isdir('/etc/ceph'):
os.mkdir('/etc/ceph')
@ -3175,3 +3184,90 @@ class SRIOVContext(OSContextGenerator):
:rtype: Dict[str,int]
"""
return self._map
class CephBlueStoreCompressionContext(OSContextGenerator):
"""Ceph BlueStore compression options."""
# Tuple with Tuples that map configuration option name to CephBrokerRq op
# property name
options = (
('bluestore-compression-algorithm',
'compression-algorithm'),
('bluestore-compression-mode',
'compression-mode'),
('bluestore-compression-required-ratio',
'compression-required-ratio'),
('bluestore-compression-min-blob-size',
'compression-min-blob-size'),
('bluestore-compression-min-blob-size-hdd',
'compression-min-blob-size-hdd'),
('bluestore-compression-min-blob-size-ssd',
'compression-min-blob-size-ssd'),
('bluestore-compression-max-blob-size',
'compression-max-blob-size'),
('bluestore-compression-max-blob-size-hdd',
'compression-max-blob-size-hdd'),
('bluestore-compression-max-blob-size-ssd',
'compression-max-blob-size-ssd'),
)
def __init__(self):
"""Initialize context by loading values from charm config.
We keep two maps, one suitable for use with CephBrokerRq's and one
suitable for template generation.
"""
charm_config = config()
# CephBrokerRq op map
self.op = {}
# Context exposed for template generation
self.ctxt = {}
for config_key, op_key in self.options:
value = charm_config.get(config_key)
self.ctxt.update({config_key.replace('-', '_'): value})
self.op.update({op_key: value})
def __call__(self):
"""Get context.
:returns: Context
:rtype: Dict[str,any]
"""
return self.ctxt
def get_op(self):
"""Get values for use in CephBrokerRq op.
:returns: Context values with CephBrokerRq op property name as key.
:rtype: Dict[str,any]
"""
return self.op
def get_kwargs(self):
"""Get values for use as keyword arguments.
:returns: Context values with key suitable for use as kwargs to
CephBrokerRq add_op_create_*_pool methods.
:rtype: Dict[str,any]
"""
return {
k.replace('-', '_'): v
for k, v in self.op.items()
}
def validate(self):
"""Validate options.
:raises: AssertionError
"""
# We slip in a dummy name on class instantiation to allow validation of
# the other options. It will not affect further use.
#
# NOTE: once we retire Python 3.5 we can fold this into an in-line
# dictionary comprehension in the call to the initializer.
dummy_op = {'name': 'dummy-name'}
dummy_op.update(self.op)
pool = ch_ceph.BasePool('dummy-service', op=dummy_op)
pool.validate()
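A minimal sketch of intended consumption in a charm (pool name assumed, and assuming the paired charm-helpers change that teaches the add_op_create_*_pool methods about these keyword arguments):

import charmhelpers.contrib.storage.linux.ceph as ch_ceph
from charmhelpers.contrib.openstack.context import (
    CephBlueStoreCompressionContext,
)

compression = CephBlueStoreCompressionContext()
compression.validate()  # raises AssertionError on invalid combinations
rq = ch_ceph.CephBrokerRq()
rq.add_op_create_replicated_pool(
    name='example-pool',
    **compression.get_kwargs())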


@ -22,3 +22,7 @@ rbd default features = {{ rbd_features }}
{{ key }} = {{ value }}
{% endfor -%}
{%- endif %}
{% if rbd_default_data_pool -%}
rbd default data pool = {{ rbd_default_data_pool }}
{% endif %}


@ -0,0 +1,28 @@
{# section header omitted as options can belong to multiple sections #}
{% if bluestore_compression_algorithm -%}
bluestore compression algorithm = {{ bluestore_compression_algorithm }}
{% endif -%}
{% if bluestore_compression_mode -%}
bluestore compression mode = {{ bluestore_compression_mode }}
{% endif -%}
{% if bluestore_compression_required_ratio -%}
bluestore compression required ratio = {{ bluestore_compression_required_ratio }}
{% endif -%}
{% if bluestore_compression_min_blob_size -%}
bluestore compression min blob size = {{ bluestore_compression_min_blob_size }}
{% endif -%}
{% if bluestore_compression_min_blob_size_hdd -%}
bluestore compression min blob size hdd = {{ bluestore_compression_min_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_min_blob_size_ssd -%}
bluestore compression min blob size ssd = {{ bluestore_compression_min_blob_size_ssd }}
{% endif -%}
{% if bluestore_compression_max_blob_size -%}
bluestore compression max blob size = {{ bluestore_compression_max_blob_size }}
{% endif -%}
{% if bluestore_compression_max_blob_size_hdd -%}
bluestore compression max blob size hdd = {{ bluestore_compression_max_blob_size_hdd }}
{% endif -%}
{% if bluestore_compression_max_blob_size_ssd -%}
bluestore compression max blob size ssd = {{ bluestore_compression_max_blob_size_ssd }}
{% endif -%}
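For example, with `bluestore-compression-algorithm=lz4` and `bluestore-compression-mode=passive` set in charm config (values assumed for illustration), the section above would render as:

bluestore compression algorithm = lz4
bluestore compression mode = passive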

File diff suppressed because it is too large.


@ -1,5 +1,5 @@
variables:
openstack-origin: &openstack-origin distro openstack-origin: &openstack-origin distro-proposed
series: &series focal series: &series focal
@ -20,6 +20,8 @@ machines:
9:
constraints: "root-disk=20G mem=4G"
10: {}
11: {}
12: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
@ -113,6 +115,27 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
ovn-central:
charm: cs:~openstack-charmers-next/ovn-central
num_units: 3
options:
source: *openstack-origin
to:
- '12'
neutron-api-plugin-ovn:
charm: cs:~openstack-charmers-next/neutron-api-plugin-ovn
ovn-chassis:
charm: cs:~openstack-charmers-next/ovn-chassis
options:
# start new units paused to allow unit by unit OVS to OVN migration
new-units-paused: true
relations:
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
@ -156,3 +179,26 @@ relations:
- ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
# We need to defer the addition of the neutron-api-plugin-ovn subordinate
# relation to the functional test as the test will first validate the legacy
# Neutron ML2+OVS topology, migrate it to OVN and then confirm connectivity
# post migration.
#
# - - neutron-api-plugin-ovn:neutron-plugin
# - neutron-api:neutron-plugin-api-subordinate
- - ovn-central:certificates
- vault:certificates
- - ovn-central:ovsdb-cms
- neutron-api-plugin-ovn:ovsdb-cms
- - ovn-chassis:nova-compute
- nova-compute:neutron-plugin
- - ovn-chassis:certificates
- vault:certificates
- - ovn-chassis:ovsdb
- ovn-central:ovsdb
- - vault:certificates
- neutron-api-plugin-ovn:certificates
- - vault:shared-db
- vault-mysql-router:shared-db
- - vault-mysql-router:db-router
- mysql-innodb-cluster:db-router


@ -20,6 +20,8 @@ machines:
9:
constraints: "root-disk=20G mem=4G"
10: {}
11: {}
12: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
@ -113,6 +115,27 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
ovn-central:
charm: cs:~openstack-charmers-next/ovn-central
num_units: 3
options:
source: *openstack-origin
to:
- '12'
neutron-api-plugin-ovn:
charm: cs:~openstack-charmers-next/neutron-api-plugin-ovn
ovn-chassis:
charm: cs:~openstack-charmers-next/ovn-chassis
options:
# start new units paused to allow unit by unit OVS to OVN migration
new-units-paused: true
relations:
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
@ -156,3 +179,26 @@ relations:
- ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
# We need to defer the addition of the neutron-api-plugin-ovn subordinate
# relation to the functional test as the test will first validate the legacy
# Neutron ML2+OVS topology, migrate it to OVN and then confirm connectivity
# post migration.
#
# - - neutron-api-plugin-ovn:neutron-plugin
# - neutron-api:neutron-plugin-api-subordinate
- - ovn-central:certificates
- vault:certificates
- - ovn-central:ovsdb-cms
- neutron-api-plugin-ovn:ovsdb-cms
- - ovn-chassis:nova-compute
- nova-compute:neutron-plugin
- - ovn-chassis:certificates
- vault:certificates
- - ovn-chassis:ovsdb
- ovn-central:ovsdb
- - vault:certificates
- neutron-api-plugin-ovn:certificates
- - vault:shared-db
- vault-mysql-router:shared-db
- - vault-mysql-router:db-router
- mysql-innodb-cluster:db-router


@ -20,6 +20,8 @@ machines:
9:
constraints: "root-disk=20G mem=4G"
10: {}
11: {}
12: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
@ -113,6 +115,27 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
vault-mysql-router:
charm: cs:~openstack-charmers-next/mysql-router
vault:
charm: cs:~openstack-charmers-next/vault
num_units: 1
to:
- '11'
ovn-central:
charm: cs:~openstack-charmers-next/ovn-central
num_units: 3
options:
source: *openstack-origin
to:
- '12'
neutron-api-plugin-ovn:
charm: cs:~openstack-charmers-next/neutron-api-plugin-ovn
ovn-chassis:
charm: cs:~openstack-charmers-next/ovn-chassis
options:
# start new units paused to allow unit by unit OVS to OVN migration
new-units-paused: true
relations:
- - 'neutron-api:amqp'
- 'rabbitmq-server:amqp'
@ -156,3 +179,26 @@ relations:
- ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["glance-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["neutron-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
- ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"] - ["placement-mysql-router:db-router", "mysql-innodb-cluster:db-router"]
# We need to defer the addition of the neutron-api-plugin-ovn subordinate
# relation to the functional test as the test will first validate the legacy
# Neutron ML2+OVS topology, migrate it to OVN and then confirm connectivity
# post migration.
#
# - - neutron-api-plugin-ovn:neutron-plugin
# - neutron-api:neutron-plugin-api-subordinate
- - ovn-central:certificates
- vault:certificates
- - ovn-central:ovsdb-cms
- neutron-api-plugin-ovn:ovsdb-cms
- - ovn-chassis:nova-compute
- nova-compute:neutron-plugin
- - ovn-chassis:certificates
- vault:certificates
- - ovn-chassis:ovsdb
- ovn-central:ovsdb
- - vault:certificates
- neutron-api-plugin-ovn:certificates
- - vault:shared-db
- vault-mysql-router:shared-db
- - vault-mysql-router:db-router
- mysql-innodb-cluster:db-router


@ -1,7 +1,12 @@
charm_name: neutron-openvswitch
# NOTE: the OVN migration test runs at the end of a regular gate check and adds
# no more than 5-10 minutes to each job. We want this to run from focal-ussuri
# and onwards as that is the point where we recommend our users to migrate from
# OVS to OVN.
smoke_bundles:
- bionic-ussuri-dvr-snat - migrate-ovn: focal-ussuri-dvr-snat
gate_bundles:
- trusty-mitaka
@ -14,21 +19,52 @@ gate_bundles:
- bionic-stein-dvr-snat
- bionic-train-dvr-snat
- bionic-ussuri-dvr-snat
- focal-ussuri-dvr-snat - migrate-ovn: focal-ussuri-dvr-snat
- focal-victoria-dvr-snat - migrate-ovn: focal-victoria-dvr-snat
dev_bundles:
- groovy-victoria-dvr-snat - migrate-ovn: groovy-victoria-dvr-snat
configure:
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
- migrate-ovn:
- zaza.openstack.charm_tests.vault.setup.auto_initialize_no_validation
- zaza.openstack.charm_tests.glance.setup.add_lts_image
- zaza.openstack.charm_tests.neutron.setup.basic_overcloud_network
- zaza.openstack.charm_tests.nova.setup.create_flavors
- zaza.openstack.charm_tests.nova.setup.manage_ssh_key
- zaza.openstack.charm_tests.ovn.setup.pre_migration_configuration
configure_options:
configure_gateway_ext_port_use_juju_wait: false
target_deploy_status:
neutron-api-plugin-ovn:
workload-status: waiting
ovn-chassis:
workload-status: maintenance
workload-status-message: "Paused. Use 'resume' action to resume normal service."
ovn-central:
workload-status: waiting
workload-status-message: "'ovsdb-peer' incomplete, 'certificates' awaiting server certificate data"
vault:
workload-status: blocked
workload-status-message: Vault needs to be initialized
tests:
- zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest
- zaza.openstack.charm_tests.neutron.tests.NeutronOpenvSwitchTest
- migrate-ovn:
- zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest
- zaza.openstack.charm_tests.ovn.tests.OVSOVNMigrationTest
- zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest
tests_options:
# NOTE: This allows us to run the NeutronNetworkingTest multiple times while
# reusing the instances created for the first run. This both saves time and
# allows verifying instances survive an SDN migration.
zaza.openstack.charm_tests.neutron.tests.NeutronNetworkingTest.test_instances_have_networking.run_resource_cleanup: false
force_deploy:
- groovy-victoria-dvr-snat


@ -0,0 +1,185 @@
# Copyright 2020 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import test_utils
with mock.patch('neutron_ovs_utils.register_configs') as configs:
configs.return_value = 'test-config'
import cleanup as actions
class CleanupTestCase(test_utils.CharmTestCase):
def setUp(self):
super(CleanupTestCase, self).setUp(
actions, [
'ch_core',
'ch_openstack_utils',
'ch_ovs',
'neutron_ipset_cleanup',
'neutron_netns_cleanup',
'remove_patch_ports',
'remove_per_bridge_controllers',
'subprocess',
])
def test_cleanup(self):
self.ch_openstack_utils.is_unit_paused_set.return_value = False
with self.assertRaises(actions.UnitNotPaused):
actions.cleanup([])
self.ch_openstack_utils.is_unit_paused_set.return_value = True
with self.assertRaises(actions.MandatoryConfigurationNotSet):
actions.cleanup([])
self.ch_core.hookenv.config.return_value = 'openvswitch'
self.ch_core.hookenv.action_get.return_value = False
with self.assertRaises(actions.MandatoryConfigurationNotSet):
actions.cleanup([])
self.ch_core.hookenv.action_get.return_value = True
actions.cleanup([])
self.remove_patch_ports.assert_called_once_with('br-int')
self.ch_ovs.del_bridge.assert_called_once_with('br-tun')
self.remove_per_bridge_controllers.assert_called_once_with()
self.neutron_netns_cleanup.assert_called_once_with()
self.neutron_ipset_cleanup.assert_called_once_with()
class HelperTestCase(test_utils.CharmTestCase):
def setUp(self):
super(HelperTestCase, self).setUp(
actions, [
'ch_ovsdb',
])
@mock.patch.object(actions.ch_ovs, 'del_bridge_port')
@mock.patch.object(actions.ch_ovs, 'patch_ports_on_bridge')
def test_remove_patch_ports(
self, _patch_ports_on_bridge, _del_bridge_port):
_patch_ports_on_bridge.return_value = [actions.ch_ovs.Patch(
this_end=actions.ch_ovs.PatchPort(
bridge='this-end-bridge',
port='this-end-port'),
other_end=actions.ch_ovs.PatchPort(
bridge='other-end-bridge',
port='other-end-port')),
]
actions.remove_patch_ports('fake-bridge')
_patch_ports_on_bridge.assert_called_once_with(
'fake-bridge')
_del_bridge_port.assert_has_calls([
mock.call('this-end-bridge', 'this-end-port', linkdown=False),
mock.call('other-end-bridge', 'other-end-port', linkdown=False),
])
def test_remove_per_bridge_controllers(self):
bridge = mock.MagicMock()
bridge.__getitem__.return_value = 'fake-uuid'
ovsdb = mock.MagicMock()
ovsdb.bridge.__iter__.return_value = [bridge]
self.ch_ovsdb.SimpleOVSDB.return_value = ovsdb
actions.remove_per_bridge_controllers()
ovsdb.bridge.clear.assert_called_once_with('fake-uuid', 'controller')
@mock.patch.object(actions.subprocess, 'check_call')
def test_neutron_ipset_cleanup(self, _check_call):
actions.neutron_ipset_cleanup()
_check_call.assert_called_once_with(
(
'neutron-ipset-cleanup',
'--config-file=/etc/neutron/neutron.conf',
'--config-file=/etc/neutron/plugins/ml2/openvswitch_agent.ini',
))
@mock.patch.object(actions.os.path, 'exists')
@mock.patch.object(actions.os, 'unlink')
@mock.patch.object(actions.subprocess, 'check_call')
def test_neutron_netns_cleanup(self, _check_call, _unlink, _exists):
_exists.return_value = True
with test_utils.patch_open() as (_open, _file):
actions.neutron_netns_cleanup()
_open.assert_called_once_with(
'/etc/neutron/rootwrap.d/charm-n-ovs.filters', 'w')
_file.write.assert_called_once_with(
'[Filters]\n'
'neutron.cmd.netns_cleanup: CommandFilter, ip, root\n')
_check_call.assert_has_calls([
# FIXME: remove once package deps have been backported
mock.call(('apt', '-y', 'install', 'net-tools')),
mock.call(
(
'neutron-netns-cleanup',
'--force',
'--config-file=/etc/neutron/neutron.conf',
'--config-file=/etc/neutron/l3_agent.ini',
'--config-file=/etc/neutron/fwaas_driver.ini',
'--config-file=/etc/neutron/dhcp_agent.ini',
)),
])
_unlink.assert_called_once_with(
'/etc/neutron/rootwrap.d/charm-n-ovs.filters')
# Confirm behaviour when a config does not exist
_exists.reset_mock()
_exists.side_effect = [True, True, True, False]
_check_call.reset_mock()
actions.neutron_netns_cleanup()
_check_call.assert_has_calls([
# FIXME: remove once package deps have been backported
mock.call(('apt', '-y', 'install', 'net-tools')),
mock.call(
(
'neutron-netns-cleanup',
'--force',
'--config-file=/etc/neutron/neutron.conf',
'--config-file=/etc/neutron/l3_agent.ini',
'--config-file=/etc/neutron/fwaas_driver.ini',
)),
])
class MainTestCase(test_utils.CharmTestCase):
def setUp(self):
super(MainTestCase, self).setUp(actions, [
'ch_core'
])
def test_invokes_action(self):
dummy_calls = []
def dummy_action(args):
dummy_calls.append(True)
with mock.patch.dict(actions.ACTIONS, {'foo': dummy_action}):
actions.main(['foo'])
self.assertEqual(dummy_calls, [True])
def test_unknown_action(self):
"""Unknown actions aren't a traceback."""
exit_string = actions.main(['foo'])
self.assertEqual('Action "foo" undefined', exit_string)
def test_failing_action(self):
"""Actions which traceback trigger action_fail() calls."""
dummy_calls = []
self.ch_core.hookenv.action_fail.side_effect = dummy_calls.append
def dummy_action(args):
raise ValueError('uh oh')
with mock.patch.dict(actions.ACTIONS, {'foo': dummy_action}):
actions.main(['foo'])
self.assertEqual(dummy_calls, ['Action "foo" failed: "uh oh"'])