Add extension fwaas_v2 based on neutron-api relation data

The fwaas_v2 extension has been added to l3_agent.ini by default since
Stein. However, with the newly introduced neutron-api configuration
option enable-fwaas, the extension must be added only when that option
is set to true on the neutron-api charm.

The neutron-api charm adds enable-fwaas to its relation data in
https://review.opendev.org/c/openstack/charm-neutron-api/+/806676.
This patch removes the special handling of fwaas_v2, since
l3_extension_plugins is already populated in the relation data based on
the neutron-api enable-fwaas configuration.
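
For illustration, a minimal sketch of what the gateway side reduces to
after this patch (mirroring the L3AgentContext diff below; the
surrounding class and imports are elided):

    # neutron_contexts.py (sketch): no more hard-coded fwaas_v2 from
    # Stein onwards; the agent simply renders whatever
    # l3_extension_plugins neutron-api advertised over the relation.
    l3_extension_plugins = api_settings.get('l3_extension_plugins', [])
    ctxt['l3_extension_plugins'] = ','.join(l3_extension_plugins)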

Remove python3-neutron-fwaas from the package list from the Stein
release onwards. The package is already pulled in as a dependency of
neutron-l3-agent up to the Ussuri release, and the dependency will be
removed from the package control files as of the Victoria release.

Sync charmhelpers to pick up the related changes:
https://github.com/juju/charm-helpers/pull/635

Partial-Bug: #1934129
Change-Id: I07db7fb7c2f00eaf4c0a5bcc082c73922b87d1c0
Author: Hemanth Nakkina
Date: 2021-09-01 11:16:01 +05:30
parent 443ffbde88
commit de22d6cbe2
18 changed files with 262 additions and 2085 deletions


@@ -1,13 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -1,387 +0,0 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
from charmhelpers.contrib.openstack.amulet.utils import (
OPENSTACK_RELEASES_PAIRS
)
DEBUG = logging.DEBUG
ERROR = logging.ERROR
class OpenStackAmuletDeployment(AmuletDeployment):
"""OpenStack amulet deployment.
This class inherits from AmuletDeployment and has additional support
that is specifically for use by OpenStack charms.
"""
def __init__(self, series=None, openstack=None, source=None,
stable=True, log_level=DEBUG):
"""Initialize the deployment environment."""
super(OpenStackAmuletDeployment, self).__init__(series)
self.log = self.get_logger(level=log_level)
self.log.info('OpenStackAmuletDeployment: init')
self.openstack = openstack
self.source = source
self.stable = stable
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
"""Get a logger object that will log to stdout."""
log = logging
logger = log.getLogger(name)
fmt = log.Formatter("%(asctime)s %(funcName)s "
"%(levelname)s: %(message)s")
handler = log.StreamHandler(stream=sys.stdout)
handler.setLevel(level)
handler.setFormatter(fmt)
logger.addHandler(handler)
logger.setLevel(level)
return logger
def _determine_branch_locations(self, other_services):
"""Determine the branch locations for the other services.
Determine if the local branch being tested is derived from its
stable or next (dev) branch, and based on this, use the corresponding
stable or next branches for the other_services."""
self.log.info('OpenStackAmuletDeployment: determine branch locations')
# Charms outside the ~openstack-charmers
base_charms = {
'mysql': ['trusty'],
'mongodb': ['trusty'],
'nrpe': ['trusty', 'xenial'],
}
for svc in other_services:
# If a location has been explicitly set, use it
if svc.get('location'):
continue
if svc['name'] in base_charms:
# NOTE: not all charms have support for all series we
# want/need to test against, so fix to most recent
# that each base charm supports
target_series = self.series
if self.series not in base_charms[svc['name']]:
target_series = base_charms[svc['name']][-1]
svc['location'] = 'cs:{}/{}'.format(target_series,
svc['name'])
elif self.stable:
svc['location'] = 'cs:{}/{}'.format(self.series,
svc['name'])
else:
svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
self.series,
svc['name']
)
return other_services
def _add_services(self, this_service, other_services, use_source=None,
no_origin=None):
"""Add services to the deployment and optionally set
openstack-origin/source.
:param this_service dict: Service dictionary describing the service
whose amulet tests are being run
:param other_services dict: List of service dictionaries describing
the services needed to support the target
service
:param use_source list: List of services which use the 'source' config
option rather than 'openstack-origin'
:param no_origin list: List of services which do not support setting
the Cloud Archive.
Service Dict:
{
'name': str charm-name,
'units': int number of units,
'constraints': dict of juju constraints,
'location': str location of charm,
}
eg
this_service = {
'name': 'openvswitch-odl',
'constraints': {'mem': '8G'},
}
other_services = [
{
'name': 'nova-compute',
'units': 2,
'constraints': {'mem': '4G'},
'location': cs:~bob/xenial/nova-compute
},
{
'name': 'mysql',
'constraints': {'mem': '2G'},
},
{'neutron-api-odl'}]
use_source = ['mysql']
no_origin = ['neutron-api-odl']
"""
self.log.info('OpenStackAmuletDeployment: adding services')
other_services = self._determine_branch_locations(other_services)
super(OpenStackAmuletDeployment, self)._add_services(this_service,
other_services)
services = other_services
services.append(this_service)
use_source = use_source or []
no_origin = no_origin or []
# Charms which should use the source config option
use_source = list(set(
use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
'ceph-osd', 'ceph-radosgw', 'ceph-mon',
'ceph-proxy', 'percona-cluster', 'lxd']))
# Charms which cannot use openstack-origin, i.e. many subordinates
no_origin = list(set(
no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
'nrpe', 'openvswitch-odl', 'neutron-api-odl',
'odl-controller', 'cinder-backup', 'nexentaedge-data',
'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
'cinder-nexentaedge', 'nexentaedge-mgmt',
'ceilometer-agent']))
if self.openstack:
for svc in services:
if svc['name'] not in use_source + no_origin:
config = {'openstack-origin': self.openstack}
self.d.configure(svc['name'], config)
if self.source:
for svc in services:
if svc['name'] in use_source and svc['name'] not in no_origin:
config = {'source': self.source}
self.d.configure(svc['name'], config)
def _configure_services(self, configs):
"""Configure all of the services."""
self.log.info('OpenStackAmuletDeployment: configure services')
for service, config in six.iteritems(configs):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
Examples of message usage:
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
Wait for all units to reach this status (exact match):
message = re.compile('^Unit is ready and clustered$')
Wait for all units to reach any one of these (exact match):
message = re.compile('Unit is ready|OK|Ready')
Wait for at least one unit to reach this status (exact match):
message = {'ready'}
See Amulet's sentry.wait_for_messages() for message usage detail.
https://github.com/juju/amulet/blob/master/amulet/sentry.py
:param message: Expected status match
:param exclude_services: List of juju service names to ignore,
not to be used in conjunction with include_only.
:param include_only: List of juju service names to exclusively check,
not to be used in conjunction with exclude_services.
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
if not timeout:
timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
self.log.info('Waiting for extended status on units for {}s...'
''.format(timeout))
all_services = self.d.services.keys()
if exclude_services and include_only:
raise ValueError('exclude_services can not be used '
'with include_only')
if message:
if isinstance(message, re._pattern_type):
match = message.pattern
else:
match = message
self.log.debug('Custom extended status wait match: '
'{}'.format(match))
else:
self.log.debug('Default extended status wait match: contains '
'READY (case-insensitive)')
message = re.compile('.*ready.*', re.IGNORECASE)
if exclude_services:
self.log.debug('Excluding services from extended status match: '
'{}'.format(exclude_services))
else:
exclude_services = []
if include_only:
services = include_only
else:
services = list(set(all_services) - set(exclude_services))
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
# Check for idleness
self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
self.log.info('OK')
def _get_openstack_release(self):
"""Get openstack release.
Return an integer representing the enum value of the openstack
release.
"""
# Must be ordered by OpenStack release (not by Ubuntu release):
for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
setattr(self, os_pair, i)
releases = {
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
('xenial', None): self.xenial_mitaka,
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
('bionic', 'cloud:bionic-stein'): self.bionic_stein,
('bionic', 'cloud:bionic-train'): self.bionic_train,
('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri,
('cosmic', None): self.cosmic_rocky,
('disco', None): self.disco_stein,
('eoan', None): self.eoan_train,
('focal', None): self.focal_ussuri,
('focal', 'cloud:focal-victoria'): self.focal_victoria,
('groovy', None): self.groovy_victoria,
}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('trusty', 'icehouse'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]
def get_percona_service_entry(self, memory_constraint=None):
"""Return a amulet service entry for percona cluster.
:param memory_constraint: Override the default memory constraint
in the service entry.
:type memory_constraint: str
:returns: Amulet service entry.
:rtype: dict
"""
memory_constraint = memory_constraint or '3072M'
svc_entry = {
'name': 'percona-cluster',
'constraints': {'mem': memory_constraint}}
if self._get_openstack_release() <= self.trusty_mitaka:
svc_entry['location'] = 'cs:trusty/percona-cluster'
return svc_entry
def get_ceph_expected_pools(self, radosgw=False):
"""Return a list of expected ceph pools in a ceph + cinder + glance
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
if self._get_openstack_release() == self.trusty_icehouse:
# Icehouse
pools = [
'data',
'metadata',
'rbd',
'cinder-ceph',
'glance'
]
elif (self.trusty_kilo <= self._get_openstack_release() <=
self.zesty_ocata):
# Kilo through Ocata
pools = [
'rbd',
'cinder-ceph',
'glance'
]
else:
# Pike and later
pools = [
'cinder-ceph',
'glance'
]
if radosgw:
pools.extend([
'.rgw.root',
'.rgw.control',
'.rgw',
'.rgw.gc',
'.users.uid'
])
return pools

File diff suppressed because it is too large


@@ -25,7 +25,10 @@ import socket
import time
from base64 import b64decode
from subprocess import check_call, CalledProcessError
from subprocess import (
check_call,
check_output,
CalledProcessError)
import six
@@ -453,18 +456,24 @@ class IdentityServiceContext(OSContextGenerator):
serv_host = format_ipv6_addr(serv_host) or serv_host
auth_host = rdata.get('auth_host')
auth_host = format_ipv6_addr(auth_host) or auth_host
int_host = rdata.get('internal_host')
int_host = format_ipv6_addr(int_host) or int_host
svc_protocol = rdata.get('service_protocol') or 'http'
auth_protocol = rdata.get('auth_protocol') or 'http'
int_protocol = rdata.get('internal_protocol') or 'http'
api_version = rdata.get('api_version') or '2.0'
ctxt.update({'service_port': rdata.get('service_port'),
'service_host': serv_host,
'auth_host': auth_host,
'auth_port': rdata.get('auth_port'),
'internal_host': int_host,
'internal_port': rdata.get('internal_port'),
'admin_tenant_name': rdata.get('service_tenant'),
'admin_user': rdata.get('service_username'),
'admin_password': rdata.get('service_password'),
'service_protocol': svc_protocol,
'auth_protocol': auth_protocol,
'internal_protocol': int_protocol,
'api_version': api_version})
if float(api_version) > 2:
@@ -1781,6 +1790,10 @@ class NeutronAPIContext(OSContextGenerator):
'rel_key': 'enable-port-forwarding',
'default': False,
},
'enable_fwaas': {
'rel_key': 'enable-fwaas',
'default': False,
},
'global_physnet_mtu': {
'rel_key': 'global-physnet-mtu',
'default': 1500,
@@ -1815,6 +1828,11 @@
if ctxt['enable_port_forwarding']:
l3_extension_plugins.append('port_forwarding')
if ctxt['enable_fwaas']:
l3_extension_plugins.append('fwaas_v2')
if ctxt['enable_nfg_logging']:
l3_extension_plugins.append('fwaas_v2_log')
ctxt['l3_extension_plugins'] = l3_extension_plugins
return ctxt
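
For example (a sketch based on the context keys above), with
enable-fwaas and enable-nfg-logging both true on neutron-api the
rendered list becomes:

    # l3_extension_plugins as built by NeutronAPIContext (illustrative)
    ['fwaas_v2', 'fwaas_v2_log']  # plus 'port_forwarding' when enabled
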
@@ -2570,22 +2588,48 @@ class OVSDPDKDeviceContext(OSContextGenerator):
:returns: hex formatted CPU mask
:rtype: str
"""
num_cores = config('dpdk-socket-cores')
mask = 0
return self.cpu_masks()['dpdk_lcore_mask']
def cpu_masks(self):
"""Get hex formatted CPU masks
The mask is based on using the first config:dpdk-socket-cores
cores of each NUMA node in the unit, followed by the next
config:pmd-socket-cores cores of each NUMA node.
:returns: Dict of hex formatted CPU masks
:rtype: Dict[str, str]
"""
num_lcores = config('dpdk-socket-cores')
pmd_cores = config('pmd-socket-cores')
lcore_mask = 0
pmd_mask = 0
for cores in self._numa_node_cores().values():
for core in cores[:num_cores]:
mask = mask | 1 << core
return format(mask, '#04x')
for core in cores[:num_lcores]:
lcore_mask = lcore_mask | 1 << core
for core in cores[num_lcores:][:pmd_cores]:
pmd_mask = pmd_mask | 1 << core
return {
'pmd_cpu_mask': format(pmd_mask, '#04x'),
'dpdk_lcore_mask': format(lcore_mask, '#04x')}
def socket_memory(self):
"""Formatted list of socket memory configuration per NUMA node
"""Formatted list of socket memory configuration per socket.
:returns: socket memory configuration per NUMA node
:returns: socket memory configuration per socket.
:rtype: str
"""
lscpu_out = check_output(
['lscpu', '-p=socket']).decode('UTF-8').strip()
sockets = set()
for line in lscpu_out.split('\n'):
try:
sockets.add(int(line))
except ValueError:
# lscpu output is headed by comments so ignore them.
pass
sm_size = config('dpdk-socket-memory')
node_regex = '/sys/devices/system/node/node*'
mem_list = [str(sm_size) for _ in glob.glob(node_regex)]
mem_list = [str(sm_size) for _ in sockets]
if mem_list:
return ','.join(mem_list)
else:


@@ -334,7 +334,7 @@ def maybe_do_policyd_overrides(openstack_release,
restart_handler()
@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead")
@charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
"""This function is designed to be called from the config changed hook.


@@ -1,10 +1,22 @@
global
log /var/lib/haproxy/dev/log local0
log /var/lib/haproxy/dev/log local1 notice
# NOTE: on startup haproxy chroot's to /var/lib/haproxy.
#
# Unfortunately the program will open some files prior to the call to
# chroot never to reopen them, and some after. So looking at the on-disk
# layout of haproxy resources you will find some resources relative to /
# such as the admin socket, and some relative to /var/lib/haproxy such as
# the log socket.
#
# The logging socket is (re-)opened after the chroot and must be relative
# to /var/lib/haproxy.
log /dev/log local0
log /dev/log local1 notice
maxconn 20000
user haproxy
group haproxy
spread-checks 0
# The admin socket is opened prior to the chroot never to be reopened, so
# it lives outside the chroot directory in the filesystem.
stats socket /var/run/haproxy/admin.sock mode 600 level admin
stats timeout 2m


@@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
@@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
@@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}


@@ -15,7 +15,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
WSGIApplicationGroup %{GLOBAL}
@@ -41,7 +41,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
WSGIApplicationGroup %{GLOBAL}
@@ -67,7 +67,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
display-name=%{GROUP} lang=C.UTF-8 locale=C.UTF-8
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
WSGIApplicationGroup %{GLOBAL}


@@ -106,6 +106,8 @@ from charmhelpers.fetch import (
filter_installed_packages,
filter_missing_packages,
ubuntu_apt_pkg as apt,
OPENSTACK_RELEASES,
UBUNTU_OPENSTACK_RELEASE,
)
from charmhelpers.fetch.snap import (
@@ -132,54 +134,9 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
'restricted main multiverse universe')
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
])
OPENSTACK_CODENAMES = OrderedDict([
# NOTE(lourot): 'yyyy.i' isn't actually mapping with any real version
# number. This just means the i-th version of the year yyyy.
('2011.2', 'diablo'),
('2012.1', 'essex'),
('2012.2', 'folsom'),
@@ -200,6 +157,8 @@ OPENSTACK_CODENAMES = OrderedDict([
('2020.1', 'ussuri'),
('2020.2', 'victoria'),
('2021.1', 'wallaby'),
('2021.2', 'xena'),
('2022.1', 'yoga'),
])
# The ugly duckling - must list releases oldest to newest


@@ -18,8 +18,11 @@
import six
import re
TRUTHY_STRINGS = {'y', 'yes', 'true', 't', 'on'}
FALSEY_STRINGS = {'n', 'no', 'false', 'f', 'off'}
def bool_from_string(value):
def bool_from_string(value, truthy_strings=TRUTHY_STRINGS,
                     falsey_strings=FALSEY_STRINGS, assume_false=False):
"""Interpret string value as boolean.
Returns True if value translates to True otherwise False.
@@ -32,9 +35,9 @@ def bool_from_string(value):
value = value.strip().lower()
if value in ['y', 'yes', 'true', 't', 'on']:
if value in truthy_strings:
return True
elif value in ['n', 'no', 'false', 'f', 'off']:
elif value in falsey_strings or assume_false:
return False
msg = "Unable to interpret string value '%s' as boolean" % (value)


@@ -106,6 +106,8 @@ if __platform__ == "ubuntu":
apt_pkg = fetch.ubuntu_apt_pkg
get_apt_dpkg_env = fetch.get_apt_dpkg_env
get_installed_version = fetch.get_installed_version
OPENSTACK_RELEASES = fetch.OPENSTACK_RELEASES
UBUNTU_OPENSTACK_RELEASE = fetch.UBUNTU_OPENSTACK_RELEASE
elif __platform__ == "centos":
yum_search = fetch.yum_search


@@ -142,8 +142,10 @@ def pip_create_virtualenv(path=None):
"""Create an isolated Python environment."""
if six.PY2:
apt_install('python-virtualenv')
extra_flags = []
else:
apt_install('python3-virtualenv')
apt_install(['python3-virtualenv', 'virtualenv'])
extra_flags = ['--python=python3']
if path:
venv_path = path
@@ -151,4 +153,4 @@ def pip_create_virtualenv(path=None):
venv_path = os.path.join(charm_dir(), 'venv')
if not os.path.exists(venv_path):
subprocess.check_call(['virtualenv', venv_path])
subprocess.check_call(['virtualenv', venv_path] + extra_flags)
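
Under Python 3 the helper now also installs the virtualenv wrapper and
pins the interpreter, so for a hypothetical path /srv/venv the final
call is equivalent to:

    subprocess.check_call(['virtualenv', '/srv/venv', '--python=python3'])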


@@ -208,12 +208,79 @@ CLOUD_ARCHIVE_POCKETS = {
'wallaby/proposed': 'focal-proposed/wallaby',
'focal-wallaby/proposed': 'focal-proposed/wallaby',
'focal-proposed/wallaby': 'focal-proposed/wallaby',
# Xena
'xena': 'focal-updates/xena',
'focal-xena': 'focal-updates/xena',
'focal-xena/updates': 'focal-updates/xena',
'focal-updates/xena': 'focal-updates/xena',
'xena/proposed': 'focal-proposed/xena',
'focal-xena/proposed': 'focal-proposed/xena',
'focal-proposed/xena': 'focal-proposed/xena',
# Yoga
'yoga': 'focal-updates/yoga',
'focal-yoga': 'focal-updates/yoga',
'focal-yoga/updates': 'focal-updates/yoga',
'focal-updates/yoga': 'focal-updates/yoga',
'yoga/proposed': 'focal-proposed/yoga',
'focal-yoga/proposed': 'focal-proposed/yoga',
'focal-proposed/yoga': 'focal-proposed/yoga',
}
OPENSTACK_RELEASES = (
'diablo',
'essex',
'folsom',
'grizzly',
'havana',
'icehouse',
'juno',
'kilo',
'liberty',
'mitaka',
'newton',
'ocata',
'pike',
'queens',
'rocky',
'stein',
'train',
'ussuri',
'victoria',
'wallaby',
'xena',
'yoga',
)
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('oneiric', 'diablo'),
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
('wily', 'liberty'),
('xenial', 'mitaka'),
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
('cosmic', 'rocky'),
('disco', 'stein'),
('eoan', 'train'),
('focal', 'ussuri'),
('groovy', 'victoria'),
('hirsute', 'wallaby'),
('impish', 'xena'),
])
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries.
CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times.
CMD_RETRY_COUNT = 10 # Retry a failing fatal command X times.
def filter_installed_packages(packages):
@@ -574,6 +641,10 @@ def add_source(source, key=None, fail_invalid=False):
with be used. If staging is NOT used then the cloud archive [3] will be
added, and the 'ubuntu-cloud-keyring' package will be added for the
current distro.
'<openstack-version>': translate to cloud:<release> based on the current
distro version (i.e. for 'ussuri' this will either be 'bionic-ussuri' or
'distro').
'<openstack-version>/proposed': as above, but for proposed.
Otherwise the source is not recognised and this is logged to the juju log.
However, no error is raised, unless sys_error_on_exit is True.
@@ -600,6 +671,12 @@ def add_source(source, key=None, fail_invalid=False):
@raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a
valid pocket in CLOUD_ARCHIVE_POCKETS
"""
# extract the OpenStack versions from the CLOUD_ARCHIVE_POCKETS; can't use
# the list in contrib.openstack.utils as it might not be included in
# classic charms and would break everything. Having OpenStack specific
# code in this file is a bit of an antipattern, anyway.
os_versions_regex = "({})".format("|".join(OPENSTACK_RELEASES))
_mapping = OrderedDict([
(r"^distro$", lambda: None), # This is a NOP
(r"^(?:proposed|distro-proposed)$", _add_proposed),
@@ -609,6 +686,9 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
(r"^{}\/proposed$".format(os_versions_regex),
_add_bare_openstack_proposed),
(r"^{}$".format(os_versions_regex), _add_bare_openstack),
])
if source is None:
source = ''
@@ -662,7 +742,8 @@ def _add_apt_repository(spec):
series = get_distrib_codename()
spec = spec.replace('{series}', series)
_run_with_retries(['add-apt-repository', '--yes', spec],
cmd_env=env_proxy_settings(['https', 'http']))
cmd_env=env_proxy_settings(['https', 'http', 'no_proxy'])
)
def _add_cloud_pocket(pocket):
@@ -738,6 +819,73 @@ def _verify_is_ubuntu_rel(release, os_release):
'version ({})'.format(release, os_release, ubuntu_rel))
def _add_bare_openstack(openstack_release):
"""Add cloud or distro based on the release given.
The spec given is, say, 'ussuri', but this could apply cloud:bionic-ussuri
or 'distro' depending on whether the ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
# TODO(ajkavanagh) - surely this means we should be removing cloud archives
# if they exist?
__add_bare_helper(openstack_release, "{}-{}", lambda: None)
def _add_bare_openstack_proposed(openstack_release):
"""Add cloud of distro but with proposed.
The spec given is, say, 'ussuri' but this could apply
cloud:bionic-ussuri/proposed or 'distro/proposed' depending on whether the
ubuntu release is bionic or focal.
:param openstack_release: the OpenStack codename to determine the release
for.
:type openstack_release: str
:raises: SourceConfigError
"""
__add_bare_helper(openstack_release, "{}-{}/proposed", _add_proposed)
def __add_bare_helper(openstack_release, pocket_format, final_function):
"""Helper for _add_bare_openstack[_proposed]
The bulk of the work between the two functions is exactly the same except
for the pocket format and the function that is run if it's the distro
version.
:param openstack_release: the OpenStack codename. e.g. ussuri
:type openstack_release: str
:param pocket_format: the pocket formatter string to construct a pocket str
from the openstack_release and the current ubuntu version.
:type pocket_format: str
:param final_function: the function to call if it is the distro version.
:type final_function: Callable
:raises SourceConfigError on error
"""
ubuntu_version = get_distrib_codename()
possible_pocket = pocket_format.format(ubuntu_version, openstack_release)
if possible_pocket in CLOUD_ARCHIVE_POCKETS:
_add_cloud_pocket(possible_pocket)
return
# Otherwise it's almost certainly the distro version; verify that it
# exists.
try:
assert UBUNTU_OPENSTACK_RELEASE[ubuntu_version] == openstack_release
except KeyError:
raise SourceConfigError(
"Invalid ubuntu version {} isn't known to this library"
.format(ubuntu_version))
except AssertionError:
raise SourceConfigError(
'Invalid OpenStack release specified: {} for ubuntu version {}'
.format(openstack_release, ubuntu_version))
final_function()
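
A brief sketch of how the new bare-release specs resolve (assuming a
focal unit and the pockets defined above):

    add_source('xena')           # focal-xena pocket -> cloud archive
    add_source('xena/proposed')  # focal-proposed/xena
    add_source('ussuri')         # distro release on focal -> no-op
    add_source('queens')         # SourceConfigError: not valid on focal
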
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None, quiet=False):
"""Run a command and retry until success or max_retries is reached.


@@ -264,7 +264,7 @@ def version_compare(a, b):
else:
raise RuntimeError('Unable to compare "{}" and "{}", according to '
'our logic they are neither greater, equal nor '
'less than each other.')
'less than each other.'.format(a, b))
class PkgVersion():


@@ -28,6 +28,9 @@ def get_platform():
elif "elementary" in current_platform:
# ElementaryOS fails to run tests locally without this.
return "ubuntu"
elif "Pop!_OS" in current_platform:
# Pop!_OS also fails to run tests locally without this.
return "ubuntu"
else:
raise RuntimeError("This module is not supported on {}."
.format(current_platform))


@@ -128,18 +128,7 @@ class L3AgentContext(OSContextGenerator):
ctxt['report_interval'] = api_settings['report_interval']
ctxt['use_l3ha'] = api_settings['enable_l3ha']
cmp_os_release = CompareOpenStackReleases(os_release('neutron-common'))
l3_extension_plugins = api_settings.get('l3_extension_plugins', [])
# per Change-Id If1b332eb0f581e9acba111f79ba578a0b7081dd2
# only enable it for stein although fwaasv2 was added in Queens
is_stein = cmp_os_release >= 'stein'
if is_stein:
l3_extension_plugins.append('fwaas_v2')
if (is_stein and api_settings.get('enable_nfg_logging')):
l3_extension_plugins.append('fwaas_v2_log')
ctxt['l3_extension_plugins'] = ','.join(l3_extension_plugins)
return ctxt


@@ -191,6 +191,9 @@ GATEWAY_PKGS = {
],
}
# python3-{nova,neutron} packages are added in PY3_PACKAGES to support
# the switch to py3 from the Rocky release. Previously installed py2
# packages are added to PURGE_PACKAGES so they are purged.
PURGE_PACKAGES = [
'python-mysqldb',
'python-psycopg2',
@@ -297,6 +300,10 @@ def get_packages():
packages.extend(PY3_PACKAGES)
if cmp_os_source >= 'train':
packages.remove('python3-neutron-lbaas')
# Remove python3-neutron-fwaas from the Stein release onwards as the
# package is included as a dependency of neutron-l3-agent.
if cmp_os_source >= 'stein':
packages.remove('python3-neutron-fwaas')
return packages


@@ -61,11 +61,14 @@ class TestL3AgentContext(CharmTestCase):
def test_new_ext_network(self, _NeutronAPIContext):
self.os_release.return_value = 'stein'
_NeutronAPIContext.return_value = \
DummyNeutronAPIContext(return_value={'enable_dvr': False,
'report_interval': 30,
'rpc_response_timeout': 60,
'enable_l3ha': True,
})
DummyNeutronAPIContext(return_value={
'enable_dvr': False,
'report_interval': 30,
'rpc_response_timeout': 60,
'enable_l3ha': True,
'enable_fwaas': True,
'l3_extension_plugins': ['fwaas_v2'],
})
self.test_config.set('run-internal-router', 'none')
self.test_config.set('external-network-id', '')
self.eligible_leader.return_value = False