Sync charm/ceph helpers, tox, and requirements

Also clean up pre-existing pep8 violations in the
files/* dir which was previously not covered by lint testing.

Change-Id: I443a084681d5788c89cdadf2ff999030f4b4d735
Ryan Beisner 2019-09-30 17:08:54 -05:00
parent 0261001e1c
commit 07865bd62c
15 changed files with 1168 additions and 87 deletions

charm-helpers-hooks.yaml

@@ -14,3 +14,4 @@ include:
     - payload
     - contrib.charmsupport
     - contrib.hardening|inc=*
+    - contrib.openstack.policyd

files/neutron-ha-monitor.py

@@ -224,8 +224,9 @@ class MonitorNeutronAgentsDaemon(Daemon):
             LOG.info('Moving router %s from %s to %s' %
                      (router_id, routers[router_id], l3_agents[agent]))
             try:
-                quantum.remove_router_from_l3_agent(l3_agent=routers[router_id],
-                                                    router_id=router_id)
+                quantum.remove_router_from_l3_agent(
+                    l3_agent=routers[router_id], router_id=router_id)
             except exceptions.NeutronException as e:
                 LOG.error('Remove router raised exception: %s' % e)
             try:
@@ -344,7 +345,6 @@ class MonitorNeutronAgentsDaemon(Daemon):
         if len(dhcp_agents) > 0:
             self.dhcp_agents_reschedule(dhcp_agents, networks, quantum)
-
     def check_ovs_tunnel(self, quantum=None):
         '''
         Work around for Bug #1411163
@@ -368,7 +368,8 @@ class MonitorNeutronAgentsDaemon(Daemon):
                 and conf['devices']:
             LOG.debug('local ovs agent:%s' % agent)
-            ovs_output = subprocess.check_output(['ovs-vsctl',
-                                                  'list-ports', 'br-tun'])
+            ovs_output = subprocess.check_output(['ovs-vsctl',
+                                                  'list-ports',
+                                                  'br-tun'])
             ports = ovs_output.strip().split('\n')
             look_up_gre_port = False
             for port in ports:
@@ -377,13 +378,15 @@ class MonitorNeutronAgentsDaemon(Daemon):
                     break
             if not look_up_gre_port:
                 try:
-                    LOG.error('Local agent has devices, but no ovs tunnel is created,'
-                              'restart ovs agent.')
-                    cmd = ['sudo', 'service', 'neutron-plugin-openvswitch-agent',
+                    LOG.error('Local agent has devices, but no ovs '
+                              'tunnel is created, restart ovs agent.')
+                    cmd = ['sudo', 'service',
+                           'neutron-plugin-openvswitch-agent',
                            'restart']
                     subprocess.call(cmd)
                 except subprocess.CalledProcessError:
-                    LOG.error('Failed to restart neutron-plugin-openvswitch-agent.')
+                    LOG.error('Failed to restart '
+                              'neutron-plugin-openvswitch-agent.')

     def check_local_agents(self):
         services = ['openvswitch-switch', 'neutron-dhcp-agent',

hooks/charmhelpers/contrib/hardening/audits/apt.py

@@ -13,7 +13,6 @@
 # limitations under the License.

 from __future__ import absolute_import  # required for external apt import
-from apt import apt_pkg
 from six import string_types

 from charmhelpers.fetch import (
@@ -26,6 +25,7 @@ from charmhelpers.core.hookenv import (
     WARNING,
 )
 from charmhelpers.contrib.hardening.audits import BaseAudit
+from charmhelpers.fetch import ubuntu_apt_pkg as apt_pkg


 class AptConfig(BaseAudit):

tests/charmhelpers/contrib/openstack/amulet/utils.py

@@ -709,8 +709,8 @@ class OpenStackAmuletUtils(AmuletUtils):
                        '{}...'.format(image_name, image_url))

         # Download image
-        http_proxy = os.getenv('AMULET_HTTP_PROXY')
-        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+        http_proxy = os.getenv('OS_TEST_HTTP_PROXY')
+        self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy))
         if http_proxy:
             proxies = {'http': http_proxy}
             opener = urllib.FancyURLopener(proxies)
@@ -800,8 +800,8 @@ class OpenStackAmuletUtils(AmuletUtils):
                        '({})...'.format(image_name))

         # Get cirros image URL
-        http_proxy = os.getenv('AMULET_HTTP_PROXY')
-        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+        http_proxy = os.getenv('OS_TEST_HTTP_PROXY')
+        self.log.debug('OS_TEST_HTTP_PROXY: {}'.format(http_proxy))
         if http_proxy:
             proxies = {'http': http_proxy}
             opener = urllib.FancyURLopener(proxies)

hooks/charmhelpers/contrib/openstack/ha/utils.py

@@ -289,7 +289,7 @@ def update_hacluster_vip(service, relation_data):
         iface, netmask, fallback = get_vip_settings(vip)

-        vip_monitoring = 'op monitor depth="0" timeout="20s" interval="10s"'
+        vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
         if iface is not None:
             # NOTE(jamespage): Delete old VIP resources
             # Old style naming encoding iface in name

hooks/charmhelpers/contrib/openstack/policyd.py

@@ -0,0 +1,700 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import os
import six
import shutil
import yaml
import zipfile
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as ch_host
# Features provided by this module:
"""
Policy.d helper functions
=========================
The functions in this module are designed, as a set, to provide an easy-to-use
set of hooks for classic charms to install override YAML files into the
/etc/<service-name>/policy.d/ directory.
(For charms.openstack charms, a mixin class is provided for this
functionality).
In order to "hook" this functionality into a (classic) charm, two functions are
provided:

    maybe_do_policyd_overrides(openstack_release,
                               service,
                               blacklist_paths=None,
                               blacklist_keys=None,
                               template_function=None,
                               restart_handler=None)

    maybe_do_policyd_overrides_on_config_changed(openstack_release,
                                                 service,
                                                 blacklist_paths=None,
                                                 blacklist_keys=None,
                                                 template_function=None,
                                                 restart_handler=None)

(See the docstrings for details on the parameters)
The functions should be called from the install and upgrade hooks in the charm.
The `maybe_do_policyd_overrides_on_config_changed` function is designed to be
called from the config-changed hook; it performs an additional check to ensure
that an override already applied during an install or upgrade hook isn't
repeated.
In order to *enable* this functionality, the charm's install, config_changed,
and upgrade_charm hooks need to be modified, and a new config option (see
below) needs to be added. The README for the charm should also be updated.
Examples from the keystone charm are:
@hooks.hook('install.real')
@harden()
def install():
...
# call the policy overrides handler which will install any policy overrides
maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
@hooks.hook('config-changed')
@restart_on_change(restart_map(), restart_functions=restart_function_map())
@harden()
def config_changed():
...
# call the policy overrides handler which will install any policy overrides
maybe_do_policyd_overrides_on_config_changed(os_release('keystone'),
'keystone')
@hooks.hook('upgrade-charm')
@restart_on_change(restart_map(), stopstart=True)
@harden()
def upgrade_charm():
...
# call the policy overrides handler which will install any policy overrides
maybe_do_policyd_overrides(os_release('keystone'), 'keystone')
Status Line
===========
The workload status code in charm-helpers has been modified to detect if
policy.d override code has been incorporated into the charm by checking for the
new config variable (in the config.yaml). If it has been, then the workload
status line will automatically show "PO:" at the beginning of the workload
status for that unit/service if the config option is set. If the policy
override is broken, the "PO (broken):" will be shown. No changes to the charm
(apart from those already mentioned) are needed to enable this functionality.
(charms.openstack charms also get this functionality, but please see that
library for further details).
"""
# The config.yaml for the charm should contain the following for the config
# option:
"""
use-policyd-override:
type: boolean
default: False
description: |
If True then use the resource file named 'policyd-override' to install
override yaml files in the service's policy.d directory. The resource
file should be a zip file containing at least one yaml file with a .yaml
or .yml extension. If False then remove the overrides.
"""
# The metadata.yaml for the charm should contain the following:
"""
resources:
policyd-override:
type: file
filename: policyd-override.zip
description: The policy.d overrides file
"""
# The README for the charm should contain the following:
"""
Policy Overrides
----------------
This service allows for policy overrides using the `policy.d` directory. This
is an **advanced** feature and the policies that the service supports should be
clearly and unambiguously understood before trying to override, or add to, the
default policies that the service uses.
The charm also has some policy defaults. They should also be understood before
being overridden. It is possible to break the system (for tenants and other
services) if policies are incorrectly applied to the service.
Policy overrides are YAML files that contain rules that will add to, or
override, existing policy rules in the service. The `policy.d` directory is
a place to put the YAML override files. This charm owns the
`/etc/keystone/policy.d` directory, and as such, any manual changes to it will
be overwritten on charm upgrades.
Policy overrides are provided to the charm using a resource file called
`policyd-override`. This is attached to the charm using (for example):
juju attach-resource <charm-name> policyd-override=<some-file>
The `<charm-name>` is the name that this charm is deployed as, with
`<some-file>` being the resource file containing the policy overrides.
The format of the resource file is a ZIP file (.zip extension) containing at
least one YAML file with an extension of `.yaml` or `.yml`. Note that any
directories in the ZIP file are ignored; all of the files are flattened into a
single directory. There must not be any duplicated filenames; this will cause
an error and nothing in the resource file will be applied.
(ed. the next part is optional if the charm supports some form of
template/substitution on a read file)
If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the
resource file then the charm will perform a substitution with charm variables
taken from the config or relations. (ed. edit as appropriate to include the
variable).
To enable the policy overrides the config option `use-policyd-override` must be
set to `True`.
When `use-policyd-override` is `True` the status line of the charm will be
prefixed with `PO:` indicating that policies have been overridden. If the
installation of the policy override YAML files failed for any reason then the
status line will be prefixed with `PO (broken):`. The log file for the charm
will indicate the reason. No policy override files are installed if `PO
(broken):` is shown. The status line indicates that the overrides are broken,
not that the policy for the service has failed - they will be the defaults for
the charm and service.
If the policy overrides did not install then *either* attach a new, corrected,
resource file *or* disable the policy overrides by setting
`use-policyd-override` to False.
Policy overrides on one service may affect the functionality of another
service. Therefore, it may be necessary to provide policy overrides for
multiple service charms to achieve a consistent set of policies across the
OpenStack system. The charms for the other services that may need overrides
should be checked to ensure that they support overrides before proceeding.
"""
POLICYD_VALID_EXTS = ['.yaml', '.yml', '.j2', '.tmpl', '.tpl']
POLICYD_TEMPLATE_EXTS = ['.j2', '.tmpl', '.tpl']
POLICYD_RESOURCE_NAME = "policyd-override"
POLICYD_CONFIG_NAME = "use-policyd-override"
POLICYD_SUCCESS_FILENAME = "policyd-override-success"
POLICYD_LOG_LEVEL_DEFAULT = hookenv.INFO
POLICYD_ALWAYS_BLACKLISTED_KEYS = ("admin_required", "cloud_admin")
class BadPolicyZipFile(Exception):
def __init__(self, log_message):
self.log_message = log_message
def __str__(self):
return self.log_message
class BadPolicyYamlFile(Exception):
def __init__(self, log_message):
self.log_message = log_message
def __str__(self):
return self.log_message
if six.PY2:
BadZipFile = zipfile.BadZipfile
else:
BadZipFile = zipfile.BadZipFile
def is_policyd_override_valid_on_this_release(openstack_release):
"""Check that the charm is running on at least Ubuntu Xenial, and at
least the queens release.
:param openstack_release: the release codename that is installed.
:type openstack_release: str
:returns: True if okay
:rtype: bool
"""
# NOTE(ajkavanagh) circular import! This is because the status message
# generation code in utils has to call into this module, but this function
# needs the CompareOpenStackReleases() function. The only way to solve
# this is either to put ALL of this module into utils, or refactor one or
# other of the CompareOpenStackReleases or status message generation code
# into a 3rd module.
import charmhelpers.contrib.openstack.utils as ch_utils
return ch_utils.CompareOpenStackReleases(openstack_release) >= 'queens'
def maybe_do_policyd_overrides(openstack_release,
service,
blacklist_paths=None,
blacklist_keys=None,
template_function=None,
restart_handler=None):
"""If the config option is set, get the resource file and process it to
enable the policy.d overrides for the service passed.
The param `openstack_release` is required as the policyd overrides feature
is only supported on openstack_release "queens" or later, and on ubuntu
"xenial" or later. Prior to these versions, this feature is a NOP.
The optional template_function is a function that accepts a string and has
an opportunity to modify the loaded file prior to it being read by
yaml.safe_load(). This allows the charm to perform "templating" using
charm derived data.
    The param blacklist_paths is a list of paths (in the service's policy.d
    directory) that should not be touched.
The param blacklist_keys are keys that must not appear in the yaml file.
If they do, then the whole policy.d file fails.
The yaml file extracted from the resource_file (which is a zipped file) has
its file path reconstructed. This, also, must not match any path in the
black list.
The param restart_handler is an optional Callable that is called to perform
the service restart if the policy.d file is changed. This should normally
be None as oslo.policy automatically picks up changes in the policy.d
directory. However, for any services where this is buggy then a
restart_handler can be used to force the policy.d files to be read.
:param openstack_release: The openstack release that is installed.
:type openstack_release: str
:param service: the service name to construct the policy.d directory for.
:type service: str
:param blacklist_paths: optional list of paths to leave alone
:type blacklist_paths: Union[None, List[str]]
:param blacklist_keys: optional list of keys that mustn't appear in the
        yaml files
:type blacklist_keys: Union[None, List[str]]
:param template_function: Optional function that can modify the string
prior to being processed as a Yaml document.
:type template_function: Union[None, Callable[[str], str]]
:param restart_handler: The function to call if the service should be
restarted.
    :type restart_handler: Union[None, Callable[[], None]]
"""
config = hookenv.config()
try:
if not config.get(POLICYD_CONFIG_NAME, False):
remove_policy_success_file()
clean_policyd_dir_for(service, blacklist_paths)
return
except Exception:
return
if not is_policyd_override_valid_on_this_release(openstack_release):
return
# from now on it should succeed; if it doesn't then status line will show
# broken.
resource_filename = get_policy_resource_filename()
restart = process_policy_resource_file(
resource_filename, service, blacklist_paths, blacklist_keys,
template_function)
if restart and restart_handler is not None and callable(restart_handler):
restart_handler()
def maybe_do_policyd_overrides_on_config_changed(openstack_release,
service,
blacklist_paths=None,
blacklist_keys=None,
template_function=None,
restart_handler=None):
"""This function is designed to be called from the config changed hook
handler. It will only perform the policyd overrides if the config is True
and the success file doesn't exist. Otherwise, it does nothing as the
resource file has already been processed.
See maybe_do_policyd_overrides() for more details on the params.
:param openstack_release: The openstack release that is installed.
:type openstack_release: str
:param service: the service name to construct the policy.d directory for.
:type service: str
:param blacklist_paths: optional list of paths to leave alone
:type blacklist_paths: Union[None, List[str]]
:param blacklist_keys: optional list of keys that mustn't appear in the
        yaml files
:type blacklist_keys: Union[None, List[str]]
:param template_function: Optional function that can modify the string
prior to being processed as a Yaml document.
:type template_function: Union[None, Callable[[str], str]]
:param restart_handler: The function to call if the service should be
restarted.
    :type restart_handler: Union[None, Callable[[], None]]
"""
config = hookenv.config()
try:
if not config.get(POLICYD_CONFIG_NAME, False):
remove_policy_success_file()
clean_policyd_dir_for(service, blacklist_paths)
return
except Exception:
return
# if the policyd overrides have been performed just return
if os.path.isfile(_policy_success_file()):
return
maybe_do_policyd_overrides(
openstack_release, service, blacklist_paths, blacklist_keys,
template_function, restart_handler)
def get_policy_resource_filename():
"""Function to extract the policy resource filename
:returns: The filename of the resource, if set, otherwise, if an error
occurs, then None is returned.
:rtype: Union[str, None]
"""
try:
return hookenv.resource_get(POLICYD_RESOURCE_NAME)
except Exception:
return None
@contextlib.contextmanager
def open_and_filter_yaml_files(filepath):
"""Validate that the filepath provided is a zip file and contains at least
one (.yaml|.yml) file, and that the files are not duplicated when the zip
file is flattened. Note that the yaml files are not checked. This is the
first stage in validating the policy zipfile; individual yaml files are not
checked for validity or black listed keys.
An example of use is:
        with open_and_filter_yaml_files(some_path) as (zfp, g):
for zipinfo in g:
# do something with zipinfo ...
:param filepath: a filepath object that can be opened by zipfile
    :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
    :returns: (zfp handle,
               a generator of the (name, ext, filename, ZipInfo) tuples) as a
               tuple.
    :rtype: ContextManager[(zipfile.ZipFile,
                            Generator[(str, str, str, zipfile.ZipInfo)])]
:raises: zipfile.BadZipFile
:raises: BadPolicyZipFile if duplicated yaml or missing
:raises: IOError if the filepath is not found
"""
with zipfile.ZipFile(filepath, 'r') as zfp:
# first pass through; check for duplicates and at least one yaml file.
names = collections.defaultdict(int)
yamlfiles = _yamlfiles(zfp)
for name, _, _, _ in yamlfiles:
names[name] += 1
# There must be at least 1 yaml file.
if len(names.keys()) == 0:
raise BadPolicyZipFile("contains no yaml files with {} extensions."
.format(", ".join(POLICYD_VALID_EXTS)))
# There must be no duplicates
duplicates = [n for n, c in names.items() if c > 1]
if duplicates:
raise BadPolicyZipFile("{} have duplicates in the zip file."
.format(", ".join(duplicates)))
# Finally, let's yield the generator
yield (zfp, yamlfiles)
def _yamlfiles(zipfile):
"""Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions)
and the infolist item from a zipfile.
:param zipfile: the zipfile to read zipinfo items from
:type zipfile: zipfile.ZipFile
    :returns: list of (name, ext, filename, info item) tuples for each
        self-identified yaml file.
    :rtype: List[(str, str, str, zipfile.ZipInfo)]
"""
    files = []
    for infolist_item in zipfile.infolist():
        if infolist_item.is_dir():
            continue
        _, name_ext = os.path.split(infolist_item.filename)
        name, ext = os.path.splitext(name_ext)
        ext = ext.lower()
        if ext and ext in POLICYD_VALID_EXTS:
            files.append((name, ext, name_ext, infolist_item))
    return files
def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
"""Read, validate and return the (first) yaml document from the stream.
    The doc is read and checked to be a yaml document. The top-level keys are
checked against the blacklist_keys provided. If there are problems then an
Exception is raised. Otherwise the yaml document is returned as a Python
object that can be dumped back as a yaml file on the system.
The yaml file must only consist of a str:str mapping, and if not then the
yaml file is rejected.
:param stream_or_doc: the file object to read the yaml from
:type stream_or_doc: Union[AnyStr, IO[AnyStr]]
    :param blacklist_keys: Any keys, which if in the yaml file, should cause
        an error.
    :type blacklist_keys: Union[None, List[str]]
:returns: the yaml file as a python document
:rtype: Dict[str, str]
:raises: yaml.YAMLError if there is a problem with the document
:raises: BadPolicyYamlFile if file doesn't look right or there are
blacklisted keys in the file.
"""
blacklist_keys = blacklist_keys or []
    # the always-blacklisted keys must be added as individual items, not as
    # a single tuple, or the intersection check below would never match them
    blacklist_keys.extend(POLICYD_ALWAYS_BLACKLISTED_KEYS)
doc = yaml.safe_load(stream_or_doc)
if not isinstance(doc, dict):
raise BadPolicyYamlFile("doesn't look like a policy file?")
keys = set(doc.keys())
blacklisted_keys_present = keys.intersection(blacklist_keys)
if blacklisted_keys_present:
raise BadPolicyYamlFile("blacklisted keys {} present."
.format(", ".join(blacklisted_keys_present)))
if not all(isinstance(k, six.string_types) for k in keys):
raise BadPolicyYamlFile("keys in yaml aren't all strings?")
# check that the dictionary looks like a mapping of str to str
if not all(isinstance(v, six.string_types) for v in doc.values()):
raise BadPolicyYamlFile("values in yaml aren't all strings?")
return doc
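
# A minimal sketch of a document the validator above accepts: a flat
# str -> str mapping with no blacklisted top-level keys, e.g.
#
#     read_and_validate_yaml('"identity:update_user": "rule:admin_only"')
#     # -> {'identity:update_user': 'rule:admin_only'}
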
def policyd_dir_for(service):
"""Return the policy directory for the named service.
This assumes the default name of "policy.d" which is kept across all
charms.
    :param service: the name of the service
    :type service: str
:returns: the policy.d override directory.
:rtype: os.PathLike[str]
"""
return os.path.join("/", "etc", service, "policy.d")
def clean_policyd_dir_for(service, keep_paths=None):
"""Clean out the policyd directory except for items that should be kept.
The keep_paths, if used, should be set to the full path of the files that
should be kept in the policyd directory for the service. Note that the
service name is passed in, and then the policyd_dir_for() function is used.
This is so that a coding error doesn't result in a sudden deletion of the
charm (say).
:param service: the service name to use to construct the policy.d dir.
:type service: str
:param keep_paths: optional list of paths to not delete.
:type keep_paths: Union[None, List[str]]
"""
keep_paths = keep_paths or []
path = policyd_dir_for(service)
if not os.path.exists(path):
ch_host.mkdir(path, owner=service, group=service, perms=0o775)
_scanner = os.scandir if six.PY3 else _py2_scandir
for direntry in _scanner(path):
# see if the path should be kept.
if direntry.path in keep_paths:
continue
# we remove any directories; it's ours and there shouldn't be any
if direntry.is_dir():
shutil.rmtree(direntry.path)
else:
os.remove(direntry.path)
def _py2_scandir(path):
    """Provide a py2 implementation of os.scandir if this module ever gets
    used in a py2 charm (unlikely). Uses os.listdir() to get the names in
    the path, and then mocks the is_dir() function using os.path.isdir() to
    check for a directory.

    :param path: the path to list the directories for
    :type path: str
    :returns: Generator that provides _P27Direntry objects
    :rtype: Generator[_P27Direntry]
    """
    # A plain generator (not a context manager) so callers can iterate over
    # it directly, like os.scandir(); join the path so that is_dir() and the
    # keep_paths comparisons operate on full paths.
    for f in os.listdir(path):
        yield _P27Direntry(os.path.join(path, f))
class _P27Direntry(object):
"""Mock a scandir Direntry object with enough to use in
clean_policyd_dir_for
"""
def __init__(self, path):
self.path = path
def is_dir(self):
return os.path.isdir(self.path)
def path_for_policy_file(service, name):
"""Return the full path for a policy.d file that will be written to the
service's policy.d directory.
It is constructed using policyd_dir_for(), the name and the ".yaml"
extension.
:param service: the service name
:type service: str
:param name: the name for the policy override
:type name: str
:returns: the full path name for the file
:rtype: os.PathLike[str]
"""
return os.path.join(policyd_dir_for(service), name + ".yaml")
def _policy_success_file():
"""Return the file name for a successful drop of policy.d overrides
:returns: the path name for the file.
:rtype: str
"""
return os.path.join(hookenv.charm_dir(), POLICYD_SUCCESS_FILENAME)
def remove_policy_success_file():
"""Remove the file that indicates successful policyd override."""
try:
os.remove(_policy_success_file())
except Exception:
pass
def policyd_status_message_prefix():
"""Return the prefix str for the status line.
"PO:" indicating that the policy overrides are in place, or "PO (broken):"
if the policy is supposed to be working but there is no success file.
:returns: the prefix
:rtype: str
"""
if os.path.isfile(_policy_success_file()):
return "PO:"
return "PO (broken):"
def process_policy_resource_file(resource_file,
service,
blacklist_paths=None,
blacklist_keys=None,
template_function=None):
"""Process the resource file (which should contain at least one yaml file)
and write those files to the service's policy.d directory.
The optional template_function is a function that accepts a python
string and has an opportunity to modify the document
prior to it being read by the yaml.safe_load() function and written to
disk. Note that this function does *not* say how the templating is done -
this is up to the charm to implement its chosen method.
    The param blacklist_paths is a list of paths (in the service's policy.d
    directory) that should not be touched.
The param blacklist_keys are keys that must not appear in the yaml file.
If they do, then the whole policy.d file fails.
The yaml file extracted from the resource_file (which is a zipped file) has
its file path reconstructed. This, also, must not match any path in the
black list.
If any error occurs, then the policy.d directory is cleared, the error is
written to the log, and the status line will eventually show as failed.
    :param resource_file: The zipped file to open and extract yaml files from.
:type resource_file: Union[AnyStr, os.PathLike[AnyStr]]
:param service: the service name to construct the policy.d directory for.
:type service: str
:param blacklist_paths: optional list of paths to leave alone
:type blacklist_paths: Union[None, List[str]]
:param blacklist_keys: optional list of keys that mustn't appear in the
        yaml files
:type blacklist_keys: Union[None, List[str]]
:param template_function: Optional function that can modify the yaml
document.
:type template_function: Union[None, Callable[[AnyStr], AnyStr]]
:returns: True if the processing was successful, False if not.
    :rtype: bool
"""
blacklist_paths = blacklist_paths or []
completed = False
try:
with open_and_filter_yaml_files(resource_file) as (zfp, gen):
# first clear out the policy.d directory and clear success
remove_policy_success_file()
clean_policyd_dir_for(service, blacklist_paths)
for name, ext, filename, zipinfo in gen:
# construct a name for the output file.
yaml_filename = path_for_policy_file(service, name)
if yaml_filename in blacklist_paths:
raise BadPolicyZipFile("policy.d name {} is blacklisted"
.format(yaml_filename))
with zfp.open(zipinfo) as fp:
doc = fp.read()
# if template_function is not None, then offer the document
# to the template function
if ext in POLICYD_TEMPLATE_EXTS:
if (template_function is None or not
callable(template_function)):
raise BadPolicyZipFile(
"Template {} but no template_function is "
"available".format(filename))
doc = template_function(doc)
yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
with open(yaml_filename, "wt") as f:
yaml.dump(yaml_doc, f)
            # Everything worked, so we record success.
completed = True
except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
hookenv.log("Processing {} failed: {}".format(resource_file, str(e)),
level=POLICYD_LOG_LEVEL_DEFAULT)
except IOError as e:
        # technically this shouldn't happen; it would be a programming error
        # as the filename comes from Juju and thus should exist.
hookenv.log(
"File {} failed with IOError. This really shouldn't happen"
" -- error: {}".format(resource_file, str(e)),
level=POLICYD_LOG_LEVEL_DEFAULT)
except Exception as e:
import traceback
hookenv.log("General Exception({}) during policyd processing"
.format(str(e)),
level=POLICYD_LOG_LEVEL_DEFAULT)
hookenv.log(traceback.format_exc())
finally:
if not completed:
hookenv.log("Processing {} failed: cleaning policy.d directory"
.format(resource_file),
level=POLICYD_LOG_LEVEL_DEFAULT)
clean_policyd_dir_for(service, blacklist_paths)
else:
# touch the success filename
hookenv.log("policy.d overrides installed.",
level=POLICYD_LOG_LEVEL_DEFAULT)
open(_policy_success_file(), "w").close()
return completed
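
Taken together, a classic charm wires this module in as shown in the module
docstring above; a minimal sketch (the hook decorators and the os_release()
helper are taken from the keystone examples quoted there, not new API):

    from charmhelpers.contrib.openstack.policyd import (
        maybe_do_policyd_overrides,
    )
    from charmhelpers.contrib.openstack.utils import os_release

    @hooks.hook('upgrade-charm')
    def upgrade_charm():
        maybe_do_policyd_overrides(os_release('keystone'), 'keystone')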

hooks/charmhelpers/contrib/openstack/utils.py

@@ -84,7 +84,8 @@ from charmhelpers.fetch import (
     SourceConfigError,
     GPGKeyError,
     get_upstream_version,
-    filter_missing_packages
+    filter_missing_packages,
+    ubuntu_apt_pkg as apt,
 )

 from charmhelpers.fetch.snap import (
@@ -96,6 +97,10 @@ from charmhelpers.fetch.snap import (
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
 from charmhelpers.contrib.openstack.exceptions import OSContextError
+from charmhelpers.contrib.openstack.policyd import (
+    policyd_status_message_prefix,
+    POLICYD_CONFIG_NAME,
+)

 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -443,8 +448,6 @@ def get_os_codename_package(package, fatal=True):
             # Second item in list is Version
             return line.split()[1]

-    import apt_pkg as apt
-
     cache = apt_cache()
     try:
@@ -658,7 +661,6 @@ def openstack_upgrade_available(package):
        a newer version of package.
     """

-    import apt_pkg as apt
     src = config('openstack-origin')
     cur_vers = get_os_version_package(package)
     if not cur_vers:
@@ -864,6 +866,12 @@ def _determine_os_workload_status(
         message = "Unit is ready"
         juju_log(message, 'INFO')

+    try:
+        if config(POLICYD_CONFIG_NAME):
+            message = "{} {}".format(policyd_status_message_prefix(), message)
+    except Exception:
+        pass
+
     return state, message

hooks/charmhelpers/contrib/storage/linux/ceph.py

@@ -301,6 +301,7 @@ class ReplicatedPool(Pool):
                  percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
+        self.percent_data = percent_data
         if pg_num:
             # Since the number of placement groups were specified, ensure
             # that there aren't too many created.
@@ -324,12 +325,24 @@ class ReplicatedPool(Pool):
             update_pool(client=self.service,
                         pool=self.name,
                         settings={'size': str(self.replicas)})
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+            if nautilus_or_later:
+                # Ensure we set the expected pool ratio
+                update_pool(client=self.service,
+                            pool=self.name,
+                            settings={
+                                'target_size_ratio': str(
+                                    self.percent_data / 100.0)})
             try:
                 set_app_name_for_pool(client=self.service,
                                       pool=self.name,
                                       name=self.app_name)
             except CalledProcessError:
                 log('Could not set app name for pool {}'.format(self.name),
                     level=WARNING)
+            if 'pg_autoscaler' in enabled_manager_modules():
+                try:
+                    enable_pg_autoscale(self.service, self.name)
+                except CalledProcessError as e:
+                    log('Could not configure auto scaling for pool {}: {}'
+                        .format(self.name, e), level=WARNING)
         except CalledProcessError:
             raise

@@ -382,6 +395,18 @@ class ErasurePool(Pool):
                                       name=self.app_name)
             except CalledProcessError:
                 log('Could not set app name for pool {}'.format(self.name),
                     level=WARNING)
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
+            if nautilus_or_later:
+                # Ensure we set the expected pool ratio
+                update_pool(client=self.service,
+                            pool=self.name,
+                            settings={
+                                'target_size_ratio': str(
+                                    self.percent_data / 100.0)})
+            if 'pg_autoscaler' in enabled_manager_modules():
+                try:
+                    enable_pg_autoscale(self.service, self.name)
+                except CalledProcessError as e:
+                    log('Could not configure auto scaling for pool {}: {}'
+                        .format(self.name, e), level=WARNING)
         except CalledProcessError:
             raise

@@ -389,6 +414,32 @@ class ErasurePool(Pool):
         Returns json formatted output"""


+def enabled_manager_modules():
+    """Return a list of enabled manager modules.
+
+    :rtype: List[str]
+    """
+    cmd = ['ceph', 'mgr', 'module', 'ls']
+    try:
+        modules = check_output(cmd)
+    except CalledProcessError as e:
+        log("Failed to list ceph modules: {}".format(e), WARNING)
+        return []
+    modules = json.loads(modules)
+    return modules['enabled_modules']
+
+
+def enable_pg_autoscale(service, pool_name):
+    """Enable Ceph's PG autoscaler for the specified pool.
+
+    :param service: six.string_types. The Ceph user name to run the command
+        under
+    :param pool_name: six.string_types. The name of the pool to enable
+        autoscaling on
+    :raise: CalledProcessError if the command fails
+    """
+    check_call(['ceph', '--id', service, 'osd', 'pool', 'set', pool_name,
+                'pg_autoscale_mode', 'on'])
+
+
 def get_mon_map(service):
     """
     Returns the current monitor map.
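
As a usage sketch (the user and pool names are illustrative), the two new
helpers compose the same way the pool classes above use them:

    if 'pg_autoscaler' in enabled_manager_modules():
        try:
            enable_pg_autoscale('admin', 'glance')
        except CalledProcessError as e:
            log('Could not enable autoscaling: {}'.format(e), level=WARNING)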

hooks/charmhelpers/core/host.py

@@ -1075,3 +1075,30 @@ def install_ca_cert(ca_cert, name=None):
     log("Installing new CA cert at: {}".format(cert_file), level=INFO)
     write_file(cert_file, ca_cert)
     subprocess.check_call(['update-ca-certificates', '--fresh'])
+
+
+def get_system_env(key, default=None):
+    """Get data from system environment as represented in ``/etc/environment``.
+
+    :param key: Key to look up
+    :type key: str
+    :param default: Value to return if key is not found
+    :type default: any
+    :returns: Value for key if found or contents of default parameter
+    :rtype: any
+    :raises: subprocess.CalledProcessError
+    """
+    env_file = '/etc/environment'
+    # use the shell and env(1) to parse the global environments file.  This is
+    # done to get the correct result even if the user has shell variable
+    # substitutions or other shell logic in that file.
+    output = subprocess.check_output(
+        ['env', '-i', '/bin/bash', '-c',
+         'set -a && source {} && env'.format(env_file)],
+        universal_newlines=True)
+    for k, v in (line.split('=', 1)
+                 for line in output.splitlines() if '=' in line):
+        if k == key:
+            return v
+    else:
+        return default
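
A short usage sketch (the fallback value mirrors the one used by the fetch
helpers later in this change):

    from charmhelpers.core.host import get_system_env
    path = get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')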

hooks/charmhelpers/core/host_factory/ubuntu.py

@@ -24,6 +24,7 @@ UBUNTU_RELEASES = (
     'bionic',
     'cosmic',
     'disco',
+    'eoan',
 )

@@ -93,7 +94,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
        the pkgcache argument is None. Be sure to add charmhelpers.fetch if
        you call this function, or pass an apt_pkg.Cache() instance.
     """
-    import apt_pkg
+    from charmhelpers.fetch import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache
         pkgcache = apt_cache()
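
This keeps existing call sites working without the ``python-apt`` binding;
for example, the Nautilus check used by the Ceph helpers above:

    from charmhelpers.core.host import cmp_pkgrevno
    nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0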

hooks/charmhelpers/fetch/__init__.py

@@ -103,6 +103,8 @@ if __platform__ == "ubuntu":
     apt_unhold = fetch.apt_unhold
     import_key = fetch.import_key
     get_upstream_version = fetch.get_upstream_version
+    apt_pkg = fetch.ubuntu_apt_pkg
+    get_apt_dpkg_env = fetch.get_apt_dpkg_env
 elif __platform__ == "centos":
     yum_search = fetch.yum_search

hooks/charmhelpers/fetch/ubuntu.py

@@ -13,14 +13,14 @@
 # limitations under the License.

 from collections import OrderedDict
-import os
 import platform
 import re
 import six
-import time
 import subprocess
+import sys
+import time

-from charmhelpers.core.host import get_distrib_codename
+from charmhelpers.core.host import get_distrib_codename, get_system_env

 from charmhelpers.core.hookenv import (
     log,
@@ -29,6 +29,7 @@ from charmhelpers.core.hookenv import (
     env_proxy_settings,
 )
 from charmhelpers.fetch import SourceConfigError, GPGKeyError
+from charmhelpers.fetch import ubuntu_apt_pkg

 PROPOSED_POCKET = (
     "# Proposed\n"
@@ -216,18 +217,42 @@ def filter_missing_packages(packages):
     )


-def apt_cache(in_memory=True, progress=None):
-    """Build and return an apt cache."""
-    from apt import apt_pkg
-    apt_pkg.init()
-    if in_memory:
-        apt_pkg.config.set("Dir::Cache::pkgcache", "")
-        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
-    return apt_pkg.Cache(progress)
+def apt_cache(*_, **__):
+    """Shim returning an object simulating the apt_pkg Cache.
+
+    :param _: Accept arguments for compatibility, not used.
+    :type _: any
+    :param __: Accept keyword arguments for compatibility, not used.
+    :type __: any
+    :returns: Object used to interrogate the system apt and dpkg databases.
+    :rtype: ubuntu_apt_pkg.Cache
+    """
+    if 'apt_pkg' in sys.modules:
+        # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg``
+        # module in conjunction with the apt_cache helper function, they may
+        # expect us to call ``apt_pkg.init()`` for them.
+        #
+        # Detect this situation, log a warning and make the call to
+        # ``apt_pkg.init()`` to keep the consumer's Python interpreter from
+        # crashing with a segmentation fault.
+        log('Support for use of the upstream ``apt_pkg`` module in '
+            'conjunction with charm-helpers is deprecated since 2019-06-25',
+            level=WARNING)
+        sys.modules['apt_pkg'].init()
+    return ubuntu_apt_pkg.Cache()
 def apt_install(packages, options=None, fatal=False):
-    """Install one or more packages."""
+    """Install one or more packages.
+
+    :param packages: Package(s) to install
+    :type packages: Union[str, List[str]]
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -244,7 +269,17 @@ def apt_install(packages, options=None, fatal=False):

 def apt_upgrade(options=None, fatal=False, dist=False):
-    """Upgrade all packages."""
+    """Upgrade all packages.
+
+    :param options: Options to pass on to apt-get
+    :type options: Union[None, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :param dist: Whether ``dist-upgrade`` should be used over ``upgrade``
+    :type dist: bool
+    :raises: subprocess.CalledProcessError
+    """
     if options is None:
         options = ['--option=Dpkg::Options::=--force-confold']
@@ -265,7 +300,15 @@ def apt_update(fatal=False):

 def apt_purge(packages, fatal=False):
-    """Purge one or more packages."""
+    """Purge one or more packages.
+
+    :param packages: Package(s) to purge
+    :type packages: Union[str, List[str]]
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     cmd = ['apt-get', '--assume-yes', 'purge']
     if isinstance(packages, six.string_types):
         cmd.append(packages)
@@ -276,7 +319,14 @@ def apt_purge(packages, fatal=False):

 def apt_autoremove(purge=True, fatal=False):
-    """Purge one or more packages."""
+    """Remove packages that are no longer required (apt-get autoremove).
+
+    :param purge: Whether the ``--purge`` option should be passed on or not.
+    :type purge: bool
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
+    :raises: subprocess.CalledProcessError
+    """
     cmd = ['apt-get', '--assume-yes', 'autoremove']
     if purge:
         cmd.append('--purge')
@@ -660,21 +710,22 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
                       retry_message="", cmd_env=None):
     """Run a command and retry until success or max_retries is reached.

-    :param: cmd: str: The apt command to run.
-    :param: max_retries: int: The number of retries to attempt on a fatal
-        command. Defaults to CMD_RETRY_COUNT.
-    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
-        Defaults to retry on exit code 1.
-    :param: retry_message: str: Optional log prefix emitted during retries.
-    :param: cmd_env: dict: Environment variables to add to the command run.
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param max_retries: The number of retries to attempt on a fatal
+                        command. Defaults to CMD_RETRY_COUNT.
+    :type max_retries: int
+    :param retry_exitcodes: Optional additional exit codes to retry.
+                            Defaults to retry on exit code 1.
+    :type retry_exitcodes: tuple
+    :param retry_message: Optional log prefix emitted during retries.
+    :type retry_message: str
+    :param cmd_env: Environment variables to add to the command run.
+    :type cmd_env: Union[None, Dict[str, str]]
     """
-    env = None
-    kwargs = {}
+    env = get_apt_dpkg_env()
     if cmd_env:
-        env = os.environ.copy()
         env.update(cmd_env)
-        kwargs['env'] = env

     if not retry_message:
         retry_message = "Failed executing '{}'".format(" ".join(cmd))
@@ -686,8 +737,7 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
     retry_results = (None,) + retry_exitcodes
     while result in retry_results:
         try:
-            # result = subprocess.check_call(cmd, env=env)
-            result = subprocess.check_call(cmd, **kwargs)
+            result = subprocess.check_call(cmd, env=env)
         except subprocess.CalledProcessError as e:
             retry_count = retry_count + 1
             if retry_count > max_retries:
if retry_count > max_retries: if retry_count > max_retries:
@@ -700,22 +750,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),

 def _run_apt_command(cmd, fatal=False):
     """Run an apt command with optional retries.

-    :param: cmd: str: The apt command to run.
-    :param: fatal: bool: Whether the command's output should be checked and
-        retried.
+    :param cmd: The apt command to run.
+    :type cmd: str
+    :param fatal: Whether the command's output should be checked and
+                  retried.
+    :type fatal: bool
     """
-    # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment.
-    cmd_env = {
-        'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')}
-
     if fatal:
         _run_with_retries(
-            cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,),
+            cmd, retry_exitcodes=(1, APT_NO_LOCK,),
             retry_message="Couldn't acquire DPKG lock")
     else:
-        env = os.environ.copy()
-        env.update(cmd_env)
-        subprocess.call(cmd, env=env)
+        subprocess.call(cmd, env=get_apt_dpkg_env())


 def get_upstream_version(package):
@@ -723,7 +769,6 @@ def get_upstream_version(package):
     @returns None (if not installed) or the upstream version
     """
-    import apt_pkg
     cache = apt_cache()
     try:
         pkg = cache[package]
@@ -735,4 +780,18 @@ def get_upstream_version(package):
         # package is known, but no version is currently installed.
         return None

-    return apt_pkg.upstream_version(pkg.current_ver.ver_str)
+    return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str)
+
+
+def get_apt_dpkg_env():
+    """Get environment suitable for execution of APT and DPKG tools.
+
+    We keep this in a helper function instead of in a global constant to
+    avoid execution on import of the library.
+
+    :returns: Environment suitable for execution of APT and DPKG tools.
+    :rtype: Dict[str, str]
+    """
+    # The fallback is used in the event of ``/etc/environment`` not
+    # containing a valid PATH variable.
+    return {'DEBIAN_FRONTEND': 'noninteractive',
+            'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')}
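
With the shim in place, existing lookups keep their shape; a sketch (the
package name is illustrative):

    from charmhelpers.fetch.ubuntu import apt_cache
    pkg = apt_cache()['dpkg']
    if pkg.current_ver:
        print(pkg.current_ver.ver_str)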

hooks/charmhelpers/fetch/ubuntu_apt_pkg.py

@@ -0,0 +1,237 @@
# Copyright 2019 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a subset of the ``python-apt`` module API.
Data collection is done through subprocess calls to ``apt-cache`` and
``dpkg-query`` commands.
The main purpose for this module is to avoid dependency on the
``python-apt`` python module.
The indicated python module is a wrapper around the ``apt`` C++ library
which is tightly connected to the version of the distribution it was
shipped on. It is not developed in a backward/forward compatible manner.
This in turn makes it incredibly hard to distribute as a wheel for a piece
of python software that supports a span of distro releases [0][1].
Upstream feedback like [2] does not give confidence that this will ever
change, so with this module we get rid of the dependency.
0: https://github.com/juju-solutions/layer-basic/pull/135
1: https://bugs.launchpad.net/charm-octavia/+bug/1824112
2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10
"""
import locale
import os
import subprocess
class _container(dict):
"""Simple container for attributes."""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
class Package(_container):
"""Simple container for package attributes."""
class Version(_container):
"""Simple container for version attributes."""
class Cache(object):
"""Simulation of ``apt_pkg`` Cache object."""
def __init__(self, progress=None):
pass
def __getitem__(self, package):
"""Get information about a package from apt and dpkg databases.
:param package: Name of package
:type package: str
:returns: Package object
:rtype: object
:raises: KeyError, subprocess.CalledProcessError
"""
apt_result = self._apt_cache_show([package])[package]
apt_result['name'] = apt_result.pop('package')
pkg = Package(apt_result)
dpkg_result = self._dpkg_list([package]).get(package, {})
current_ver = None
installed_version = dpkg_result.get('version')
if installed_version:
current_ver = Version({'ver_str': installed_version})
pkg.current_ver = current_ver
pkg.architecture = dpkg_result.get('architecture')
return pkg
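
    # A sketch of the intended use, mirroring the ``apt_pkg.Cache`` API (the
    # package name is illustrative):
    #
    #     pkg = Cache()['dpkg']
    #     pkg.name                 # 'dpkg'
    #     pkg.current_ver.ver_str  # installed version; current_ver is None
    #                              # when the package is not installed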
def _dpkg_list(self, packages):
"""Get data from system dpkg database for package.
:param packages: Packages to get data from
:type packages: List[str]
:returns: Structured data about installed packages, keys like
``dpkg-query --list``
:rtype: dict
:raises: subprocess.CalledProcessError
"""
pkgs = {}
cmd = ['dpkg-query', '--list']
cmd.extend(packages)
if locale.getlocale() == (None, None):
# subprocess calls out to locale.getpreferredencoding(False) to
# determine encoding. Workaround for Trusty where the
# environment appears to not be set up correctly.
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
try:
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
except subprocess.CalledProcessError as cp:
# ``dpkg-query`` may return error and at the same time have
# produced useful output, for example when asked for multiple
# packages where some are not installed
if cp.returncode != 1:
raise
output = cp.output
headings = []
for line in output.splitlines():
if line.startswith('||/'):
headings = line.split()
headings.pop(0)
continue
elif (line.startswith('|') or line.startswith('+') or
line.startswith('dpkg-query:')):
continue
else:
data = line.split(None, 4)
status = data.pop(0)
if status != 'ii':
continue
pkg = {}
pkg.update({k.lower(): v for k, v in zip(headings, data)})
if 'name' in pkg:
pkgs.update({pkg['name']: pkg})
return pkgs
def _apt_cache_show(self, packages):
"""Get data from system apt cache for package.
:param packages: Packages to get data from
:type packages: List[str]
:returns: Structured data about package, keys like
``apt-cache show``
:rtype: dict
:raises: subprocess.CalledProcessError
"""
pkgs = {}
cmd = ['apt-cache', 'show', '--no-all-versions']
cmd.extend(packages)
if locale.getlocale() == (None, None):
# subprocess calls out to locale.getpreferredencoding(False) to
# determine encoding. Workaround for Trusty where the
# environment appears to not be set up correctly.
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
try:
output = subprocess.check_output(cmd,
stderr=subprocess.STDOUT,
universal_newlines=True)
previous = None
pkg = {}
for line in output.splitlines():
if not line:
if 'package' in pkg:
pkgs.update({pkg['package']: pkg})
pkg = {}
continue
if line.startswith(' '):
if previous and previous in pkg:
pkg[previous] += os.linesep + line.lstrip()
continue
if ':' in line:
kv = line.split(':', 1)
key = kv[0].lower()
if key == 'n':
continue
previous = key
pkg.update({key: kv[1].lstrip()})
except subprocess.CalledProcessError as cp:
# ``apt-cache`` returns 100 if none of the packages asked for
# exist in the apt cache.
if cp.returncode != 100:
raise
return pkgs
def init():
"""Compability shim that does nothing."""
pass
def upstream_version(version):
"""Extracts upstream version from a version string.
Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/
apt-pkg/deb/debversion.cc#L259
:param version: Version string
:type version: str
:returns: Upstream version
:rtype: str
"""
if version:
version = version.split(':')[-1]
version = version.split('-')[0]
return version
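
# A behaviour sketch, following the split logic above:
#
#     upstream_version('2:17.0.0-0ubuntu1~cloud0')  # -> '17.0.0'
#     upstream_version('1.2.3-4')                   # -> '1.2.3'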
def version_compare(a, b):
"""Compare the given versions.
Call out to ``dpkg`` to make sure the code doing the comparison is
compatible with what the ``apt`` library would do. Mimic the return
values.
Upstream reference:
https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html
?highlight=version_compare#apt_pkg.version_compare
:param a: version string
:type a: str
:param b: version string
:type b: str
:returns: >0 if ``a`` is greater than ``b``, 0 if a equals b,
<0 if ``a`` is smaller than ``b``
:rtype: int
:raises: subprocess.CalledProcessError, RuntimeError
"""
for op in ('gt', 1), ('eq', 0), ('lt', -1):
try:
subprocess.check_call(['dpkg', '--compare-versions',
a, op[0], b],
stderr=subprocess.STDOUT,
universal_newlines=True)
return op[1]
except subprocess.CalledProcessError as cp:
if cp.returncode == 1:
continue
raise
    else:
        raise RuntimeError('Unable to compare "{}" and "{}", according to '
                           'our logic they are neither greater, equal nor '
                           'less than each other.'.format(a, b))
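
# A behaviour sketch, mimicking ``apt_pkg.version_compare``:
#
#     version_compare('1.1', '1.0')    # -> 1
#     version_compare('1.0', '1.0')    # -> 0
#     version_compare('2.0', '1:1.0')  # -> -1 (the epoch sorts higher)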

test-requirements.txt

@@ -12,7 +12,7 @@ requests>=2.18.4
 amulet>=1.14.3,<2.0;python_version=='2.7'
 bundletester>=0.6.1,<1.0;python_version=='2.7'
 python-ceilometerclient>=1.5.0
-python-cinderclient>=1.4.0
+python-cinderclient>=1.4.0,<5.0.0
 python-glanceclient>=1.1.0
 python-heatclient>=0.8.0
 python-keystoneclient>=1.7.1

tox.ini

@@ -1,12 +1,14 @@
-# Classic charm: ./tox.ini
+# Classic charm (with amulet): ./tox.ini
 # This file is managed centrally by release-tools and should not be modified
-# within individual charm repos.
+# within individual charm repos.  See the 'global' dir contents for available
+# choices of tox.ini for OpenStack Charms:
+# https://github.com/openstack-charmers/release-tools
 [tox]
-envlist = pep8,py27,py37
+envlist = pep8,py3
 skipsdist = True
-# NOTE(beisner): Avoid build/test env pollution by not enabling sitepackages.
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
 sitepackages = False
-# NOTE(beisner): Avoid false positives by not skipping missing interpreters.
+# NOTE: Avoid false positives by not skipping missing interpreters.
 skip_missing_interpreters = False

 [testenv]
@@ -16,15 +18,14 @@ setenv = VIRTUAL_ENV={envdir}
          AMULET_SETUP_TIMEOUT=5400
 install_command =
   pip install {opts} {packages}
-commands = stestr run {posargs}
+commands = stestr run --slowest {posargs}
 whitelist_externals = juju
-passenv = HOME TERM AMULET_* CS_API_*
+passenv = HOME TERM AMULET_* CS_* OS_* TEST_*

-[testenv:py27]
-basepython = python2.7
+[testenv:py3]
+basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = /bin/true

 [testenv:py35]
 basepython = python3.5
@@ -45,7 +46,7 @@ deps = -r{toxinidir}/requirements.txt
 basepython = python3
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests actions lib
+commands = flake8 {posargs} hooks unit_tests tests actions lib files
            charm-proof

 [testenv:cover]
@@ -59,7 +60,7 @@ setenv =
     PYTHON=coverage run
 commands =
     coverage erase
-    stestr run {posargs}
+    stestr run --slowest {posargs}
     coverage combine
     coverage html -d cover
     coverage xml -o cover/coverage.xml
@@ -80,7 +81,7 @@ omit =
 basepython = python3
 commands = {posargs}

-[testenv:func27-noop]
+[testenv:func-noop]
 # DRY RUN - For Debug
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
@@ -88,7 +89,7 @@ deps = -r{toxinidir}/requirements.txt
 commands =
     bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" -n --no-destroy

-[testenv:func27]
+[testenv:func]
 # Charm Functional Test
 # Run all gate tests which are +x (expected to always pass)
 basepython = python2.7
@@ -97,7 +98,7 @@ deps = -r{toxinidir}/requirements.txt
 commands =
     bundletester -vl DEBUG -r json -o func-results.json --test-pattern "gate-*" --no-destroy

-[testenv:func27-smoke]
+[testenv:func-smoke]
 # Charm Functional Test
 # Run a specific test as an Amulet smoke test (expected to always pass)
 basepython = python2.7
@@ -106,16 +107,7 @@ deps = -r{toxinidir}/requirements.txt
 commands =
     bundletester -vl DEBUG -r json -o func-results.json gate-basic-bionic-stein --no-destroy

-[testenv:func27-dfs]
-# Charm Functional Test
-# Run all deploy-from-source tests which are +x (may not always pass!)
-basepython = python2.7
-deps = -r{toxinidir}/requirements.txt
-       -r{toxinidir}/test-requirements.txt
-commands =
-    bundletester -vl DEBUG -r json -o func-results.json --test-pattern "dfs-*" --no-destroy
-
-[testenv:func27-dev]
+[testenv:func-dev]
 # Charm Functional Test
 # Run all development test targets which are +x (may not always pass!)
 basepython = python2.7