[hopem,r=]
Ensure all NICs are included when resolving MAC addresses, and filter out virtual interfaces. Closes-Bug: 1485655
parent c29499f318
commit 8d4cd89036
@@ -1,4 +1,4 @@
-branch: lp:charm-helpers
+branch: lp:~hopem/charm-helpers/allow-list-nics-return-all
 destination: hooks/charmhelpers
 include:
     - core

@@ -79,6 +79,12 @@ options:
       their corresponding bridge. The bridges will allow usage of flat or
       VLAN network types with Neutron and should match this defined in
       bridge-mappings.
+      .
+      Ports provided can be the name or MAC address of the interface to be
+      added to the bridge. If MAC addresses are used, you may provide multiple
+      bridge:mac for the same bridge so as to be able to configure multiple
+      units. In this case the charm will run through the provided MAC addresses
+      for each bridge until it finds one it can resolve to an interface name.
   run-internal-router:
     type: string
     default: all
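For illustration, a data-port value using the new multi-unit syntax might look like the following (bridge name and MAC addresses are hypothetical); each unit resolves whichever MAC belongs to one of its own physical interfaces and ignores the rest:

    data-port: "br-data:fa:16:3e:aa:aa:aa br-data:fa:16:3e:bb:bb:bb"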
@@ -152,15 +152,11 @@ class CommandLine(object):
         arguments = self.argument_parser.parse_args()
         argspec = inspect.getargspec(arguments.func)
         vargs = []
-        kwargs = {}
         for arg in argspec.args:
             vargs.append(getattr(arguments, arg))
         if argspec.varargs:
             vargs.extend(getattr(arguments, argspec.varargs))
-        if argspec.keywords:
-            for kwarg in argspec.keywords.items():
-                kwargs[kwarg] = getattr(arguments, kwarg)
-        output = arguments.func(*vargs, **kwargs)
+        output = arguments.func(*vargs)
         if getattr(arguments.func, '_cli_test_command', False):
             self.exit_code = 0 if output else 1
             output = ''
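The dropped keyword handling could never have worked as written: inspect.getargspec() reports the name of the **kwargs parameter, not a mapping, so calling .items() on it would raise. A quick illustration (the example function is made up):

    import inspect

    def example(a, b, *more, **extra):      # hypothetical function
        pass

    spec = inspect.getargspec(example)
    # ArgSpec(args=['a', 'b'], varargs='more', keywords='extra', defaults=None)
    # spec.keywords is the string 'extra', so the removed
    # argspec.keywords.items() call would have raised AttributeError.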
|
@ -26,7 +26,7 @@ from . import CommandLine # noqa
|
|||||||
"""
|
"""
|
||||||
Import the sub-modules which have decorated subcommands to register with chlp.
|
Import the sub-modules which have decorated subcommands to register with chlp.
|
||||||
"""
|
"""
|
||||||
import host # noqa
|
from . import host # noqa
|
||||||
import benchmark # noqa
|
from . import benchmark # noqa
|
||||||
import unitdata # noqa
|
from . import unitdata # noqa
|
||||||
from charmhelpers.core import hookenv # noqa
|
from . import hookenv # noqa
|
||||||
|
23
hooks/charmhelpers/cli/hookenv.py
Normal file
23
hooks/charmhelpers/cli/hookenv.py
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from . import cmdline
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
|
||||||
|
cmdline.subcommand('service-name')(hookenv.service_name)
|
||||||
|
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
|
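The `_wrapped` attribute used above exists because relation_id and remote_service_name are decorated with @cached in charmhelpers.core.hookenv; further down this sync the decorator gains a `wrapper._wrapped = func` line so the CLI can register the undecorated callables. A minimal sketch of that pattern (not the exact charm-helpers implementation):

    from functools import wraps

    def cached(func):
        cache = {}

        @wraps(func)
        def wrapper(*args, **kwargs):
            key = str((args, kwargs))
            if key not in cache:
                cache[key] = func(*args, **kwargs)
            return cache[key]

        # Expose the undecorated callable so other code (such as cli/hookenv.py
        # above) can register it directly.
        wrapper._wrapped = func
        return wrapper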
@@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresonding
        stable or next branches for the other_services."""
-       base_charms = ['mysql', 'mongodb']
+       base_charms = ['mysql', 'mongodb', 'nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series

@@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                         'ceph-osd', 'ceph-radosgw']
        # Most OpenStack subordinate charms do not expose an origin option
        # as that is controlled by the principle.
-       ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
+       ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']

        if self.openstack:
            for svc in services:

@@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create
 from charmhelpers.core.strutils import bool_from_string

 from charmhelpers.core.host import (
+    get_bond_master,
+    is_phy_iface,
     list_nics,
     get_nic_hwaddr,
     mkdir,

@@ -923,7 +925,6 @@ class NeutronContext(OSContextGenerator):


 class NeutronPortContext(OSContextGenerator):
-    NIC_PREFIXES = ['eth', 'bond']

     def resolve_ports(self, ports):
         """Resolve NICs not yet bound to bridge(s)

@@ -935,7 +936,18 @@ class NeutronPortContext(OSContextGenerator):

         hwaddr_to_nic = {}
         hwaddr_to_ip = {}
-        for nic in list_nics(self.NIC_PREFIXES):
+        for nic in list_nics():
+            # Ignore virtual interfaces (bond masters will be identified from
+            # their slaves)
+            if not is_phy_iface(nic):
+                continue
+
+            _nic = get_bond_master(nic)
+            if _nic:
+                log("Replacing iface '%s' with bond master '%s'" % (nic, _nic),
+                    level=DEBUG)
+                nic = _nic
+
             hwaddr = get_nic_hwaddr(nic)
             hwaddr_to_nic[hwaddr] = nic
             addresses = get_ipv4_addr(nic, fatal=False)
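To make the effect of the new filtering concrete, here is a hypothetical walk-through (interface names and MAC addresses are invented):

    # What list_nics() might return on a unit:
    nics = ['lo', 'virbr0', 'eth0', 'eth1']      # eth1 is a slave of bond0

    # is_phy_iface() drops 'lo' and 'virbr0' (they live under /sys/devices/virtual),
    # and get_bond_master('eth1') swaps the slave for its bond, so the map becomes:
    hwaddr_to_nic = {
        'fa:16:3e:aa:aa:aa': 'eth0',
        'fa:16:3e:bb:bb:bb': 'bond0',
    }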
@@ -961,7 +973,8 @@ class NeutronPortContext(OSContextGenerator):
                # trust it to be the real external network).
                resolved.append(entry)

-        return resolved
+        # Ensure no duplicates
+        return list(set(resolved))


 class OSConfigFlagContext(OSContextGenerator):

@@ -1280,15 +1293,19 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
+            # Map of {port/mac:bridge}
             portmap = parse_data_port_mappings(ports)
-            ports = portmap.values()
+            ports = portmap.keys()
+            # Resolve provided ports or mac addresses and filter out those
+            # already attached to a bridge.
             resolved = self.resolve_ports(ports)
+            # FIXME: is this necessary?
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved
                                if port in ports})
             if resolved:
-                return {bridge: normalized[port] for bridge, port in
+                return {bridge: normalized[port] for port, bridge in
                         six.iteritems(portmap) if port in normalized.keys()}

         return None
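A hypothetical end-to-end example of what DataPortContext now produces (all values invented):

    # data-port resolved to {port/mac: bridge} by parse_data_port_mappings():
    portmap = {'fa:16:3e:aa:aa:aa': 'br-data'}

    # resolve_ports() maps the MAC to a local NIC:
    resolved = ['eth0']
    normalized = {'fa:16:3e:aa:aa:aa': 'eth0'}

    # __call__() then returns {bridge: interface}:
    # {'br-data': 'eth0'}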
@@ -255,17 +255,30 @@ def network_manager():
         return 'neutron'


-def parse_mappings(mappings):
+def parse_mappings(mappings, key_rvalue=False):
+    """By default mappings are lvalue keyed.
+
+    If key_rvalue is True, the mapping will be reversed to allow multiple
+    configs for the same lvalue.
+    """
     parsed = {}
     if mappings:
         mappings = mappings.split()
         for m in mappings:
             p = m.partition(':')
-            key = p[0].strip()
-            if p[1]:
-                parsed[key] = p[2].strip()
+
+            if key_rvalue:
+                key_index = 2
+                val_index = 0
+                # if there is no rvalue skip to next
+                if not p[1]:
+                    continue
             else:
-                parsed[key] = ''
+                key_index = 0
+                val_index = 2
+
+            key = p[key_index].strip()
+            parsed[key] = p[val_index].strip()

     return parsed

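A quick worked example of the two keying modes (input values are hypothetical):

    # Default (lvalue-keyed), as used for bridge-mappings style input:
    parse_mappings('physnet1:br-ex')
    # -> {'physnet1': 'br-ex'}

    # key_rvalue=True (rvalue-keyed), which parse_data_port_mappings now uses
    # so that several MACs/interfaces can point at the same bridge:
    parse_mappings('br-ex:eth1 br-ex:aa:bb:cc:dd:ee:ff', key_rvalue=True)
    # -> {'eth1': 'br-ex', 'aa:bb:cc:dd:ee:ff': 'br-ex'}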
@@ -283,25 +296,25 @@ def parse_bridge_mappings(mappings):
 def parse_data_port_mappings(mappings, default_bridge='br-data'):
     """Parse data port mappings.

-    Mappings must be a space-delimited list of bridge:port mappings.
+    Mappings must be a space-delimited list of port:bridge mappings.

-    Returns dict of the form {bridge:port}.
+    Returns dict of the form {port:bridge} where port may be an mac address or
+    interface name.
     """
-    _mappings = parse_mappings(mappings)
+    # NOTE(dosaboy): we use rvalue for key to allow multiple values to be
+    # proposed for <port> since it may be a mac address which will differ
+    # across units this allowing first-known-good to be chosen.
+    _mappings = parse_mappings(mappings, key_rvalue=True)
     if not _mappings or list(_mappings.values()) == ['']:
         if not mappings:
             return {}

         # For backwards-compatibility we need to support port-only provided in
         # config.
-        _mappings = {default_bridge: mappings.split()[0]}
+        _mappings = {mappings.split()[0]: default_bridge}

-    bridges = _mappings.keys()
-    ports = _mappings.values()
-    if len(set(bridges)) != len(bridges):
-        raise Exception("It is not allowed to have more than one port "
-                        "configured on the same bridge")
-
+    ports = _mappings.keys()
     if len(set(ports)) != len(ports):
         raise Exception("It is not allowed to have the same port configured "
                         "on more than one bridge")
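Hypothetical calls showing the new {port: bridge} return format, including the backwards-compatible port-only form:

    parse_data_port_mappings('br-data:eth1 br-data:aa:bb:cc:dd:ee:ff')
    # -> {'eth1': 'br-data', 'aa:bb:cc:dd:ee:ff': 'br-data'}

    parse_data_port_mappings('eth1')
    # -> {'eth1': 'br-data'}   (default_bridge applied)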
@@ -24,6 +24,7 @@ import subprocess
 import json
 import os
 import sys
+import re

 import six
 import yaml

@@ -69,7 +70,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
 DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
                    'restricted main multiverse universe')

-
 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('oneiric', 'diablo'),
     ('precise', 'essex'),

@@ -118,6 +118,34 @@ SWIFT_CODENAMES = OrderedDict([
     ('2.3.0', 'liberty'),
 ])

+# >= Liberty version->codename mapping
+PACKAGE_CODENAMES = {
+    'nova-common': OrderedDict([
+        ('12.0.0', 'liberty'),
+    ]),
+    'neutron-common': OrderedDict([
+        ('7.0.0', 'liberty'),
+    ]),
+    'cinder-common': OrderedDict([
+        ('7.0.0', 'liberty'),
+    ]),
+    'keystone': OrderedDict([
+        ('8.0.0', 'liberty'),
+    ]),
+    'horizon-common': OrderedDict([
+        ('8.0.0', 'liberty'),
+    ]),
+    'ceilometer-common': OrderedDict([
+        ('5.0.0', 'liberty'),
+    ]),
+    'heat-common': OrderedDict([
+        ('5.0.0', 'liberty'),
+    ]),
+    'glance-common': OrderedDict([
+        ('11.0.0', 'liberty'),
+    ]),
+}
+
 DEFAULT_LOOPBACK_SIZE = '5G'


@@ -201,7 +229,16 @@ def get_os_codename_package(package, fatal=True):
         error_out(e)

     vers = apt.upstream_version(pkg.current_ver.ver_str)
+    match = re.match('^(\d)\.(\d)\.(\d)', vers)
+    if match:
+        vers = match.group(0)

-    try:
-        if 'swift' in pkg.name:
-            swift_vers = vers[:5]
+    # >= Liberty independent project versions
+    if (package in PACKAGE_CODENAMES and
+            vers in PACKAGE_CODENAMES[package]):
+        return PACKAGE_CODENAMES[package][vers]
+    else:
+        # < Liberty co-ordinated project versions
+        try:
+            if 'swift' in pkg.name:
+                swift_vers = vers[:5]
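The version normalisation plus PACKAGE_CODENAMES lookup works roughly like this (the package version string is hypothetical):

    import re

    vers = '7.0.0.0b3'                       # e.g. a neutron-common upstream version
    match = re.match(r'^(\d)\.(\d)\.(\d)', vers)
    if match:
        vers = match.group(0)                # -> '7.0.0'

    # PACKAGE_CODENAMES['neutron-common']['7.0.0'] -> 'liberty'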
@@ -43,9 +43,10 @@ def zap_disk(block_device):

     :param block_device: str: Full path of block device to clean.
     '''
+    # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
     # sometimes sgdisk exits non-zero; this is OK, dd will clean up
-    call(['sgdisk', '--zap-all', '--mbrtogpt',
-          '--clear', block_device])
+    call(['sgdisk', '--zap-all', '--', block_device])
+    call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
     dev_end = check_output(['blockdev', '--getsz',
                             block_device]).decode('UTF-8')
     gpt_end = int(dev_end.split()[0]) - 100

@@ -34,23 +34,6 @@ import errno
 import tempfile
 from subprocess import CalledProcessError

-try:
-    from charmhelpers.cli import cmdline
-except ImportError as e:
-    # due to the anti-pattern of partially synching charmhelpers directly
-    # into charms, it's possible that charmhelpers.cli is not available;
-    # if that's the case, they don't really care about using the cli anyway,
-    # so mock it out
-    if str(e) == 'No module named cli':
-        class cmdline(object):
-            @classmethod
-            def subcommand(cls, *args, **kwargs):
-                def _wrap(func):
-                    return func
-                return _wrap
-    else:
-        raise
-
 import six
 if not six.PY3:
     from UserDict import UserDict

@@ -91,6 +74,7 @@ def cached(func):
         res = func(*args, **kwargs)
         cache[key] = res
         return res
+    wrapper._wrapped = func
     return wrapper


@@ -190,7 +174,6 @@ def relation_type():
     return os.environ.get('JUJU_RELATION', None)


-@cmdline.subcommand()
 @cached
 def relation_id(relation_name=None, service_or_unit=None):
     """The relation ID for the current or a specified relation"""

@@ -216,13 +199,11 @@ def remote_unit():
     return os.environ.get('JUJU_REMOTE_UNIT', None)


-@cmdline.subcommand()
 def service_name():
     """The name service group this unit belongs to"""
     return local_unit().split('/')[0]


-@cmdline.subcommand()
 @cached
 def remote_service_name(relid=None):
     """The remote service name for a given relation-id (or the current relation)"""

@@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None):
     stopped = service_stop(service_name)
     # XXX: Support systemd too
     override_path = os.path.join(
-        init_dir, '{}.conf.override'.format(service_name))
+        init_dir, '{}.override'.format(service_name))
     with open(override_path, 'w') as fh:
         fh.write("manual\n")
     return stopped

@@ -86,7 +86,7 @@ def service_resume(service_name, init_dir=None):
     if init_dir is None:
         init_dir = "/etc/init"
     override_path = os.path.join(
-        init_dir, '{}.conf.override'.format(service_name))
+        init_dir, '{}.override'.format(service_name))
     if os.path.exists(override_path):
         os.unlink(override_path)
     started = service_start(service_name)

@@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False):
     return user_info


+def user_exists(username):
+    """Check if a user exists"""
+    try:
+        pwd.getpwnam(username)
+        user_exists = True
+    except KeyError:
+        user_exists = False
+    return user_exists
+
+
 def add_group(group_name, system_group=False):
     """Add a group to the system"""
     try:

@@ -280,6 +290,17 @@ def mounts():
     return system_mounts


+def fstab_mount(mountpoint):
+    """Mount filesystem using fstab"""
+    cmd_args = ['mount', mountpoint]
+    try:
+        subprocess.check_output(cmd_args)
+    except subprocess.CalledProcessError as e:
+        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
+        return False
+    return True
+
+
 def file_hash(path, hash_type='md5'):
     """
     Generate a hash checksum of the contents of 'path' or None if not found.

@@ -396,25 +417,80 @@ def pwgen(length=None):
     return(''.join(random_chars))


-def list_nics(nic_type):
+def is_phy_iface(interface):
+    """Returns True if interface is not virtual, otherwise False."""
+    if interface:
+        sys_net = '/sys/class/net'
+        if os.path.isdir(sys_net):
+            for iface in glob.glob(os.path.join(sys_net, '*')):
+                if '/virtual/' in os.path.realpath(iface):
+                    continue
+
+                if interface == os.path.basename(iface):
+                    return True
+
+    return False
+
+
+def get_bond_master(interface):
+    """Returns bond master if interface is bond slave otherwise None.
+
+    NOTE: the provided interface is expected to be physical
+    """
+    if interface:
+        iface_path = '/sys/class/net/%s' % (interface)
+        if os.path.exists(iface_path):
+            if '/virtual/' in os.path.realpath(iface_path):
+                return None
+
+            master = os.path.join(iface_path, 'master')
+            if os.path.exists(master):
+                master = os.path.realpath(master)
+                # make sure it is a bond master
+                if os.path.exists(os.path.join(master, 'bonding')):
+                    return os.path.basename(master)
+
+    return None
+
+
+def list_nics(nic_type=None):
     '''Return a list of nics of given type(s)'''
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
         int_types = nic_type

     interfaces = []
+    if nic_type:
         for int_type in int_types:
             cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
-            ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+            ip_output = subprocess.check_output(cmd).decode('UTF-8')
+            ip_output = ip_output.split('\n')
             ip_output = (line for line in ip_output if line)
             for line in ip_output:
                 if line.split()[1].startswith(int_type):
-                    matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
+                    matched = re.search('.*: (' + int_type +
+                                        r'[0-9]+\.[0-9]+)@.*', line)
                     if matched:
-                        interface = matched.groups()[0]
+                        iface = matched.groups()[0]
                     else:
-                        interface = line.split()[1].replace(":", "")
-                    interfaces.append(interface)
+                        iface = line.split()[1].replace(":", "")
+
+                    if iface not in interfaces:
+                        interfaces.append(iface)
+    else:
+        cmd = ['ip', 'a']
+        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
+        ip_output = (line.strip() for line in ip_output if line)

+        key = re.compile('^[0-9]+:\s+(.+):')
+        for line in ip_output:
+            matched = re.search(key, line)
+            if matched:
+                iface = matched.group(1)
+                iface = iface.partition("@")[0]
+                if iface not in interfaces:
+                    interfaces.append(iface)
+
     return interfaces

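is_phy_iface() and get_bond_master() both lean on the sysfs layout, where virtual devices resolve under /sys/devices/virtual and bond slaves carry a 'master' symlink; since bonds themselves are virtual, they are filtered out and only re-enter via their slaves, as the resolve_ports() comment above says. A quick illustration (paths are typical and will vary by host):

    import os

    os.path.realpath('/sys/class/net/lo')
    # -> '/sys/devices/virtual/net/lo'            (virtual: filtered out)

    os.path.realpath('/sys/class/net/eth0')
    # -> '/sys/devices/pci0000:00/.../net/eth0'   (physical: kept)

    os.path.realpath('/sys/class/net/eth1/master')
    # -> '/sys/devices/virtual/net/bond0'         (bond slave: reported as bond0)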
@@ -16,7 +16,9 @@

 import os
 import yaml
+
 from charmhelpers.core import hookenv
+from charmhelpers.core import host
 from charmhelpers.core import templating

 from charmhelpers.core.services.base import ManagerCallback

@@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback):

     :param str source: The template source file, relative to
         `$CHARM_DIR/templates`
+
     :param str target: The target to write the rendered template to
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
+    :param partial on_change_action: functools partial to be executed when
+                                     rendered file changes
     """
     def __init__(self, source, target,
-                 owner='root', group='root', perms=0o444):
+                 owner='root', group='root', perms=0o444,
+                 on_change_action=None):
         self.source = source
         self.target = target
         self.owner = owner
         self.group = group
         self.perms = perms
+        self.on_change_action = on_change_action

     def __call__(self, manager, service_name, event_name):
+        pre_checksum = ''
+        if self.on_change_action and os.path.isfile(self.target):
+            pre_checksum = host.file_hash(self.target)
         service = manager.get_service(service_name)
         context = {}
         for ctx in service.get('required_data', []):
             context.update(ctx)
         templating.render(self.source, self.target, context,
                           self.owner, self.group, self.perms)
+        if self.on_change_action:
+            if pre_checksum == host.file_hash(self.target):
+                hookenv.log(
+                    'No change detected: {}'.format(self.target),
+                    hookenv.DEBUG)
+            else:
+                self.on_change_action()
+

 # Convenience aliases for templates
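A hedged usage sketch of the new hook: restart a service only when the rendered file actually changed. The import path for TemplateCallback is assumed, and the template/service names are made up:

    from functools import partial

    from charmhelpers.core import host
    # TemplateCallback is the class patched above; the import path below is
    # an assumption.
    from charmhelpers.core.services.helpers import TemplateCallback

    callback = TemplateCallback(
        source='haproxy.cfg',                   # hypothetical template
        target='/etc/haproxy/haproxy.cfg',
        on_change_action=partial(host.service_restart, 'haproxy'),
    )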
@@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'kilo/proposed': 'trusty-proposed/kilo',
     'trusty-kilo/proposed': 'trusty-proposed/kilo',
     'trusty-proposed/kilo': 'trusty-proposed/kilo',
+    # Liberty
+    'liberty': 'trusty-updates/liberty',
+    'trusty-liberty': 'trusty-updates/liberty',
+    'trusty-liberty/updates': 'trusty-updates/liberty',
+    'trusty-updates/liberty': 'trusty-updates/liberty',
+    'liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-liberty/proposed': 'trusty-proposed/liberty',
+    'trusty-proposed/liberty': 'trusty-proposed/liberty',
 }

 # The order of this list is very important. Handlers should be listed in from

@@ -14,17 +14,23 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

-import amulet
-import ConfigParser
-import distro_info
 import io
+import json
 import logging
 import os
 import re
-import six
+import subprocess
 import sys
 import time
-import urlparse
+
+import amulet
+import distro_info
+import six
+from six.moves import configparser
+if six.PY3:
+    from urllib import parse as urlparse
+else:
+    import urlparse


 class AmuletUtils(object):

@@ -142,19 +148,23 @@ class AmuletUtils(object):

         for service_name in services_list:
             if (self.ubuntu_releases.index(release) >= systemd_switch or
-                    service_name == "rabbitmq-server"):
-                # init is systemd
+                    service_name in ['rabbitmq-server', 'apache2']):
+                # init is systemd (or regular sysv)
                 cmd = 'sudo service {} status'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0
             elif self.ubuntu_releases.index(release) < systemd_switch:
                 # init is upstart
                 cmd = 'sudo status {}'.format(service_name)
+                output, code = sentry_unit.run(cmd)
+                service_running = code == 0 and "start/running" in output

-            output, code = sentry_unit.run(cmd)
             self.log.debug('{} `{}` returned '
                            '{}'.format(sentry_unit.info['unit_name'],
                                        cmd, code))
-            if code != 0:
-                return "command `{}` returned {}".format(cmd, str(code))
+            if not service_running:
+                return u"command `{}` returned {} {}".format(
+                    cmd, output, str(code))
         return None

     def _get_config(self, unit, filename):

@@ -164,7 +174,7 @@ class AmuletUtils(object):
         # NOTE(beisner): by default, ConfigParser does not handle options
         # with no value, such as the flags used in the mysql my.cnf file.
         # https://bugs.python.org/issue7005
-        config = ConfigParser.ConfigParser(allow_no_value=True)
+        config = configparser.ConfigParser(allow_no_value=True)
         config.readfp(io.StringIO(file_contents))
         return config

@@ -450,15 +460,20 @@ class AmuletUtils(object):
                            cmd, code, output))
         return None

-    def get_process_id_list(self, sentry_unit, process_name):
+    def get_process_id_list(self, sentry_unit, process_name,
+                            expect_success=True):
         """Get a list of process ID(s) from a single sentry juju unit
         for a single process name.

-        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param sentry_unit: Amulet sentry instance (juju unit)
         :param process_name: Process name
+        :param expect_success: If False, expect the PID to be missing,
+            raise if it is present.
         :returns: List of process IDs
         """
-        cmd = 'pidof {}'.format(process_name)
+        cmd = 'pidof -x {}'.format(process_name)
+        if not expect_success:
+            cmd += " || exit 0 && exit 1"
         output, code = sentry_unit.run(cmd)
         if code != 0:
             msg = ('{} `{}` returned {} '
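The expect_success=False branch works by inverting pidof's exit status with a small shell suffix; roughly, the command that ends up running looks like this (the process name is hypothetical):

    # pidof exits 0 when at least one PID is found and non-zero otherwise, so
    # '<cmd> || exit 0 && exit 1' yields 0 when the process is absent (the
    # expected outcome) and 1 when it is unexpectedly still running.
    cmd = 'pidof -x {}'.format('my-hypothetical-daemon')
    cmd += " || exit 0 && exit 1"
    # -> "pidof -x my-hypothetical-daemon || exit 0 && exit 1"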
@ -467,14 +482,23 @@ class AmuletUtils(object):
|
|||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||||
return str(output).split()
|
return str(output).split()
|
||||||
|
|
||||||
def get_unit_process_ids(self, unit_processes):
|
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
||||||
"""Construct a dict containing unit sentries, process names, and
|
"""Construct a dict containing unit sentries, process names, and
|
||||||
process IDs."""
|
process IDs.
|
||||||
|
|
||||||
|
:param unit_processes: A dictionary of Amulet sentry instance
|
||||||
|
to list of process names.
|
||||||
|
:param expect_success: if False expect the processes to not be
|
||||||
|
running, raise if they are.
|
||||||
|
:returns: Dictionary of Amulet sentry instance to dictionary
|
||||||
|
of process names to PIDs.
|
||||||
|
"""
|
||||||
pid_dict = {}
|
pid_dict = {}
|
||||||
for sentry_unit, process_list in unit_processes.iteritems():
|
for sentry_unit, process_list in six.iteritems(unit_processes):
|
||||||
pid_dict[sentry_unit] = {}
|
pid_dict[sentry_unit] = {}
|
||||||
for process in process_list:
|
for process in process_list:
|
||||||
pids = self.get_process_id_list(sentry_unit, process)
|
pids = self.get_process_id_list(
|
||||||
|
sentry_unit, process, expect_success=expect_success)
|
||||||
pid_dict[sentry_unit].update({process: pids})
|
pid_dict[sentry_unit].update({process: pids})
|
||||||
return pid_dict
|
return pid_dict
|
||||||
|
|
||||||
@ -488,7 +512,7 @@ class AmuletUtils(object):
|
|||||||
return ('Unit count mismatch. expected, actual: {}, '
|
return ('Unit count mismatch. expected, actual: {}, '
|
||||||
'{} '.format(len(expected), len(actual)))
|
'{} '.format(len(expected), len(actual)))
|
||||||
|
|
||||||
for (e_sentry, e_proc_names) in expected.iteritems():
|
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
||||||
e_sentry_name = e_sentry.info['unit_name']
|
e_sentry_name = e_sentry.info['unit_name']
|
||||||
if e_sentry in actual.keys():
|
if e_sentry in actual.keys():
|
||||||
a_proc_names = actual[e_sentry]
|
a_proc_names = actual[e_sentry]
|
||||||
@ -507,11 +531,23 @@ class AmuletUtils(object):
|
|||||||
'{}'.format(e_proc_name, a_proc_name))
|
'{}'.format(e_proc_name, a_proc_name))
|
||||||
|
|
||||||
a_pids_length = len(a_pids)
|
a_pids_length = len(a_pids)
|
||||||
if e_pids_length != a_pids_length:
|
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
||||||
return ('PID count mismatch. {} ({}) expected, actual: '
|
|
||||||
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
||||||
e_pids_length, a_pids_length,
|
e_pids_length, a_pids_length,
|
||||||
a_pids))
|
a_pids))
|
||||||
|
|
||||||
|
# If expected is not bool, ensure PID quantities match
|
||||||
|
if not isinstance(e_pids_length, bool) and \
|
||||||
|
a_pids_length != e_pids_length:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool True, ensure 1 or more PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is True and a_pids_length < 1:
|
||||||
|
return fail_msg
|
||||||
|
# If expected is bool False, ensure 0 PIDs exist
|
||||||
|
elif isinstance(e_pids_length, bool) and \
|
||||||
|
e_pids_length is False and a_pids_length != 0:
|
||||||
|
return fail_msg
|
||||||
else:
|
else:
|
||||||
self.log.debug('PID check OK: {} {} {}: '
|
self.log.debug('PID check OK: {} {} {}: '
|
||||||
'{}'.format(e_sentry_name, e_proc_name,
|
'{}'.format(e_sentry_name, e_proc_name,
|
||||||
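With the bool handling above, the expected mapping passed to the PID validation helper can now mix exact counts with booleans; a hypothetical example (the sentry variable and process names are invented):

    expected_processes = {
        nova_compute_sentry: {
            'nova-compute': 1,        # exactly one PID expected
            'nova-api': True,         # one or more PIDs expected
            'nova-network': False,    # no PIDs expected (e.g. after a pause action)
        },
    }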
@ -531,3 +567,30 @@ class AmuletUtils(object):
|
|||||||
return 'Dicts within list are not identical'
|
return 'Dicts within list are not identical'
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
def run_action(self, unit_sentry, action,
|
||||||
|
_check_output=subprocess.check_output):
|
||||||
|
"""Run the named action on a given unit sentry.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
|
||||||
|
@return action_id.
|
||||||
|
"""
|
||||||
|
unit_id = unit_sentry.info["unit_name"]
|
||||||
|
command = ["juju", "action", "do", "--format=json", unit_id, action]
|
||||||
|
self.log.info("Running command: %s\n" % " ".join(command))
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
action_id = data[u'Action queued with id']
|
||||||
|
return action_id
|
||||||
|
|
||||||
|
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
|
||||||
|
"""Wait for a given action, returning if it completed or not.
|
||||||
|
|
||||||
|
_check_output parameter is used for dependency injection.
|
||||||
|
"""
|
||||||
|
command = ["juju", "action", "fetch", "--format=json", "--wait=0",
|
||||||
|
action_id]
|
||||||
|
output = _check_output(command, universal_newlines=True)
|
||||||
|
data = json.loads(output)
|
||||||
|
return data.get(u"status") == "completed"
|
||||||
|
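Typical usage from a test, assuming `u` is an AmuletUtils instance and the charm under test exposes a 'pause' action (both assumptions):

    action_id = u.run_action(sentry_unit, 'pause')
    assert u.wait_on_action(action_id), "'pause' action did not complete"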
@@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresonding
        stable or next branches for the other_services."""
-       base_charms = ['mysql', 'mongodb']
+       base_charms = ['mysql', 'mongodb', 'nrpe']

        if self.series in ['precise', 'trusty']:
            base_series = self.series

@@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
                         'ceph-osd', 'ceph-radosgw']
        # Most OpenStack subordinate charms do not expose an origin option
        # as that is controlled by the principle.
-       ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
+       ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe']

        if self.openstack:
            for svc in services: