Adding charmhelpers

parent fde4c9cd38
commit 86cef5347f

Makefile (1 line changed)
@@ -17,7 +17,6 @@ bin/charm_helpers_sync.py:

 sync: bin/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
-	patch -p0 < ~/profsvcs/canonical/charms/charmhelpers.patch

 publish: lint unit_test
 	bzr push lp:charms/plumgrid-director
hooks/charmhelpers/__init__.py (new file, 38 lines)
@@ -0,0 +1,38 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

# Bootstrap charm-helpers, installing its dependencies if necessary using
# only standard libraries.
import subprocess
import sys

try:
    import six  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
    import six  # flake8: noqa

try:
    import yaml  # flake8: noqa
except ImportError:
    if sys.version_info.major == 2:
        subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
    else:
        subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
    import yaml  # flake8: noqa
hooks/charmhelpers/contrib/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
hooks/charmhelpers/contrib/hahelpers/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
hooks/charmhelpers/contrib/hahelpers/apache.py (new file, 82 lines)
@@ -0,0 +1,82 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

import subprocess

from charmhelpers.core.hookenv import (
    config as config_get,
    relation_get,
    relation_ids,
    related_units as relation_list,
    log,
    INFO,
)


def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        if cn:
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
            ssl_key_attr = 'ssl_key_{}'.format(cn)
        else:
            ssl_cert_attr = 'ssl_cert'
            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)


def get_ca_cert():
    ca_cert = config_get('ssl_ca')
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
                                           rid=r_id, unit=unit)
    return ca_cert


def install_ca_cert(ca_cert):
    if ca_cert:
        with open('/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt',
                  'w') as crt:
            crt.write(ca_cert)
        subprocess.check_call(['update-ca-certificates', '--fresh'])
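These certificate helpers are normally driven from a charm's config-changed or identity-service relation hooks. A minimal sketch of that usage follows (hypothetical hook code written for this review, assuming a Juju hook environment; it is not part of the commit):

# Hypothetical hook snippet: fetch cert material and trust the CA if present.
from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
    install_ca_cert,
)


def configure_https():
    # Charm config is consulted first, then the identity-service relation.
    cert, key = get_cert()
    ca_cert = get_ca_cert()
    if ca_cert:
        # Writes the CA to /usr/local/share/ca-certificates and refreshes.
        install_ca_cert(ca_cert)
    if cert and key:
        # Write cert/key to the service's SSL paths and reload the web server.
        pass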
hooks/charmhelpers/contrib/hahelpers/cluster.py (new file, 297 lines)
@@ -0,0 +1,297 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

#
# Copyright 2012 Canonical Ltd.
#
# Authors:
#  James Page <james.page@ubuntu.com>
#  Adam Gandelman <adamg@ubuntu.com>
#

"""
Helpers for clustering and determining "cluster leadership" and other
clustering-related helpers.
"""

import subprocess
import os

from socket import gethostname as get_unit_hostname

import six

from charmhelpers.core.hookenv import (
    log,
    relation_ids,
    related_units as relation_list,
    relation_get,
    config as config_get,
    INFO,
    ERROR,
    WARNING,
    unit_get,
)
from charmhelpers.core.decorators import (
    retry_on_exception,
)
from charmhelpers.core.strutils import (
    bool_from_string,
)

DC_RESOURCE_NAME = 'DC'


class HAIncompleteConfig(Exception):
    pass


class CRMResourceNotFound(Exception):
    pass


def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on two mechanisms to determine leadership:
      1. If the charm is part of a corosync cluster, call corosync to
         determine leadership.
      2. If the charm is not part of a corosync cluster, the leader is
         determined as being "the alive unit with the lowest unit number". In
         other words, the oldest surviving unit.
    """
    if is_clustered():
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
            clustered = relation_get('clustered',
                                     rid=r_id,
                                     unit=unit)
            if clustered:
                return True
    return False


def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        return False
    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    return False


@retry_on_exception(5, base_delay=2, exc_type=CRMResourceNotFound)
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False


def is_leader(resource):
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)


def peer_units(peer_relation="cluster"):
    peers = []
    for r_id in (relation_ids(peer_relation) or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers


def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    peers = {}
    for r_id in relation_ids(peer_relation):
        for unit in relation_list(r_id):
            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
    return peers


def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
        if remote_unit_no < local_unit_no:
            return False
    return True


def eligible_leader(resource):
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)


def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if (None not in rel_state) and ('' not in rel_state):
                return True
    return False


def determine_api_port(public_port, singlenode_mode=False):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    '''
    i = 0
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    if https():
        i += 1
    return public_port - (i * 10)


def determine_apache_port(public_port, singlenode_mode=False):
    '''
    Description: Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    '''
    i = 0
    if singlenode_mode:
        i += 1
    elif len(peer_units()) > 0 or is_clustered():
        i += 1
    return public_port - (i * 10)


def get_hacluster_config(exclude_keys=None):
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)
    missing = []
    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf


def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config tempating object to inspect for
                  a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                       VIP address.
    '''
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
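A short sketch of how a charm hook typically consumes these cluster helpers when deciding where a service should listen and whether this unit should act. The port value and the 'res_ks_vip' resource name are illustrative only; this snippet is not part of the commit:

# Hypothetical hook snippet using the clustering helpers above.
from charmhelpers.contrib.hahelpers.cluster import (
    determine_api_port,
    determine_apache_port,
    is_elected_leader,
)

PUBLIC_PORT = 5000  # illustrative public API port


def configure_listeners():
    # The backend API drops 10 ports per frontend in play (haproxy, and the
    # HTTPS reverse proxy when https() is satisfied); apache/haproxy keep
    # the outward-facing port.
    api_port = determine_api_port(PUBLIC_PORT)
    apache_port = determine_apache_port(PUBLIC_PORT)
    return api_port, apache_port


def maybe_run_one_off_task():
    # Only one unit in the cluster should run one-off tasks such as
    # database migrations.
    if is_elected_leader('res_ks_vip'):
        pass  # run the task here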
hooks/charmhelpers/contrib/network/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
hooks/charmhelpers/contrib/network/ip.py (new file, 450 lines)
@@ -0,0 +1,450 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import glob
import re
import subprocess
import six
import socket

from functools import partial

from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    log,
    WARNING,
)

try:
    import netifaces
except ImportError:
    apt_install('python-netifaces')
    import netifaces

try:
    import netaddr
except ImportError:
    apt_install('python-netaddr')
    import netaddr


def _validate_cidr(network):
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)


def no_ip_found_error_out(network):
    errmsg = ("No IP address found in network: %s" % network)
    raise ValueError(errmsg)


def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)

        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None


def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        address = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False

    return address.version == 6


def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    if address in network:
        return True
    else:
        return False


def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None


get_iface_for_address = partial(_get_for_address, key='iface')


get_netmask_for_address = partial(_get_for_address, key='netmask')


def format_ipv6_addr(address):
    """If address is IPv6, wrap it in '[]' otherwise return None.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    if is_ipv6(address):
        return "[%s]" % address

    return None


def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any."""
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)


get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')


def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                ll_key = re.compile("(.+)%.*")
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)


def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer


@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []


def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    b_regex = "%s/*/bridge" % vnic_dir
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_regex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    brif_regex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_regex)]


def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True

    return False


def is_ip(address):
    """
    Returns True if address is a valid IP address.
    """
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(address)
        return True
    except socket.error:
        return False


def ns_query(address):
    try:
        import dns.resolver
    except ImportError:
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None


def get_host_ip(hostname, fallback=None):
    """
    Resolves the IP for a given hostname, or returns
    the input if it is already an IP.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except:
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr


def get_hostname(address, fqdn=True):
    """
    Resolves hostname for given IP, or returns the input
    if it is already a hostname.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)
        if not result:
            return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
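The address helpers are most often used to pick the address a unit should advertise on a relation. A minimal sketch (the 'os-internal-network' config key and the overall flow are illustrative, not part of this commit):

# Hypothetical hook snippet: choose the address to publish to a relation.
from charmhelpers.core.hookenv import config, unit_get
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    format_ipv6_addr,
    get_host_ip,
)


def advertised_address():
    # 'os-internal-network' is an illustrative config key holding a CIDR
    # such as 192.168.1.0/24; fall back to the unit's private-address.
    addr = get_address_in_network(config('os-internal-network'),
                                  fallback=unit_get('private-address'))
    # Resolve a hostname to an IP if needed.
    addr = get_host_ip(addr, fallback=addr)
    # IPv6 literals need to be bracketed in most config files.
    return format_ipv6_addr(addr) or addr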
hooks/charmhelpers/contrib/network/ovs/__init__.py (new file, 96 lines)
@@ -0,0 +1,96 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

''' Helpers for interacting with OpenvSwitch '''
import subprocess
import os
from charmhelpers.core.hookenv import (
    log, WARNING
)
from charmhelpers.core.host import (
    service
)


def add_bridge(name):
    ''' Add the named bridge to openvswitch '''
    log('Creating bridge {}'.format(name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-br", name])


def del_bridge(name):
    ''' Delete the named bridge from openvswitch '''
    log('Deleting bridge {}'.format(name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-br", name])


def add_bridge_port(name, port, promisc=False):
    ''' Add a port to the named openvswitch bridge '''
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    if promisc:
        subprocess.check_call(["ip", "link", "set", port, "promisc", "on"])
    else:
        subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


def del_bridge_port(name, port):
    ''' Delete a port from the named openvswitch bridge '''
    log('Deleting port {} from bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "down"])
    subprocess.check_call(["ip", "link", "set", port, "promisc", "off"])


def set_manager(manager):
    ''' Set the controller for the local openvswitch '''
    log('Setting manager for local ovs to {}'.format(manager))
    subprocess.check_call(['ovs-vsctl', 'set-manager',
                           'ssl:{}'.format(manager)])


CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'


def get_certificate():
    ''' Read openvswitch certificate from disk '''
    if os.path.exists(CERT_PATH):
        log('Reading ovs certificate from {}'.format(CERT_PATH))
        with open(CERT_PATH, 'r') as cert:
            full_cert = cert.read()
            begin_marker = "-----BEGIN CERTIFICATE-----"
            end_marker = "-----END CERTIFICATE-----"
            begin_index = full_cert.find(begin_marker)
            end_index = full_cert.rfind(end_marker)
            if end_index == -1 or begin_index == -1:
                raise RuntimeError("Certificate does not contain valid begin"
                                   " and end markers.")
            full_cert = full_cert[begin_index:(end_index + len(end_marker))]
            return full_cert
    else:
        log('Certificate not found', level=WARNING)
        return None


def full_restart():
    ''' Full restart and reload of openvswitch '''
    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')
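A sketch of the typical call sequence when wiring a data port into Open vSwitch with the helpers above (hypothetical bridge and NIC names, not part of the commit):

# Hypothetical snippet: create a bridge and attach a physical NIC to it.
from charmhelpers.contrib.network.ovs import (
    add_bridge,
    add_bridge_port,
    full_restart,
)


def setup_data_bridge():
    add_bridge('br-data')  # idempotent thanks to --may-exist
    add_bridge_port('br-data', 'eth1', promisc=True)


def recover_ovs():
    # Force a reload of the openvswitch kernel module if the switch is wedged.
    full_restart()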
hooks/charmhelpers/contrib/openstack/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
hooks/charmhelpers/contrib/openstack/alternatives.py (new file, 33 lines)
@@ -0,0 +1,33 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

''' Helper for managing alternatives for file conflict resolution '''

import subprocess
import shutil
import os


def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if (os.path.exists(target) and not os.path.islink(target)):
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    cmd = [
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority)
    ]
    subprocess.check_call(cmd)
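install_alternative() is used when more than one owner (a package and the charm, or two packages) wants the same config path. A short sketch with hypothetical paths (not part of the commit):

# Hypothetical snippet: point /etc/nova/nova.conf at a charm-managed copy
# via update-alternatives, moving any package-shipped file to a .bak first.
from charmhelpers.contrib.openstack.alternatives import install_alternative

install_alternative(name='nova.conf',
                    target='/etc/nova/nova.conf',
                    source='/var/lib/charm/nova/nova.conf',
                    priority=60)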
hooks/charmhelpers/contrib/openstack/amulet/__init__.py (new file, 15 lines)
@@ -0,0 +1,15 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
hooks/charmhelpers/contrib/openstack/amulet/deployment.py (new file, 146 lines)
@@ -0,0 +1,146 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)


class OpenStackAmuletDeployment(AmuletDeployment):
    """OpenStack amulet deployment.

    This class inherits from AmuletDeployment and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, series=None, openstack=None, source=None, stable=True):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

        Determine if the local branch being tested is derived from its
        stable or next (dev) branch, and based on this, use the corresonding
        stable or next branches for the other_services."""
        base_charms = ['mysql', 'mongodb']

        if self.series in ['precise', 'trusty']:
            base_series = self.series
        else:
            base_series = self.current_next

        if self.stable:
            for svc in other_services:
                temp = 'lp:charms/{}/{}'
                svc['location'] = temp.format(base_series,
                                              svc['name'])
        else:
            for svc in other_services:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}/{}'
                    svc['location'] = temp.format(base_series,
                                                  svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin/source."""
        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)

        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']
        # Openstack subordinate charms do not expose an origin option as that
        # is controlled by the principle
        ignore = ['neutron-openvswitch']

        if self.openstack:
            for svc in services:
                if svc['name'] not in use_source + ignore:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc['name'] in use_source and svc['name'] not in ignore:
                    config = {'source': self.source}
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""
        for service, config in six.iteritems(configs):
            self.d.configure(service, config)

    def _get_openstack_release(self):
        """Get openstack release.

        Return an integer representing the enum value of the openstack
        release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
         self.trusty_kilo, self.vivid_kilo) = range(10)

        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
            ('utopic', None): self.utopic_juno,
            ('vivid', None): self.vivid_kilo}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]
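In charm functional tests this class is subclassed once per charm. A condensed sketch of how a test module might drive it (hypothetical service list and config values written for this review; the _deploy() call is assumed to come from the AmuletDeployment base class and none of this is part of the commit):

# Hypothetical amulet test setup using OpenStackAmuletDeployment.
from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment,
)


class BasicDeployment(OpenStackAmuletDeployment):
    def __init__(self, series='trusty', openstack=None, source=None,
                 stable=False):
        super(BasicDeployment, self).__init__(series, openstack, source,
                                               stable)
        this_service = {'name': 'plumgrid-director'}
        other_services = [{'name': 'mysql'}, {'name': 'keystone'}]
        # Branch locations and openstack-origin/source are filled in by the
        # helpers above before the deployment is executed.
        self._add_services(this_service, other_services)
        self._configure_services({'keystone': {'admin-password': 'openstack'}})
        self._deploy()  # assumed to be provided by AmuletDeployment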
hooks/charmhelpers/contrib/openstack/amulet/utils.py (new file, 294 lines)
@@ -0,0 +1,294 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import logging
import os
import time
import urllib

import glanceclient.v1.client as glance_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client

import six

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
)

DEBUG = logging.DEBUG
ERROR = logging.ERROR


class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.

    This class inherits from AmuletUtils and has additional support
    that is specifically for use by OpenStack charms.
    """

    def __init__(self, log_level=ERROR):
        """Initialize the deployment environment."""
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
                               public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
        are used to find the matching endpoint.
        """
        found = False
        for ep in endpoints:
            self.log.debug('endpoint: {}'.format(repr(ep)))
            if (admin_port in ep.adminurl and
                    internal_port in ep.internalurl and
                    public_port in ep.publicurl):
                found = True
                actual = {'id': ep.id,
                          'region': ep.region,
                          'adminurl': ep.adminurl,
                          'internalurl': ep.internalurl,
                          'publicurl': ep.publicurl,
                          'service_id': ep.service_id}
                ret = self._validate_dict_data(expected, actual)
                if ret:
                    return 'unexpected endpoint data - {}'.format(ret)

        if not found:
            return 'endpoint not found'

    def validate_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
        expected service catalog endpoints.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for k, v in six.iteritems(expected):
            if k in actual:
                ret = self._validate_dict_data(expected[k][0], actual[k][0])
                if ret:
                    return self.endpoint_error(k, ret)
            else:
                return "endpoint {} does not exist".format(k)
        return ret

    def validate_tenant_data(self, expected, actual):
        """Validate tenant data.

        Validate a list of actual tenant data vs list of expected tenant
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'description': act.description,
                     'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected tenant data - {}".format(ret)
            if not found:
                return "tenant {} does not exist".format(e['name'])
        return ret

    def validate_role_data(self, expected, actual):
        """Validate role data.

        Validate a list of actual role data vs a list of expected role
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'name': act.name, 'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected role data - {}".format(ret)
            if not found:
                return "role {} does not exist".format(e['name'])
        return ret

    def validate_user_data(self, expected, actual):
        """Validate user data.

        Validate a list of actual user data vs a list of expected user
        data.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        for e in expected:
            found = False
            for act in actual:
                a = {'enabled': act.enabled, 'name': act.name,
                     'email': act.email, 'tenantId': act.tenantId,
                     'id': act.id}
                if e['name'] == a['name']:
                    found = True
                    ret = self._validate_dict_data(e, a)
                    if ret:
                        return "unexpected user data - {}".format(ret)
            if not found:
                return "user {} does not exist".format(e['name'])
        return ret

    def validate_flavor_data(self, expected, actual):
        """Validate flavor data.

        Validate a list of actual flavors vs a list of expected flavors.
        """
        self.log.debug('actual: {}'.format(repr(actual)))
        act = [a.name for a in actual]
        return self._validate_list_data(expected, act)

    def tenant_exists(self, keystone, tenant):
        """Return True if tenant exists."""
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
        unit = keystone_sentry
        service_ip = unit.relation('shared-db',
                                   'mysql:shared-db')['private-address']
        ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_keystone_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with the keystone public endpoint."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return keystone_client.Client(username=user, password=password,
                                      tenant_name=tenant, auth_url=ep)

    def authenticate_glance_admin(self, keystone):
        """Authenticates admin user with glance."""
        ep = keystone.service_catalog.url_for(service_type='image',
                                              endpoint_type='adminURL')
        return glance_client.Client(ep, token=keystone.auth_token)

    def authenticate_nova_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with nova-api."""
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
            proxies = {'http': http_proxy}
            opener = urllib.FancyURLopener(proxies)
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None

        return image

    def delete_image(self, glance, image):
        """Delete the specified image."""
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
if num_after != (num_before - 1):
|
||||||
|
self.log.error('image deletion timed out')
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
def create_instance(self, nova, image_name, instance_name, flavor):
|
||||||
|
"""Create the specified instance."""
|
||||||
|
image = nova.images.find(name=image_name)
|
||||||
|
flavor = nova.flavors.find(name=flavor)
|
||||||
|
instance = nova.servers.create(name=instance_name, image=image,
|
||||||
|
flavor=flavor)
|
||||||
|
|
||||||
|
count = 1
|
||||||
|
status = instance.status
|
||||||
|
while status != 'ACTIVE' and count < 60:
|
||||||
|
time.sleep(3)
|
||||||
|
instance = nova.servers.get(instance.id)
|
||||||
|
status = instance.status
|
||||||
|
self.log.debug('instance status: {}'.format(status))
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
if status != 'ACTIVE':
|
||||||
|
self.log.error('instance creation timed out')
|
||||||
|
return None
|
||||||
|
|
||||||
|
return instance
|
||||||
|
|
||||||
|
def delete_instance(self, nova, instance):
|
||||||
|
"""Delete the specified instance."""
|
||||||
|
num_before = len(list(nova.servers.list()))
|
||||||
|
nova.servers.delete(instance)
|
||||||
|
|
||||||
|
count = 1
|
||||||
|
num_after = len(list(nova.servers.list()))
|
||||||
|
while num_after != (num_before - 1) and count < 10:
|
||||||
|
time.sleep(3)
|
||||||
|
num_after = len(list(nova.servers.list()))
|
||||||
|
self.log.debug('number of instances: {}'.format(num_after))
|
||||||
|
count += 1
|
||||||
|
|
||||||
|
if num_after != (num_before - 1):
|
||||||
|
self.log.error('instance deletion timed out')
|
||||||
|
return False
|
||||||
|
|
||||||
|
return True
|
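The helpers above are normally driven from an amulet test rather than called directly. A minimal sketch of how they chain together; the deployment object, sentry name, credentials and expected values are illustrative assumptions, not part of this commit (the surrounding class is OpenStackAmuletUtils in upstream charm-helpers):

# Sketch only: driving the helpers above from an amulet test.
# 'deployment', the sentry name and the credentials are placeholders.
from charmhelpers.contrib.openstack.amulet.utils import OpenStackAmuletUtils

u = OpenStackAmuletUtils()
keystone_sentry = deployment.sentry['keystone'][0]   # assumed sentry lookup
keystone = u.authenticate_keystone_admin(keystone_sentry, user='admin',
                                         password='openstack', tenant='admin')
expected = [{'name': 'admin', 'enabled': True,
             'description': 'Created by Juju'}]
error = u.validate_tenant_data(expected, keystone.tenants.list())
if error:
    raise RuntimeError(error)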
1343	hooks/charmhelpers/contrib/openstack/context.py	Normal file
File diff suppressed because it is too large
18	hooks/charmhelpers/contrib/openstack/files/__init__.py	Normal file
@@ -0,0 +1,18 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

# dummy __init__.py to fool syncer into thinking this is a syncable python
# module
32	hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh	Executable file
@@ -0,0 +1,32 @@
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2'});
do
    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
    if [ $? != 0 ]; then
        date >> $LOGFILE
        echo $output >> $LOGFILE
        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
        CRITICAL=1
        NOTACTIVE="${NOTACTIVE} $appserver"
    fi
done

if [ $CRITICAL = 1 ]; then
    echo "CRITICAL:${NOTACTIVE}"
    exit 2
fi

echo "OK: All haproxy instances looking good"
exit 0
30	hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh	Executable file
@@ -0,0 +1,30 @@
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

# These should be config options at some stage
CURRQthrsh=0
MAXQthrsh=100

AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)

for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
do
    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
    MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)

    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
        exit 2
    fi
done

echo "OK: All haproxy queue depths looking good"
exit 0
146	hooks/charmhelpers/contrib/openstack/ip.py	Normal file
@@ -0,0 +1,146 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

from charmhelpers.core.hookenv import (
    config,
    unit_get,
)
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    is_ipv6,
    get_ipv6_addr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered

from functools import partial

PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'

ADDRESS_MAP = {
    PUBLIC: {
        'config': 'os-public-network',
        'fallback': 'public-address'
    },
    INTERNAL: {
        'config': 'os-internal-network',
        'fallback': 'private-address'
    },
    ADMIN: {
        'config': 'os-admin-network',
        'fallback': 'private-address'
    }
}


def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    scheme = 'http'
    if 'https' in configs.complete_contexts():
        scheme = 'https'
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        address = "[{}]".format(address)
    return '%s://%s' % (scheme, address)


def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network. If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    resolved_address = None
    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address


def endpoint_url(configs, url_template, port, endpoint_type=PUBLIC,
                 override=None):
    """Returns the correct endpoint URL to advertise to Keystone.

    This method provides the correct endpoint URL which should be advertised to
    the keystone charm for endpoint creation. This method allows for the url to
    be overridden to force a keystone endpoint to have specific URL for any of
    the defined scopes (admin, internal, public).

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param url_template: str format string for creating the url template. Only
                         two values will be passed - the scheme+hostname
                         returned by the canonical_url and the port.
    :param endpoint_type: str endpoint type to resolve.
    :param override: str the name of the config option which overrides the
                     endpoint URL defined by the charm itself. None will
                     disable any overrides (default).
    """
    if override:
        # Return any user-defined overrides for the keystone endpoint URL.
        user_value = config(override)
        if user_value:
            return user_value.strip()

    return url_template % (canonical_url(configs, endpoint_type), port)


public_endpoint = partial(endpoint_url, endpoint_type=PUBLIC)

internal_endpoint = partial(endpoint_url, endpoint_type=INTERNAL)

admin_endpoint = partial(endpoint_url, endpoint_type=ADMIN)
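A charm typically combines these helpers when advertising its endpoints to keystone. A hedged sketch under assumed values; the port (9696) and url template are illustrative and not part of this commit:

# Sketch only: building keystone endpoint URLs from the helpers above.
# 'configs' is the charm's OSConfigRenderer instance; port is an example.
from charmhelpers.contrib.openstack.ip import (
    endpoint_url, PUBLIC, INTERNAL, ADMIN,
)

def keystone_endpoints(configs):
    # endpoint_url() prefixes canonical_url(), so each value looks like
    # 'http://10.0.0.10:9696/' (or the https / [ipv6] variants).
    template = '%s:%s/'
    return {
        'public_url': endpoint_url(configs, template, 9696, PUBLIC),
        'internal_url': endpoint_url(configs, template, 9696, INTERNAL),
        'admin_url': endpoint_url(configs, template, 9696, ADMIN),
    }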
337	hooks/charmhelpers/contrib/openstack/neutron.py	Normal file
@@ -0,0 +1,337 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

# Various utilities for dealing with Neutron and the renaming from Quantum.

import six
from subprocess import check_output

from charmhelpers.core.hookenv import (
    config,
    log,
    ERROR,
)

from charmhelpers.contrib.openstack.utils import os_release


def headers_package():
    """Ensures correct linux-headers for running kernel are installed,
    for building DKMS package"""
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    return 'linux-headers-%s' % kver

QUANTUM_CONF_DIR = '/etc/quantum'


def kernel_version():
    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
    kver = check_output(['uname', '-r']).decode('UTF-8').strip()
    kver = kver.split('.')
    return (int(kver[0]), int(kver[1]))


def determine_dkms_package():
    """ Determine which DKMS package should be used based on kernel version """
    # NOTE: 3.13 kernels have support for GRE and VXLAN native
    if kernel_version() >= (3, 13):
        return []
    else:
        return ['openvswitch-datapath-dkms']


# legacy


def quantum_plugins():
    from charmhelpers.contrib.openstack import context
    return {
        'ovs': {
            'config': '/etc/quantum/plugins/openvswitch/'
                      'ovs_quantum_plugin.ini',
            'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
                      'OVSQuantumPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server']
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=QUANTUM_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server']
        }
    }

NEUTRON_CONF_DIR = '/etc/neutron'


def neutron_plugins():
    from charmhelpers.contrib.openstack import context
    release = os_release('nova-common')
    plugins = {
        'ovs': {
            'config': '/etc/neutron/plugins/openvswitch/'
                      'ovs_neutron_plugin.ini',
            'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
                      'OVSNeutronPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        },
        'nsx': {
            'config': '/etc/neutron/plugins/vmware/nsx.ini',
            'driver': 'vmware',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-vmware'],
            'server_services': ['neutron-server']
        },
        'n1kv': {
            'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
            'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['neutron-plugin-cisco']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-cisco'],
            'server_services': ['neutron-server']
        },
        'Calico': {
            'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
            'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': ['calico-felix',
                         'bird',
                         'neutron-dhcp-agent',
                         'nova-api-metadata'],
            'packages': [[headers_package()] + determine_dkms_package(),
                         ['calico-compute',
                          'bird',
                          'neutron-dhcp-agent',
                          'nova-api-metadata']],
            'server_packages': ['neutron-server', 'calico-control'],
            'server_services': ['neutron-server']
        },
        'vsp': {
            'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
            'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron',
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
            'server_services': ['neutron-server']
        },
        'plumgrid': {
            'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
            'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
            'contexts': [
                context.SharedDBContext(user=config('database-user'),
                                        database=config('database'),
                                        ssl_dir=NEUTRON_CONF_DIR)],
            'services': [],
            'packages': [['plumgrid-lxc'],
                         ['iovisor-dkms'],
                         ['plumgrid-puppet']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-plumgrid'],
            'server_services': ['neutron-server']
        }
    }
    if release >= 'icehouse':
        # NOTE: patch in ml2 plugin for icehouse onwards
        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        plugins['ovs']['server_packages'] = ['neutron-server',
                                             'neutron-plugin-ml2']
        # NOTE: patch in vmware renames nvp->nsx for icehouse onwards
        plugins['nvp'] = plugins['nsx']
    return plugins


def neutron_plugin_attribute(plugin, attr, net_manager=None):
    manager = net_manager or network_manager()
    if manager == 'quantum':
        plugins = quantum_plugins()
    elif manager == 'neutron':
        plugins = neutron_plugins()
    else:
        log("Network manager '%s' does not support plugins." % (manager),
            level=ERROR)
        raise Exception

    try:
        _plugin = plugins[plugin]
    except KeyError:
        log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
        raise Exception

    try:
        return _plugin[attr]
    except KeyError:
        return None


def network_manager():
    '''
    Deals with the renaming of Quantum to Neutron in H and any situations
    that require compatibility (eg, deploying H with network-manager=quantum,
    upgrading from G).
    '''
    release = os_release('nova-common')
    manager = config('network-manager').lower()

    if manager not in ['quantum', 'neutron']:
        return manager

    if release in ['essex']:
        # E does not support neutron
        log('Neutron networking not supported in Essex.', level=ERROR)
        raise Exception
    elif release in ['folsom', 'grizzly']:
        # neutron is named quantum in F and G
        return 'quantum'
    else:
        # ensure accurate naming for all releases post-H
        return 'neutron'


def parse_mappings(mappings):
    parsed = {}
    if mappings:
        mappings = mappings.split(' ')
        for m in mappings:
            p = m.partition(':')
            if p[1] == ':':
                parsed[p[0].strip()] = p[2].strip()

    return parsed


def parse_bridge_mappings(mappings):
    """Parse bridge mappings.

    Mappings must be a space-delimited list of provider:bridge mappings.

    Returns dict of the form {provider:bridge}.
    """
    return parse_mappings(mappings)


def parse_data_port_mappings(mappings, default_bridge='br-data'):
    """Parse data port mappings.

    Mappings must be a space-delimited list of bridge:port mappings.

    Returns dict of the form {bridge:port}.
    """
    _mappings = parse_mappings(mappings)
    if not _mappings:
        if not mappings:
            return {}

        # For backwards-compatibility we need to support port-only provided in
        # config.
        _mappings = {default_bridge: mappings.split(' ')[0]}

    bridges = _mappings.keys()
    ports = _mappings.values()
    if len(set(bridges)) != len(bridges):
        raise Exception("It is not allowed to have more than one port "
                        "configured on the same bridge")

    if len(set(ports)) != len(ports):
        raise Exception("It is not allowed to have the same port configured "
                        "on more than one bridge")

    return _mappings


def parse_vlan_range_mappings(mappings):
    """Parse vlan range mappings.

    Mappings must be a space-delimited list of provider:start:end mappings.

    Returns dict of the form {provider: (start, end)}.
    """
    _mappings = parse_mappings(mappings)
    if not _mappings:
        return {}

    mappings = {}
    for p, r in six.iteritems(_mappings):
        mappings[p] = tuple(r.split(':'))

    return mappings
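The parse_* helpers above all build on parse_mappings; a short illustrative example of the input and output shapes they handle (the config strings below are made up for the example):

# Illustrative values only - the mapping strings are invented.
from charmhelpers.contrib.openstack.neutron import (
    parse_bridge_mappings,
    parse_data_port_mappings,
    parse_vlan_range_mappings,
)

print(parse_bridge_mappings('physnet1:br-ex physnet2:br-data'))
# {'physnet1': 'br-ex', 'physnet2': 'br-data'}

print(parse_data_port_mappings('eth1'))
# {'br-data': 'eth1'}  (a port-only value falls back to the default bridge)

print(parse_vlan_range_mappings('physnet1:1000:2000'))
# {'physnet1': ('1000', '2000')}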
18	hooks/charmhelpers/contrib/openstack/templates/__init__.py	Normal file
@@ -0,0 +1,18 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

# dummy __init__.py to fool syncer into thinking this is a syncable python
# module
15	hooks/charmhelpers/contrib/openstack/templates/ceph.conf	Normal file
@@ -0,0 +1,15 @@
###############################################################################
# [ WARNING ]
# cinder configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[global]
{% if auth -%}
auth_supported = {{ auth }}
keyring = /etc/ceph/$cluster.$name.keyring
mon host = {{ mon_hosts }}
{% endif -%}
log to syslog = {{ use_syslog }}
err to syslog = {{ use_syslog }}
clog to syslog = {{ use_syslog }}
17	hooks/charmhelpers/contrib/openstack/templates/git.upstart	Normal file
@@ -0,0 +1,17 @@
description "{{ service_description }}"
author "Juju {{ service_name }} Charm <juju@localhost>"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

exec start-stop-daemon --start --chuid {{ user_name }} \
    --chdir {{ start_dir }} --name {{ process_name }} \
    --exec {{ executable_name }} -- \
    {% for config_file in config_files -%}
    --config-file={{ config_file }} \
    {% endfor -%}
    {% if log_file -%}
    --log-file={{ log_file }}
    {% endif -%}
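The context keys this template consumes are visible above. A hedged sketch of the kind of dictionary a charm-side context generator might return when rendering git.upstart; only the key names come from the template, every value below is an invented example:

# Illustrative context for git.upstart - values are examples only.
git_upstart_context = {
    'service_description': 'OpenStack Compute',
    'service_name': 'nova-compute',
    'user_name': 'nova',
    'start_dir': '/var/lib/nova',
    'process_name': 'nova-compute',
    'executable_name': '/usr/local/bin/nova-compute',
    'config_files': ['/etc/nova/nova.conf',
                     '/etc/nova/nova-compute.conf'],
    'log_file': '/var/log/nova/nova-compute.log',
}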
58	hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg	Normal file
@@ -0,0 +1,58 @@
global
    log {{ local_host }} local0
    log {{ local_host }} local1 notice
    maxconn 20000
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode tcp
    option tcplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
{% if haproxy_client_timeout -%}
    timeout client {{ haproxy_client_timeout }}
{% else -%}
    timeout client 30000
{% endif -%}

{% if haproxy_server_timeout -%}
    timeout server {{ haproxy_server_timeout }}
{% else -%}
    timeout server 30000
{% endif -%}

listen stats {{ stat_port }}
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

{% if frontends -%}
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
    bind *:{{ ports[0] }}
    {% if ipv6 -%}
    bind :::{{ ports[0] }}
    {% endif -%}
    {% for frontend in frontends -%}
    acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
    use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
    {% endfor -%}
    default_backend {{ service }}_{{ default_backend }}

{% for frontend in frontends -%}
backend {{ service }}_{{ frontend }}
    balance leastconn
    {% for unit, address in frontends[frontend]['backends'].items() -%}
    server {{ unit }} {{ address }}:{{ ports[1] }} check
    {% endfor %}
{% endfor -%}
{% endfor -%}
{% endif -%}
@@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
@@ -0,0 +1,24 @@
{% if endpoints -%}
{% for ext_port in ext_ports -%}
Listen {{ ext_port }}
{% endfor -%}
{% for address, endpoint, ext, int in endpoints -%}
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
    SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
    ProxyPass / http://localhost:{{ int }}/
    ProxyPassReverse / http://localhost:{{ int }}/
    ProxyPreserveHost on
</VirtualHost>
{% endfor -%}
<Proxy *>
    Order deny,allow
    Allow from all
</Proxy>
<Location />
    Order allow,deny
    Allow from all
</Location>
{% endif -%}
@@ -0,0 +1,9 @@
{% if auth_host -%}
[keystone_authtoken]
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% endif -%}
@@ -0,0 +1,22 @@
{% if rabbitmq_host or rabbitmq_hosts -%}
[oslo_messaging_rabbit]
rabbit_userid = {{ rabbitmq_user }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
rabbit_password = {{ rabbitmq_password }}
{% if rabbitmq_hosts -%}
rabbit_hosts = {{ rabbitmq_hosts }}
{% if rabbitmq_ha_queues -%}
rabbit_ha_queues = True
rabbit_durable_queues = False
{% endif -%}
{% else -%}
rabbit_host = {{ rabbitmq_host }}
{% endif -%}
{% if rabbit_ssl_port -%}
rabbit_use_ssl = True
rabbit_port = {{ rabbit_ssl_port }}
{% if rabbit_ssl_ca -%}
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
{% endif -%}
{% endif -%}
{% endif -%}
@@ -0,0 +1,14 @@
{% if zmq_host -%}
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = redis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = ring
{% endif -%}
{% endif -%}
295	hooks/charmhelpers/contrib/openstack/templating.py	Normal file
@@ -0,0 +1,295 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

import os

import six

from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    log,
    ERROR,
    INFO
)
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES

try:
    from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
except ImportError:
    # python-jinja2 may not be installed yet, or we're running unittests.
    FileSystemLoader = ChoiceLoader = Environment = exceptions = None


class OSConfigException(Exception):
    pass


def get_loader(templates_dir, os_release):
    """
    Create a jinja2.ChoiceLoader containing template dirs up to
    and including os_release. If a release-specific template directory
    is missing at templates_dir, it will be omitted from the loader.
    templates_dir is added to the bottom of the search list as a base
    loading dir.

    A charm may also ship a templates dir with this module
    and it will be appended to the bottom of the search list, eg::

        hooks/charmhelpers/contrib/openstack/templates

    :param templates_dir (str): Base template directory containing release
        sub-directories.
    :param os_release (str): OpenStack release codename to construct template
        loader.
    :returns: jinja2.ChoiceLoader constructed with a list of
        jinja2.FilesystemLoaders, ordered in descending
        order by OpenStack release.
    """
    tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
                 for rel in six.itervalues(OPENSTACK_CODENAMES)]

    if not os.path.isdir(templates_dir):
        log('Templates directory not found @ %s.' % templates_dir,
            level=ERROR)
        raise OSConfigException

    # the bottom contains templates_dir and possibly a common templates dir
    # shipped with the helper.
    loaders = [FileSystemLoader(templates_dir)]
    helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
    if os.path.isdir(helper_templates):
        loaders.append(FileSystemLoader(helper_templates))

    for rel, tmpl_dir in tmpl_dirs:
        if os.path.isdir(tmpl_dir):
            loaders.insert(0, FileSystemLoader(tmpl_dir))
        if rel == os_release:
            break
    log('Creating choice loader with dirs: %s' %
        [l.searchpath for l in loaders], level=INFO)
    return ChoiceLoader(loaders)


class OSConfigTemplate(object):
    """
    Associates a config file template with a list of context generators.
    Responsible for constructing a template context based on those generators.
    """
    def __init__(self, config_file, contexts):
        self.config_file = config_file

        if hasattr(contexts, '__call__'):
            self.contexts = [contexts]
        else:
            self.contexts = contexts

        self._complete_contexts = []

    def context(self):
        ctxt = {}
        for context in self.contexts:
            _ctxt = context()
            if _ctxt:
                ctxt.update(_ctxt)
                # track interfaces for every complete context.
                [self._complete_contexts.append(interface)
                 for interface in context.interfaces
                 if interface not in self._complete_contexts]
        return ctxt

    def complete_contexts(self):
        '''
        Return a list of interfaces that have satisfied contexts.
        '''
        if self._complete_contexts:
            return self._complete_contexts
        self.context()
        return self._complete_contexts


class OSConfigRenderer(object):
    """
    This class provides a common templating system to be used by OpenStack
    charms. It is intended to help charms share common code and templates,
    and ease the burden of managing config templates across multiple OpenStack
    releases.

    Basic usage::

        # import some common context generators from charmhelpers
        from charmhelpers.contrib.openstack import context

        # Create a renderer object for a specific OS release.
        configs = OSConfigRenderer(templates_dir='/tmp/templates',
                                   openstack_release='folsom')
        # register some config files with context generators.
        configs.register(config_file='/etc/nova/nova.conf',
                         contexts=[context.SharedDBContext(),
                                   context.AMQPContext()])
        configs.register(config_file='/etc/nova/api-paste.ini',
                         contexts=[context.IdentityServiceContext()])
        configs.register(config_file='/etc/haproxy/haproxy.conf',
                         contexts=[context.HAProxyContext()])
        # write out a single config
        configs.write('/etc/nova/nova.conf')
        # write out all registered configs
        configs.write_all()

    **OpenStack Releases and template loading**

    When the object is instantiated, it is associated with a specific OS
    release. This dictates how the template loader will be constructed.

    The constructed loader attempts to load the template from several places
    in the following order:
    - from the most recent OS release-specific template dir (if one exists)
    - the base templates_dir
    - a template directory shipped in the charm with this helper file.

    For the example above, '/tmp/templates' contains the following structure::

        /tmp/templates/nova.conf
        /tmp/templates/api-paste.ini
        /tmp/templates/grizzly/api-paste.ini
        /tmp/templates/havana/api-paste.ini

    Since it was registered with the grizzly release, it first searches
    the grizzly directory for nova.conf, then the templates dir.

    When writing api-paste.ini, it will find the template in the grizzly
    directory.

    If the object were created with folsom, it would fall back to the
    base templates dir for its api-paste.ini template.

    This system should help manage changes in config files through
    openstack releases, allowing charms to fall back to the most recently
    updated config template for a given release.

    The haproxy.conf, since it is not shipped in the templates dir, will
    be loaded from the module directory's template directory, eg
    $CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
    us to ship common templates (haproxy, apache) with the helpers.

    **Context generators**

    Context generators are used to generate template contexts during hook
    execution. Doing so may require inspecting service relations, charm
    config, etc. When registered, a config file is associated with a list
    of generators. When a template is rendered and written, all context
    generators are called in a chain to generate the context dictionary
    passed to the jinja2 template. See context.py for more info.
    """
    def __init__(self, templates_dir, openstack_release):
        if not os.path.isdir(templates_dir):
            log('Could not locate templates dir %s' % templates_dir,
                level=ERROR)
            raise OSConfigException

        self.templates_dir = templates_dir
        self.openstack_release = openstack_release
        self.templates = {}
        self._tmpl_env = None

        if None in [Environment, ChoiceLoader, FileSystemLoader]:
            # if this code is running, the object is created pre-install hook.
            # jinja2 shouldn't get touched until the module is reloaded on next
            # hook execution, with proper jinja2 bits successfully imported.
            apt_install('python-jinja2')

    def register(self, config_file, contexts):
        """
        Register a config file with a list of context generators to be called
        during rendering.
        """
        self.templates[config_file] = OSConfigTemplate(config_file=config_file,
                                                       contexts=contexts)
        log('Registered config file: %s' % config_file, level=INFO)

    def _get_tmpl_env(self):
        if not self._tmpl_env:
            loader = get_loader(self.templates_dir, self.openstack_release)
            self._tmpl_env = Environment(loader=loader)

    def _get_template(self, template):
        self._get_tmpl_env()
        template = self._tmpl_env.get_template(template)
        log('Loaded template from %s' % template.filename, level=INFO)
        return template

    def render(self, config_file):
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException
        ctxt = self.templates[config_file].context()

        _tmpl = os.path.basename(config_file)
        try:
            template = self._get_template(_tmpl)
        except exceptions.TemplateNotFound:
            # if no template is found with basename, try looking for it
            # using a munged full path, eg:
            #   /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
            _tmpl = '_'.join(config_file.split('/')[1:])
            try:
                template = self._get_template(_tmpl)
            except exceptions.TemplateNotFound as e:
                log('Could not load template from %s by %s or %s.' %
                    (self.templates_dir, os.path.basename(config_file), _tmpl),
                    level=ERROR)
                raise e

        log('Rendering from template: %s' % _tmpl, level=INFO)
        return template.render(ctxt)

    def write(self, config_file):
        """
        Write a single config file, raises if config file is not registered.
        """
        if config_file not in self.templates:
            log('Config not registered: %s' % config_file, level=ERROR)
            raise OSConfigException

        _out = self.render(config_file)

        with open(config_file, 'wb') as out:
            out.write(_out)

        log('Wrote template %s.' % config_file, level=INFO)

    def write_all(self):
        """
        Write out all registered config files.
        """
        [self.write(k) for k in six.iterkeys(self.templates)]

    def set_release(self, openstack_release):
        """
        Resets the template environment and generates a new template loader
        based on the new openstack release.
        """
        self._tmpl_env = None
        self.openstack_release = openstack_release
        self._get_tmpl_env()

    def complete_contexts(self):
        '''
        Returns a list of context interfaces that yield a complete context.
        '''
        interfaces = []
        [interfaces.extend(i.complete_contexts())
         for i in six.itervalues(self.templates)]
        return interfaces
642
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
642
hooks/charmhelpers/contrib/openstack/utils.py
Normal file
@ -0,0 +1,642 @@
|
|||||||
|
#!/usr/bin/python
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
# Common python helper functions used for OpenStack charms.
|
||||||
|
from collections import OrderedDict
|
||||||
|
from functools import wraps
|
||||||
|
|
||||||
|
import subprocess
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
import six
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from charmhelpers.contrib.network import ip
|
||||||
|
|
||||||
|
from charmhelpers.core import (
|
||||||
|
unitdata,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
log as juju_log,
|
||||||
|
charm_dir,
|
||||||
|
INFO,
|
||||||
|
relation_ids,
|
||||||
|
relation_set
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.storage.linux.lvm import (
|
||||||
|
deactivate_lvm_volume_group,
|
||||||
|
is_lvm_physical_volume,
|
||||||
|
remove_lvm_physical_volume,
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.contrib.network.ip import (
|
||||||
|
get_ipv6_addr
|
||||||
|
)
|
||||||
|
|
||||||
|
from charmhelpers.core.host import lsb_release, mounts, umount
|
||||||
|
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
||||||
|
from charmhelpers.contrib.python.packages import pip_install
|
||||||
|
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
||||||
|
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
||||||
|
|
||||||
|
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
||||||
|
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
||||||
|
|
||||||
|
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
||||||
|
'restricted main multiverse universe')
|
||||||
|
|
||||||
|
|
||||||
|
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
||||||
|
('oneiric', 'diablo'),
|
||||||
|
('precise', 'essex'),
|
||||||
|
('quantal', 'folsom'),
|
||||||
|
('raring', 'grizzly'),
|
||||||
|
('saucy', 'havana'),
|
||||||
|
('trusty', 'icehouse'),
|
||||||
|
('utopic', 'juno'),
|
||||||
|
('vivid', 'kilo'),
|
||||||
|
])
|
||||||
|
|
||||||
|
|
||||||
|
OPENSTACK_CODENAMES = OrderedDict([
|
||||||
|
('2011.2', 'diablo'),
|
||||||
|
('2012.1', 'essex'),
|
||||||
|
('2012.2', 'folsom'),
|
||||||
|
('2013.1', 'grizzly'),
|
||||||
|
('2013.2', 'havana'),
|
||||||
|
('2014.1', 'icehouse'),
|
||||||
|
('2014.2', 'juno'),
|
||||||
|
('2015.1', 'kilo'),
|
||||||
|
])
|
||||||
|
|
||||||
|
# The ugly duckling
|
||||||
|
SWIFT_CODENAMES = OrderedDict([
|
||||||
|
('1.4.3', 'diablo'),
|
||||||
|
('1.4.8', 'essex'),
|
||||||
|
('1.7.4', 'folsom'),
|
||||||
|
('1.8.0', 'grizzly'),
|
||||||
|
('1.7.7', 'grizzly'),
|
||||||
|
('1.7.6', 'grizzly'),
|
||||||
|
('1.10.0', 'havana'),
|
||||||
|
('1.9.1', 'havana'),
|
||||||
|
('1.9.0', 'havana'),
|
||||||
|
('1.13.1', 'icehouse'),
|
||||||
|
('1.13.0', 'icehouse'),
|
||||||
|
('1.12.0', 'icehouse'),
|
||||||
|
('1.11.0', 'icehouse'),
|
||||||
|
('2.0.0', 'juno'),
|
||||||
|
('2.1.0', 'juno'),
|
||||||
|
('2.2.0', 'juno'),
|
||||||
|
('2.2.1', 'kilo'),
|
||||||
|
('2.2.2', 'kilo'),
|
||||||
|
])
|
||||||
|
|
||||||
|
DEFAULT_LOOPBACK_SIZE = '5G'
|
||||||
|
|
||||||
|
|
||||||
|
def error_out(msg):
|
||||||
|
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_install_source(src):
|
||||||
|
'''Derive OpenStack release codename from a given installation source.'''
|
||||||
|
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||||
|
rel = ''
|
||||||
|
if src is None:
|
||||||
|
return rel
|
||||||
|
if src in ['distro', 'distro-proposed']:
|
||||||
|
try:
|
||||||
|
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
|
||||||
|
except KeyError:
|
||||||
|
e = 'Could not derive openstack release for '\
|
||||||
|
'this Ubuntu release: %s' % ubuntu_rel
|
||||||
|
error_out(e)
|
||||||
|
return rel
|
||||||
|
|
||||||
|
if src.startswith('cloud:'):
|
||||||
|
ca_rel = src.split(':')[1]
|
||||||
|
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
|
||||||
|
return ca_rel
|
||||||
|
|
||||||
|
# Best guess match based on deb string provided
|
||||||
|
if src.startswith('deb') or src.startswith('ppa'):
|
||||||
|
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
||||||
|
if v in src:
|
||||||
|
return v
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_install_source(src):
|
||||||
|
codename = get_os_codename_install_source(src)
|
||||||
|
return get_os_version_codename(codename)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_version(vers):
|
||||||
|
'''Determine OpenStack codename from version number.'''
|
||||||
|
try:
|
||||||
|
return OPENSTACK_CODENAMES[vers]
|
||||||
|
except KeyError:
|
||||||
|
e = 'Could not determine OpenStack codename for version %s' % vers
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_codename(codename):
|
||||||
|
'''Determine OpenStack version number from codename.'''
|
||||||
|
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
||||||
|
if v == codename:
|
||||||
|
return k
|
||||||
|
e = 'Could not derive OpenStack version for '\
|
||||||
|
'codename: %s' % codename
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_codename_package(package, fatal=True):
|
||||||
|
'''Derive OpenStack release codename from an installed package.'''
|
||||||
|
import apt_pkg as apt
|
||||||
|
|
||||||
|
cache = apt_cache()
|
||||||
|
|
||||||
|
try:
|
||||||
|
pkg = cache[package]
|
||||||
|
except:
|
||||||
|
if not fatal:
|
||||||
|
return None
|
||||||
|
# the package is unknown to the current apt cache.
|
||||||
|
e = 'Could not determine version of package with no installation '\
|
||||||
|
'candidate: %s' % package
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
if not pkg.current_ver:
|
||||||
|
if not fatal:
|
||||||
|
return None
|
||||||
|
# package is known, but no version is currently installed.
|
||||||
|
e = 'Could not determine version of uninstalled package: %s' % package
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if 'swift' in pkg.name:
|
||||||
|
swift_vers = vers[:5]
|
||||||
|
if swift_vers not in SWIFT_CODENAMES:
|
||||||
|
# Deal with 1.10.0 upward
|
||||||
|
swift_vers = vers[:6]
|
||||||
|
return SWIFT_CODENAMES[swift_vers]
|
||||||
|
else:
|
||||||
|
vers = vers[:6]
|
||||||
|
return OPENSTACK_CODENAMES[vers]
|
||||||
|
except KeyError:
|
||||||
|
e = 'Could not determine OpenStack codename for version %s' % vers
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
|
||||||
|
def get_os_version_package(pkg, fatal=True):
|
||||||
|
'''Derive OpenStack version number from an installed package.'''
|
||||||
|
codename = get_os_codename_package(pkg, fatal=fatal)
|
||||||
|
|
||||||
|
if not codename:
|
||||||
|
return None
|
||||||
|
|
||||||
|
if 'swift' in pkg:
|
||||||
|
vers_map = SWIFT_CODENAMES
|
||||||
|
else:
|
||||||
|
vers_map = OPENSTACK_CODENAMES
|
||||||
|
|
||||||
|
for version, cname in six.iteritems(vers_map):
|
||||||
|
if cname == codename:
|
||||||
|
return version
|
||||||
|
# e = "Could not determine OpenStack version for package: %s" % pkg
|
||||||
|
# error_out(e)
|
||||||
|
|
||||||
|
|
||||||
|
os_rel = None


def os_release(package, base='essex'):
    '''
    Returns OpenStack release codename from a cached global.

    If the codename can not be determined from either an installed package or
    the installation source, the earliest release supported by the charm
    should be returned.
    '''
    global os_rel
    if os_rel:
        return os_rel
    os_rel = (get_os_codename_package(package, fatal=False) or
              get_os_codename_install_source(config('openstack-origin')) or
              base)
    return os_rel

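A sketch of typical hook usage (the package name is an example; comparing codenames as strings works because OpenStack release names are alphabetical):

# e.g. inside a config-changed hook of an OpenStack charm
release = os_release('neutron-common', base='icehouse')
if release >= 'kilo':
    # enable behaviour only present from Kilo onwards
    juju_log('Running against %s' % release)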
def import_key(keyid):
|
||||||
|
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
|
||||||
|
"--recv-keys %s" % keyid
|
||||||
|
try:
|
||||||
|
subprocess.check_call(cmd.split(' '))
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
error_out("Error importing repo key %s" % keyid)
|
||||||
|
|
||||||
|
|
||||||
|
def configure_installation_source(rel):
|
||||||
|
'''Configure apt installation source.'''
|
||||||
|
if rel == 'distro':
|
||||||
|
return
|
||||||
|
elif rel == 'distro-proposed':
|
||||||
|
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||||
|
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
||||||
|
f.write(DISTRO_PROPOSED % ubuntu_rel)
|
||||||
|
elif rel[:4] == "ppa:":
|
||||||
|
src = rel
|
||||||
|
subprocess.check_call(["add-apt-repository", "-y", src])
|
||||||
|
elif rel[:3] == "deb":
|
||||||
|
l = len(rel.split('|'))
|
||||||
|
if l == 2:
|
||||||
|
src, key = rel.split('|')
|
||||||
|
juju_log("Importing PPA key from keyserver for %s" % src)
|
||||||
|
import_key(key)
|
||||||
|
elif l == 1:
|
||||||
|
src = rel
|
||||||
|
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
||||||
|
f.write(src)
|
||||||
|
elif rel[:6] == 'cloud:':
|
||||||
|
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
||||||
|
rel = rel.split(':')[1]
|
||||||
|
u_rel = rel.split('-')[0]
|
||||||
|
ca_rel = rel.split('-')[1]
|
||||||
|
|
||||||
|
if u_rel != ubuntu_rel:
|
||||||
|
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
|
||||||
|
'version (%s)' % (ca_rel, ubuntu_rel)
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
if 'staging' in ca_rel:
|
||||||
|
# staging is just a regular PPA.
|
||||||
|
os_rel = ca_rel.split('/')[0]
|
||||||
|
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
|
||||||
|
cmd = 'add-apt-repository -y %s' % ppa
|
||||||
|
subprocess.check_call(cmd.split(' '))
|
||||||
|
return
|
||||||
|
|
||||||
|
# map charm config options to actual archive pockets.
|
||||||
|
pockets = {
|
||||||
|
'folsom': 'precise-updates/folsom',
|
||||||
|
'folsom/updates': 'precise-updates/folsom',
|
||||||
|
'folsom/proposed': 'precise-proposed/folsom',
|
||||||
|
'grizzly': 'precise-updates/grizzly',
|
||||||
|
'grizzly/updates': 'precise-updates/grizzly',
|
||||||
|
'grizzly/proposed': 'precise-proposed/grizzly',
|
||||||
|
'havana': 'precise-updates/havana',
|
||||||
|
'havana/updates': 'precise-updates/havana',
|
||||||
|
'havana/proposed': 'precise-proposed/havana',
|
||||||
|
'icehouse': 'precise-updates/icehouse',
|
||||||
|
'icehouse/updates': 'precise-updates/icehouse',
|
||||||
|
'icehouse/proposed': 'precise-proposed/icehouse',
|
||||||
|
'juno': 'trusty-updates/juno',
|
||||||
|
'juno/updates': 'trusty-updates/juno',
|
||||||
|
'juno/proposed': 'trusty-proposed/juno',
|
||||||
|
'kilo': 'trusty-updates/kilo',
|
||||||
|
'kilo/updates': 'trusty-updates/kilo',
|
||||||
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
pocket = pockets[ca_rel]
|
||||||
|
except KeyError:
|
||||||
|
e = 'Invalid Cloud Archive release specified: %s' % rel
|
||||||
|
error_out(e)
|
||||||
|
|
||||||
|
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
|
||||||
|
apt_install('ubuntu-cloud-keyring', fatal=True)
|
||||||
|
|
||||||
|
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
|
||||||
|
f.write(src)
|
||||||
|
else:
|
||||||
|
error_out("Invalid openstack-release specified: %s" % rel)
|
||||||
|
|
||||||
|
|
||||||
|
def config_value_changed(option):
    """
    Determine whether a config value has changed since the last call to this
    function.
    """
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        saved = db.get(option)
        db.set(option, current)
        if saved is None:
            return False
        return current != saved

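A minimal usage sketch (the option name is made up):

# hypothetical config-changed snippet: act only when the value really changed
if config_value_changed('fabric-mode'):
    juju_log('fabric-mode changed since the last hook run; reconfiguring')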
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
||||||
|
"""
|
||||||
|
Write an rc file in the charm-delivered directory containing
|
||||||
|
exported environment variables provided by env_vars. Any charm scripts run
|
||||||
|
outside the juju hook environment can source this scriptrc to obtain
|
||||||
|
updated config information necessary to perform health checks or
|
||||||
|
service changes.
|
||||||
|
"""
|
||||||
|
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
|
||||||
|
if not os.path.exists(os.path.dirname(juju_rc_path)):
|
||||||
|
os.mkdir(os.path.dirname(juju_rc_path))
|
||||||
|
with open(juju_rc_path, 'wb') as rc_script:
|
||||||
|
rc_script.write(
|
||||||
|
"#!/bin/bash\n")
|
||||||
|
[rc_script.write('export %s=%s\n' % (u, p))
|
||||||
|
for u, p in six.iteritems(env_vars) if u != "script_path"]
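For illustration, a hypothetical call that exposes a couple of values to out-of-hook scripts (the variable names and values are made up):

save_script_rc(OPENSTACK_PORT_MGMT=8443,
               OPENSTACK_SERVICE_API='plumgrid-director')
# scripts outside the hook environment can then `source scripts/scriptrc`
# to pick up these exports before running health checks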
|
||||||
|
|
||||||
|
|
||||||
|
def openstack_upgrade_available(package):
|
||||||
|
"""
|
||||||
|
Determines if an OpenStack upgrade is available from installation
|
||||||
|
source, based on version of installed package.
|
||||||
|
|
||||||
|
:param package: str: Name of installed package.
|
||||||
|
|
||||||
|
:returns: bool: : Returns True if configured installation source offers
|
||||||
|
a newer version of package.
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import apt_pkg as apt
|
||||||
|
src = config('openstack-origin')
|
||||||
|
cur_vers = get_os_version_package(package)
|
||||||
|
available_vers = get_os_version_install_source(src)
|
||||||
|
apt.init()
|
||||||
|
return apt.version_compare(available_vers, cur_vers) == 1
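A sketch of how a charm's upgrade path might use this check (it assumes the usual charmhelpers.fetch helpers apt_update/apt_upgrade are available; package name is illustrative):

if openstack_upgrade_available('neutron-common'):
    juju_log('Newer OpenStack release available; upgrading packages')
    configure_installation_source(config('openstack-origin'))
    apt_update(fatal=True)
    apt_upgrade(fatal=True, dist=True)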
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_block_device(block_device):
|
||||||
|
'''
|
||||||
|
Confirm block_device, create as loopback if necessary.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to ensure.
|
||||||
|
|
||||||
|
:returns: str: Full path of ensured block device.
|
||||||
|
'''
|
||||||
|
_none = ['None', 'none', None]
|
||||||
|
if (block_device in _none):
|
||||||
|
error_out('prepare_storage(): Missing required input: block_device=%s.'
|
||||||
|
% block_device)
|
||||||
|
|
||||||
|
if block_device.startswith('/dev/'):
|
||||||
|
bdev = block_device
|
||||||
|
elif block_device.startswith('/'):
|
||||||
|
_bd = block_device.split('|')
|
||||||
|
if len(_bd) == 2:
|
||||||
|
bdev, size = _bd
|
||||||
|
else:
|
||||||
|
bdev = block_device
|
||||||
|
size = DEFAULT_LOOPBACK_SIZE
|
||||||
|
bdev = ensure_loopback_device(bdev, size)
|
||||||
|
else:
|
||||||
|
bdev = '/dev/%s' % block_device
|
||||||
|
|
||||||
|
if not is_block_device(bdev):
|
||||||
|
error_out('Failed to locate valid block device at %s' % bdev)
|
||||||
|
|
||||||
|
return bdev
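Two illustrative calls (device path, file path, and size are made up):

# an existing device node is validated and passed through unchanged
ensure_block_device('/dev/vdb')
# a plain file path may carry an optional size after '|'; a loopback device
# is created (or reused) behind it
ensure_block_device('/var/lib/mydata.img|10G')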
|
||||||
|
|
||||||
|
|
||||||
|
def clean_storage(block_device):
|
||||||
|
'''
|
||||||
|
Ensures a block device is clean. That is:
|
||||||
|
- unmounted
|
||||||
|
- any lvm volume groups are deactivated
|
||||||
|
- any lvm physical device signatures removed
|
||||||
|
- partition table wiped
|
||||||
|
|
||||||
|
:param block_device: str: Full path to block device to clean.
|
||||||
|
'''
|
||||||
|
for mp, d in mounts():
|
||||||
|
if d == block_device:
|
||||||
|
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
|
||||||
|
(d, mp), level=INFO)
|
||||||
|
umount(mp, persist=True)
|
||||||
|
|
||||||
|
if is_lvm_physical_volume(block_device):
|
||||||
|
deactivate_lvm_volume_group(block_device)
|
||||||
|
remove_lvm_physical_volume(block_device)
|
||||||
|
else:
|
||||||
|
zap_disk(block_device)
|
||||||
|
|
||||||
|
is_ip = ip.is_ip
|
||||||
|
ns_query = ip.ns_query
|
||||||
|
get_host_ip = ip.get_host_ip
|
||||||
|
get_hostname = ip.get_hostname
|
||||||
|
|
||||||
|
|
||||||
|
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
|
||||||
|
mm_map = {}
|
||||||
|
if os.path.isfile(mm_file):
|
||||||
|
with open(mm_file, 'r') as f:
|
||||||
|
mm_map = json.load(f)
|
||||||
|
return mm_map
|
||||||
|
|
||||||
|
|
||||||
|
def sync_db_with_multi_ipv6_addresses(database, database_user,
|
||||||
|
relation_prefix=None):
|
||||||
|
hosts = get_ipv6_addr(dynamic_only=False)
|
||||||
|
|
||||||
|
kwargs = {'database': database,
|
||||||
|
'username': database_user,
|
||||||
|
'hostname': json.dumps(hosts)}
|
||||||
|
|
||||||
|
if relation_prefix:
|
||||||
|
for key in list(kwargs.keys()):
|
||||||
|
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
|
||||||
|
del kwargs[key]
|
||||||
|
|
||||||
|
for rid in relation_ids('shared-db'):
|
||||||
|
relation_set(relation_id=rid, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def os_requires_version(ostack_release, pkg):
|
||||||
|
"""
|
||||||
|
Decorator for hook to specify minimum supported release
|
||||||
|
"""
|
||||||
|
def wrap(f):
|
||||||
|
@wraps(f)
|
||||||
|
def wrapped_f(*args):
|
||||||
|
if os_release(pkg) < ostack_release:
|
||||||
|
raise Exception("This hook is not supported on releases"
|
||||||
|
" before %s" % ostack_release)
|
||||||
|
f(*args)
|
||||||
|
return wrapped_f
|
||||||
|
return wrap
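Usage sketch for the decorator above; the hook and package names are examples:

@os_requires_version('icehouse', 'neutron-common')
def config_changed():
    # only reached when the installed neutron-common maps to icehouse or later
    juju_log('running config-changed against a supported release')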
|
||||||
|
|
||||||
|
|
||||||
|
def git_install_requested():
    """
    Returns true if openstack-origin-git is specified.
    """
    return config('openstack-origin-git') is not None

requirements_dir = None
|
||||||
|
|
||||||
|
|
||||||
|
def git_clone_and_install(projects_yaml, core_project):
|
||||||
|
"""
|
||||||
|
Clone/install all specified OpenStack repositories.
|
||||||
|
|
||||||
|
The expected format of projects_yaml is:
|
||||||
|
repositories:
|
||||||
|
- {name: keystone,
|
||||||
|
repository: 'git://git.openstack.org/openstack/keystone.git',
|
||||||
|
branch: 'stable/icehouse'}
|
||||||
|
- {name: requirements,
|
||||||
|
repository: 'git://git.openstack.org/openstack/requirements.git',
|
||||||
|
branch: 'stable/icehouse'}
|
||||||
|
directory: /mnt/openstack-git
|
||||||
|
http_proxy: http://squid.internal:3128
|
||||||
|
https_proxy: https://squid.internal:3128
|
||||||
|
|
||||||
|
The directory, http_proxy, and https_proxy keys are optional.
|
||||||
|
"""
|
||||||
|
global requirements_dir
|
||||||
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
|
if not projects_yaml:
|
||||||
|
return
|
||||||
|
|
||||||
|
projects = yaml.load(projects_yaml)
|
||||||
|
_git_validate_projects_yaml(projects, core_project)
|
||||||
|
|
||||||
|
old_environ = dict(os.environ)
|
||||||
|
|
||||||
|
if 'http_proxy' in projects.keys():
|
||||||
|
os.environ['http_proxy'] = projects['http_proxy']
|
||||||
|
if 'https_proxy' in projects.keys():
|
||||||
|
os.environ['https_proxy'] = projects['https_proxy']
|
||||||
|
|
||||||
|
if 'directory' in projects.keys():
|
||||||
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
for p in projects['repositories']:
|
||||||
|
repo = p['repository']
|
||||||
|
branch = p['branch']
|
||||||
|
if p['name'] == 'requirements':
|
||||||
|
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
||||||
|
update_requirements=False)
|
||||||
|
requirements_dir = repo_dir
|
||||||
|
else:
|
||||||
|
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
|
||||||
|
update_requirements=True)
|
||||||
|
|
||||||
|
os.environ = old_environ
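A sketch of building a projects structure that satisfies the validation below (repository URLs and directory are illustrative; 'requirements' must come first and the core project last):

import yaml

projects = {
    'repositories': [
        {'name': 'requirements',
         'repository': 'git://git.openstack.org/openstack/requirements.git',
         'branch': 'stable/icehouse'},
        {'name': 'neutron',
         'repository': 'git://git.openstack.org/openstack/neutron.git',
         'branch': 'stable/icehouse'},
    ],
    'directory': '/mnt/openstack-git',
}
# ordering is enforced by _git_validate_projects_yaml()
git_clone_and_install(yaml.dump(projects), core_project='neutron')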
|
||||||
|
|
||||||
|
|
||||||
|
def _git_validate_projects_yaml(projects, core_project):
|
||||||
|
"""
|
||||||
|
Validate the projects yaml.
|
||||||
|
"""
|
||||||
|
_git_ensure_key_exists('repositories', projects)
|
||||||
|
|
||||||
|
for project in projects['repositories']:
|
||||||
|
_git_ensure_key_exists('name', project.keys())
|
||||||
|
_git_ensure_key_exists('repository', project.keys())
|
||||||
|
_git_ensure_key_exists('branch', project.keys())
|
||||||
|
|
||||||
|
if projects['repositories'][0]['name'] != 'requirements':
|
||||||
|
error_out('{} git repo must be specified first'.format('requirements'))
|
||||||
|
|
||||||
|
if projects['repositories'][-1]['name'] != core_project:
|
||||||
|
error_out('{} git repo must be specified last'.format(core_project))
|
||||||
|
|
||||||
|
|
||||||
|
def _git_ensure_key_exists(key, keys):
|
||||||
|
"""
|
||||||
|
Ensure that key exists in keys.
|
||||||
|
"""
|
||||||
|
if key not in keys:
|
||||||
|
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
||||||
|
|
||||||
|
|
||||||
|
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
|
||||||
|
"""
|
||||||
|
Clone and install a single git repository.
|
||||||
|
"""
|
||||||
|
dest_dir = os.path.join(parent_dir, os.path.basename(repo))
|
||||||
|
|
||||||
|
    if not os.path.exists(parent_dir):
        juju_log('Directory {} does not exist; creating it.'.format(parent_dir))
        os.mkdir(parent_dir)
|
||||||
|
|
||||||
|
if not os.path.exists(dest_dir):
|
||||||
|
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
||||||
|
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
|
||||||
|
else:
|
||||||
|
repo_dir = dest_dir
|
||||||
|
|
||||||
|
if update_requirements:
|
||||||
|
if not requirements_dir:
|
||||||
|
error_out('requirements repo must be cloned before '
|
||||||
|
'updating from global requirements.')
|
||||||
|
_git_update_requirements(repo_dir, requirements_dir)
|
||||||
|
|
||||||
|
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
||||||
|
pip_install(repo_dir)
|
||||||
|
|
||||||
|
return repo_dir
|
||||||
|
|
||||||
|
|
||||||
|
def _git_update_requirements(package_dir, reqs_dir):
|
||||||
|
"""
|
||||||
|
Update from global requirements.
|
||||||
|
|
||||||
|
Update an OpenStack git directory's requirements.txt and
|
||||||
|
test-requirements.txt from global-requirements.txt.
|
||||||
|
"""
|
||||||
|
orig_dir = os.getcwd()
|
||||||
|
os.chdir(reqs_dir)
|
||||||
|
cmd = ['python', 'update.py', package_dir]
|
||||||
|
try:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
package = os.path.basename(package_dir)
|
||||||
|
error_out("Error updating {} from global-requirements.txt".format(package))
|
||||||
|
os.chdir(orig_dir)
|
||||||
|
|
||||||
|
|
||||||
|
def git_src_dir(projects_yaml, project):
|
||||||
|
"""
|
||||||
|
Return the directory where the specified project's source is located.
|
||||||
|
"""
|
||||||
|
parent_dir = '/mnt/openstack-git'
|
||||||
|
|
||||||
|
if not projects_yaml:
|
||||||
|
return
|
||||||
|
|
||||||
|
projects = yaml.load(projects_yaml)
|
||||||
|
|
||||||
|
if 'directory' in projects.keys():
|
||||||
|
parent_dir = projects['directory']
|
||||||
|
|
||||||
|
for p in projects['repositories']:
|
||||||
|
if p['name'] == project:
|
||||||
|
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
||||||
|
|
||||||
|
return None
|
15
hooks/charmhelpers/contrib/python/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
96
hooks/charmhelpers/contrib/python/packages.py
Normal file
@ -0,0 +1,96 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# coding: utf-8
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from charmhelpers.fetch import apt_install, apt_update
|
||||||
|
from charmhelpers.core.hookenv import log
|
||||||
|
|
||||||
|
try:
|
||||||
|
from pip import main as pip_execute
|
||||||
|
except ImportError:
|
||||||
|
apt_update()
|
||||||
|
apt_install('python-pip')
|
||||||
|
from pip import main as pip_execute
|
||||||
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
|
||||||
|
def parse_options(given, available):
|
||||||
|
"""Given a set of options, check if available"""
|
||||||
|
for key, value in sorted(given.items()):
|
||||||
|
if key in available:
|
||||||
|
yield "--{0}={1}".format(key, value)
|
||||||
|
|
||||||
|
|
||||||
|
def pip_install_requirements(requirements, **options):
|
||||||
|
"""Install a requirements file """
|
||||||
|
command = ["install"]
|
||||||
|
|
||||||
|
available_options = ('proxy', 'src', 'log', )
|
||||||
|
for option in parse_options(options, available_options):
|
||||||
|
command.append(option)
|
||||||
|
|
||||||
|
command.append("-r {0}".format(requirements))
|
||||||
|
log("Installing from file: {} with options: {}".format(requirements,
|
||||||
|
command))
|
||||||
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
|
def pip_install(package, fatal=False, upgrade=False, **options):
|
||||||
|
"""Install a python package"""
|
||||||
|
command = ["install"]
|
||||||
|
|
||||||
|
available_options = ('proxy', 'src', 'log', "index-url", )
|
||||||
|
for option in parse_options(options, available_options):
|
||||||
|
command.append(option)
|
||||||
|
|
||||||
|
if upgrade:
|
||||||
|
command.append('--upgrade')
|
||||||
|
|
||||||
|
if isinstance(package, list):
|
||||||
|
command.extend(package)
|
||||||
|
else:
|
||||||
|
command.append(package)
|
||||||
|
|
||||||
|
log("Installing {} package with options: {}".format(package,
|
||||||
|
command))
|
||||||
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
|
def pip_uninstall(package, **options):
|
||||||
|
"""Uninstall a python package"""
|
||||||
|
command = ["uninstall", "-q", "-y"]
|
||||||
|
|
||||||
|
available_options = ('proxy', 'log', )
|
||||||
|
for option in parse_options(options, available_options):
|
||||||
|
command.append(option)
|
||||||
|
|
||||||
|
if isinstance(package, list):
|
||||||
|
command.extend(package)
|
||||||
|
else:
|
||||||
|
command.append(package)
|
||||||
|
|
||||||
|
log("Uninstalling {} package with options: {}".format(package,
|
||||||
|
command))
|
||||||
|
pip_execute(command)
|
||||||
|
|
||||||
|
|
||||||
|
def pip_list():
|
||||||
|
"""Returns the list of current python installed packages
|
||||||
|
"""
|
||||||
|
return pip_execute(["list"])
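Illustrative calls into the wrappers above (package names and proxy URL are made up):

# install a single package, optionally via a proxy
pip_install('croniter', proxy='http://squid.internal:3128')
# upgrade a list of packages in one call
pip_install(['six', 'pbr'], upgrade=True)
pip_uninstall('croniter')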
|
15
hooks/charmhelpers/contrib/storage/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
15
hooks/charmhelpers/contrib/storage/linux/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
444
hooks/charmhelpers/contrib/storage/linux/ceph.py
Normal file
@ -0,0 +1,444 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# This file is sourced from lp:openstack-charm-helpers
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# James Page <james.page@ubuntu.com>
|
||||||
|
# Adam Gandelman <adamg@ubuntu.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
CalledProcessError,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
relation_get,
|
||||||
|
relation_ids,
|
||||||
|
related_units,
|
||||||
|
log,
|
||||||
|
DEBUG,
|
||||||
|
INFO,
|
||||||
|
WARNING,
|
||||||
|
ERROR,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
mount,
|
||||||
|
mounts,
|
||||||
|
service_start,
|
||||||
|
service_stop,
|
||||||
|
service_running,
|
||||||
|
umount,
|
||||||
|
)
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
apt_install,
|
||||||
|
)
|
||||||
|
|
||||||
|
KEYRING = '/etc/ceph/ceph.client.{}.keyring'
|
||||||
|
KEYFILE = '/etc/ceph/ceph.client.{}.key'
|
||||||
|
|
||||||
|
CEPH_CONF = """[global]
|
||||||
|
auth supported = {auth}
|
||||||
|
keyring = {keyring}
|
||||||
|
mon host = {mon_hosts}
|
||||||
|
log to syslog = {use_syslog}
|
||||||
|
err to syslog = {use_syslog}
|
||||||
|
clog to syslog = {use_syslog}
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def install():
|
||||||
|
"""Basic Ceph client installation."""
|
||||||
|
ceph_dir = "/etc/ceph"
|
||||||
|
if not os.path.exists(ceph_dir):
|
||||||
|
os.mkdir(ceph_dir)
|
||||||
|
|
||||||
|
apt_install('ceph-common', fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
def rbd_exists(service, pool, rbd_img):
|
||||||
|
"""Check to see if a RADOS block device exists."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rbd', 'list', '--id',
|
||||||
|
service, '--pool', pool]).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return rbd_img in out
|
||||||
|
|
||||||
|
|
||||||
|
def create_rbd_image(service, pool, image, sizemb):
|
||||||
|
"""Create a new RADOS block device."""
|
||||||
|
cmd = ['rbd', 'create', image, '--size', str(sizemb), '--id', service,
|
||||||
|
'--pool', pool]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def pool_exists(service, name):
|
||||||
|
"""Check to see if a RADOS pool already exists."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rados', '--id', service,
|
||||||
|
'lspools']).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return name in out
|
||||||
|
|
||||||
|
|
||||||
|
def get_osds(service):
|
||||||
|
"""Return a list of all Ceph Object Storage Daemons currently in the
|
||||||
|
cluster.
|
||||||
|
"""
|
||||||
|
version = ceph_version()
|
||||||
|
if version and version >= '0.56':
|
||||||
|
return json.loads(check_output(['ceph', '--id', service,
|
||||||
|
'osd', 'ls',
|
||||||
|
'--format=json']).decode('UTF-8'))
|
||||||
|
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def create_pool(service, name, replicas=3):
|
||||||
|
"""Create a new RADOS pool."""
|
||||||
|
if pool_exists(service, name):
|
||||||
|
log("Ceph pool {} already exists, skipping creation".format(name),
|
||||||
|
level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
# Calculate the number of placement groups based
|
||||||
|
# on upstream recommended best practices.
|
||||||
|
osds = get_osds(service)
|
||||||
|
if osds:
|
||||||
|
pgnum = (len(osds) * 100 // replicas)
|
||||||
|
else:
|
||||||
|
# NOTE(james-page): Default to 200 for older ceph versions
|
||||||
|
# which don't support OSD query from cli
|
||||||
|
pgnum = 200
|
||||||
|
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'create', name, str(pgnum)]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', name, 'size',
|
||||||
|
str(replicas)]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_pool(service, name):
|
||||||
|
"""Delete a RADOS pool from ceph."""
|
||||||
|
cmd = ['ceph', '--id', service, 'osd', 'pool', 'delete', name,
|
||||||
|
'--yes-i-really-really-mean-it']
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def _keyfile_path(service):
|
||||||
|
return KEYFILE.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def _keyring_path(service):
|
||||||
|
return KEYRING.format(service)
|
||||||
|
|
||||||
|
|
||||||
|
def create_keyring(service, key):
|
||||||
|
"""Create a new Ceph keyring containing key."""
|
||||||
|
keyring = _keyring_path(service)
|
||||||
|
if os.path.exists(keyring):
|
||||||
|
log('Ceph keyring exists at %s.' % keyring, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
cmd = ['ceph-authtool', keyring, '--create-keyring',
|
||||||
|
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
|
||||||
|
check_call(cmd)
|
||||||
|
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
|
||||||
|
|
||||||
|
|
||||||
|
def delete_keyring(service):
|
||||||
|
"""Delete an existing Ceph keyring."""
|
||||||
|
keyring = _keyring_path(service)
|
||||||
|
if not os.path.exists(keyring):
|
||||||
|
log('Keyring does not exist at %s' % keyring, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
os.remove(keyring)
|
||||||
|
log('Deleted ring at %s.' % keyring, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def create_key_file(service, key):
|
||||||
|
"""Create a file containing key."""
|
||||||
|
keyfile = _keyfile_path(service)
|
||||||
|
if os.path.exists(keyfile):
|
||||||
|
log('Keyfile exists at %s.' % keyfile, level=WARNING)
|
||||||
|
return
|
||||||
|
|
||||||
|
with open(keyfile, 'w') as fd:
|
||||||
|
fd.write(key)
|
||||||
|
|
||||||
|
log('Created new keyfile at %s.' % keyfile, level=INFO)
|
||||||
|
|
||||||
|
|
||||||
|
def get_ceph_nodes():
|
||||||
|
"""Query named relation 'ceph' to determine current nodes."""
|
||||||
|
hosts = []
|
||||||
|
for r_id in relation_ids('ceph'):
|
||||||
|
for unit in related_units(r_id):
|
||||||
|
hosts.append(relation_get('private-address', unit=unit, rid=r_id))
|
||||||
|
|
||||||
|
return hosts
|
||||||
|
|
||||||
|
|
||||||
|
def configure(service, key, auth, use_syslog):
|
||||||
|
"""Perform basic configuration of Ceph."""
|
||||||
|
create_keyring(service, key)
|
||||||
|
create_key_file(service, key)
|
||||||
|
hosts = get_ceph_nodes()
|
||||||
|
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
|
||||||
|
ceph_conf.write(CEPH_CONF.format(auth=auth,
|
||||||
|
keyring=_keyring_path(service),
|
||||||
|
mon_hosts=",".join(map(str, hosts)),
|
||||||
|
use_syslog=use_syslog))
|
||||||
|
modprobe('rbd')
|
||||||
|
|
||||||
|
|
||||||
|
def image_mapped(name):
|
||||||
|
"""Determine whether a RADOS block device is mapped locally."""
|
||||||
|
try:
|
||||||
|
out = check_output(['rbd', 'showmapped']).decode('UTF-8')
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
return name in out
|
||||||
|
|
||||||
|
|
||||||
|
def map_block_storage(service, pool, image):
|
||||||
|
"""Map a RADOS block device for local use."""
|
||||||
|
cmd = [
|
||||||
|
'rbd',
|
||||||
|
'map',
|
||||||
|
'{}/{}'.format(pool, image),
|
||||||
|
'--user',
|
||||||
|
service,
|
||||||
|
'--secret',
|
||||||
|
_keyfile_path(service),
|
||||||
|
]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def filesystem_mounted(fs):
|
||||||
|
"""Determine whether a filesytems is already mounted."""
|
||||||
|
return fs in [f for f, m in mounts()]
|
||||||
|
|
||||||
|
|
||||||
|
def make_filesystem(blk_device, fstype='ext4', timeout=10):
|
||||||
|
"""Make a new filesystem on the specified block device."""
|
||||||
|
count = 0
|
||||||
|
e_noent = os.errno.ENOENT
|
||||||
|
while not os.path.exists(blk_device):
|
||||||
|
if count >= timeout:
|
||||||
|
log('Gave up waiting on block device %s' % blk_device,
|
||||||
|
level=ERROR)
|
||||||
|
raise IOError(e_noent, os.strerror(e_noent), blk_device)
|
||||||
|
|
||||||
|
log('Waiting for block device %s to appear' % blk_device,
|
||||||
|
level=DEBUG)
|
||||||
|
count += 1
|
||||||
|
time.sleep(1)
|
||||||
|
else:
|
||||||
|
log('Formatting block device %s as filesystem %s.' %
|
||||||
|
(blk_device, fstype), level=INFO)
|
||||||
|
check_call(['mkfs', '-t', fstype, blk_device])
|
||||||
|
|
||||||
|
|
||||||
|
def place_data_on_block_device(blk_device, data_src_dst):
|
||||||
|
"""Migrate data in data_src_dst to blk_device and then remount."""
|
||||||
|
# mount block device into /mnt
|
||||||
|
mount(blk_device, '/mnt')
|
||||||
|
# copy data to /mnt
|
||||||
|
copy_files(data_src_dst, '/mnt')
|
||||||
|
# umount block device
|
||||||
|
umount('/mnt')
|
||||||
|
# Grab user/group ID's from original source
|
||||||
|
_dir = os.stat(data_src_dst)
|
||||||
|
uid = _dir.st_uid
|
||||||
|
gid = _dir.st_gid
|
||||||
|
# re-mount where the data should originally be
|
||||||
|
# TODO: persist is currently a NO-OP in core.host
|
||||||
|
mount(blk_device, data_src_dst, persist=True)
|
||||||
|
# ensure original ownership of new mount.
|
||||||
|
os.chown(data_src_dst, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: re-use
|
||||||
|
def modprobe(module):
|
||||||
|
"""Load a kernel module and configure for auto-load on reboot."""
|
||||||
|
log('Loading kernel module', level=INFO)
|
||||||
|
cmd = ['modprobe', module]
|
||||||
|
check_call(cmd)
|
||||||
|
with open('/etc/modules', 'r+') as modules:
|
||||||
|
if module not in modules.read():
|
||||||
|
modules.write(module)
|
||||||
|
|
||||||
|
|
||||||
|
def copy_files(src, dst, symlinks=False, ignore=None):
|
||||||
|
"""Copy files from src to dst."""
|
||||||
|
for item in os.listdir(src):
|
||||||
|
s = os.path.join(src, item)
|
||||||
|
d = os.path.join(dst, item)
|
||||||
|
if os.path.isdir(s):
|
||||||
|
shutil.copytree(s, d, symlinks, ignore)
|
||||||
|
else:
|
||||||
|
shutil.copy2(s, d)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
|
||||||
|
blk_device, fstype, system_services=[],
|
||||||
|
replicas=3):
|
||||||
|
"""NOTE: This function must only be called from a single service unit for
|
||||||
|
the same rbd_img otherwise data loss will occur.
|
||||||
|
|
||||||
|
Ensures given pool and RBD image exists, is mapped to a block device,
|
||||||
|
and the device is formatted and mounted at the given mount_point.
|
||||||
|
|
||||||
|
If formatting a device for the first time, data existing at mount_point
|
||||||
|
will be migrated to the RBD device before being re-mounted.
|
||||||
|
|
||||||
|
All services listed in system_services will be stopped prior to data
|
||||||
|
migration and restarted when complete.
|
||||||
|
"""
|
||||||
|
# Ensure pool, RBD image, RBD mappings are in place.
|
||||||
|
if not pool_exists(service, pool):
|
||||||
|
log('Creating new pool {}.'.format(pool), level=INFO)
|
||||||
|
create_pool(service, pool, replicas=replicas)
|
||||||
|
|
||||||
|
if not rbd_exists(service, pool, rbd_img):
|
||||||
|
log('Creating RBD image ({}).'.format(rbd_img), level=INFO)
|
||||||
|
create_rbd_image(service, pool, rbd_img, sizemb)
|
||||||
|
|
||||||
|
if not image_mapped(rbd_img):
|
||||||
|
log('Mapping RBD Image {} as a Block Device.'.format(rbd_img),
|
||||||
|
level=INFO)
|
||||||
|
map_block_storage(service, pool, rbd_img)
|
||||||
|
|
||||||
|
# make file system
|
||||||
|
# TODO: What happens if for whatever reason this is run again and
|
||||||
|
# the data is already in the rbd device and/or is mounted??
|
||||||
|
# When it is mounted already, it will fail to make the fs
|
||||||
|
# XXX: This is really sketchy! Need to at least add an fstab entry
|
||||||
|
# otherwise this hook will blow away existing data if its executed
|
||||||
|
# after a reboot.
|
||||||
|
if not filesystem_mounted(mount_point):
|
||||||
|
make_filesystem(blk_device, fstype)
|
||||||
|
|
||||||
|
for svc in system_services:
|
||||||
|
if service_running(svc):
|
||||||
|
log('Stopping services {} prior to migrating data.'
|
||||||
|
.format(svc), level=DEBUG)
|
||||||
|
service_stop(svc)
|
||||||
|
|
||||||
|
place_data_on_block_device(blk_device, mount_point)
|
||||||
|
|
||||||
|
for svc in system_services:
|
||||||
|
log('Starting service {} after migrating data.'
|
||||||
|
.format(svc), level=DEBUG)
|
||||||
|
service_start(svc)
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_ceph_keyring(service, user=None, group=None):
|
||||||
|
"""Ensures a ceph keyring is created for a named service and optionally
|
||||||
|
ensures user and group ownership.
|
||||||
|
|
||||||
|
Returns False if no ceph key is available in relation state.
|
||||||
|
"""
|
||||||
|
key = None
|
||||||
|
for rid in relation_ids('ceph'):
|
||||||
|
for unit in related_units(rid):
|
||||||
|
key = relation_get('key', rid=rid, unit=unit)
|
||||||
|
if key:
|
||||||
|
break
|
||||||
|
|
||||||
|
if not key:
|
||||||
|
return False
|
||||||
|
|
||||||
|
create_keyring(service=service, key=key)
|
||||||
|
keyring = _keyring_path(service)
|
||||||
|
if user and group:
|
||||||
|
check_call(['chown', '%s.%s' % (user, group), keyring])
|
||||||
|
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def ceph_version():
|
||||||
|
"""Retrieve the local version of ceph."""
|
||||||
|
if os.path.exists('/usr/bin/ceph'):
|
||||||
|
cmd = ['ceph', '-v']
|
||||||
|
output = check_output(cmd).decode('US-ASCII')
|
||||||
|
output = output.split()
|
||||||
|
if len(output) > 3:
|
||||||
|
return output[2]
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
class CephBrokerRq(object):
    """Ceph broker request.

    Multiple operations can be added to a request and sent to the Ceph broker
    to be executed.

    Request is json-encoded for sending over the wire.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, api_version=1):
        self.api_version = api_version
        self.ops = []

    def add_op_create_pool(self, name, replica_count=3):
        self.ops.append({'op': 'create-pool', 'name': name,
                         'replicas': replica_count})

    @property
    def request(self):
        return json.dumps({'api-version': self.api_version, 'ops': self.ops})


class CephBrokerRsp(object):
    """Ceph broker response.

    Response is json-decoded and contents provided as methods/properties.

    The API is versioned and defaults to version 1.
    """
    def __init__(self, encoded_rsp):
        self.api_version = None
        self.rsp = json.loads(encoded_rsp)

    @property
    def exit_code(self):
        return self.rsp.get('exit-code')

    @property
    def exit_msg(self):
        return self.rsp.get('stderr')
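A sketch of driving the broker classes above from charm hook code (the relation keys 'broker_req'/'broker_rsp', the pool name, and the unit/rid placeholders are assumptions, not defined in this module):

rq = CephBrokerRq()
rq.add_op_create_pool(name='plumgrid-pool', replica_count=3)
for rid in relation_ids('ceph'):
    relation_set(relation_id=rid, broker_req=rq.request)

# later, when a ceph unit answers on the relation (unit/rid from context):
rsp = CephBrokerRsp(relation_get('broker_rsp', unit, rid))
if rsp.exit_code != 0:
    log('Ceph broker request failed: {}'.format(rsp.exit_msg), level=ERROR)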
78
hooks/charmhelpers/contrib/storage/linux/loopback.py
Normal file
@ -0,0 +1,78 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
)
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
|
||||||
|
##################################################
|
||||||
|
# loopback device helpers.
|
||||||
|
##################################################
|
||||||
|
def loopback_devices():
|
||||||
|
'''
|
||||||
|
Parse through 'losetup -a' output to determine currently mapped
|
||||||
|
loopback devices. Output is expected to look like:
|
||||||
|
|
||||||
|
/dev/loop0: [0807]:961814 (/tmp/my.img)
|
||||||
|
|
||||||
|
:returns: dict: a dict mapping {loopback_dev: backing_file}
|
||||||
|
'''
|
||||||
|
loopbacks = {}
|
||||||
|
cmd = ['losetup', '-a']
|
||||||
|
devs = [d.strip().split(' ') for d in
|
||||||
|
check_output(cmd).splitlines() if d != '']
|
||||||
|
for dev, _, f in devs:
|
||||||
|
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
|
||||||
|
return loopbacks
|
||||||
|
|
||||||
|
|
||||||
|
def create_loopback(file_path):
|
||||||
|
'''
|
||||||
|
Create a loopback device for a given backing file.
|
||||||
|
|
||||||
|
:returns: str: Full path to new loopback device (eg, /dev/loop0)
|
||||||
|
'''
|
||||||
|
file_path = os.path.abspath(file_path)
|
||||||
|
check_call(['losetup', '--find', file_path])
|
||||||
|
for d, f in six.iteritems(loopback_devices()):
|
||||||
|
if f == file_path:
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def ensure_loopback_device(path, size):
|
||||||
|
'''
|
||||||
|
Ensure a loopback device exists for a given backing file path and size.
|
||||||
|
If a loopback device is not already mapped to the file, a new one will be created.
|
||||||
|
|
||||||
|
TODO: Confirm size of found loopback device.
|
||||||
|
|
||||||
|
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
|
||||||
|
'''
|
||||||
|
for d, f in six.iteritems(loopback_devices()):
|
||||||
|
if f == path:
|
||||||
|
return d
|
||||||
|
|
||||||
|
if not os.path.exists(path):
|
||||||
|
cmd = ['truncate', '--size', size, path]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
return create_loopback(path)
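An illustrative call (the backing file path is made up):

# creates /var/lib/mydata.img of 5G if needed and maps it to a free
# /dev/loopN, or returns the existing mapping
device = ensure_loopback_device('/var/lib/mydata.img', '5G')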
|
105
hooks/charmhelpers/contrib/storage/linux/lvm.py
Normal file
@ -0,0 +1,105 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
CalledProcessError,
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
Popen,
|
||||||
|
PIPE,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
##################################################
|
||||||
|
# LVM helpers.
|
||||||
|
##################################################
|
||||||
|
def deactivate_lvm_volume_group(block_device):
|
||||||
|
'''
|
||||||
|
Deactivate any volume group associated with an LVM physical volume.
|
||||||
|
|
||||||
|
:param block_device: str: Full path to LVM physical volume
|
||||||
|
'''
|
||||||
|
vg = list_lvm_volume_group(block_device)
|
||||||
|
if vg:
|
||||||
|
cmd = ['vgchange', '-an', vg]
|
||||||
|
check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def is_lvm_physical_volume(block_device):
|
||||||
|
'''
|
||||||
|
Determine whether a block device is initialized as an LVM PV.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to inspect.
|
||||||
|
|
||||||
|
:returns: boolean: True if block device is a PV, False if not.
|
||||||
|
'''
|
||||||
|
try:
|
||||||
|
check_output(['pvdisplay', block_device])
|
||||||
|
return True
|
||||||
|
except CalledProcessError:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def remove_lvm_physical_volume(block_device):
|
||||||
|
'''
|
||||||
|
Remove LVM PV signatures from a given block device.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to scrub.
|
||||||
|
'''
|
||||||
|
p = Popen(['pvremove', '-ff', block_device],
|
||||||
|
stdin=PIPE)
|
||||||
|
p.communicate(input='y\n')
|
||||||
|
|
||||||
|
|
||||||
|
def list_lvm_volume_group(block_device):
|
||||||
|
'''
|
||||||
|
List LVM volume group associated with a given block device.
|
||||||
|
|
||||||
|
Assumes block device is a valid LVM PV.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to inspect.
|
||||||
|
|
||||||
|
:returns: str: Name of volume group associated with block device or None
|
||||||
|
'''
|
||||||
|
vg = None
|
||||||
|
pvd = check_output(['pvdisplay', block_device]).splitlines()
|
||||||
|
for l in pvd:
|
||||||
|
l = l.decode('UTF-8')
|
||||||
|
if l.strip().startswith('VG Name'):
|
||||||
|
vg = ' '.join(l.strip().split()[2:])
|
||||||
|
return vg
|
||||||
|
|
||||||
|
|
||||||
|
def create_lvm_physical_volume(block_device):
|
||||||
|
'''
|
||||||
|
Initialize a block device as an LVM physical volume.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to initialize.
|
||||||
|
|
||||||
|
'''
|
||||||
|
check_call(['pvcreate', block_device])
|
||||||
|
|
||||||
|
|
||||||
|
def create_lvm_volume_group(volume_group, block_device):
|
||||||
|
'''
|
||||||
|
Create an LVM volume group backed by a given block device.
|
||||||
|
|
||||||
|
Assumes block device has already been initialized as an LVM PV.
|
||||||
|
|
||||||
|
:param volume_group: str: Name of volume group to create.
|
||||||
|
:block_device: str: Full path of PV-initialized block device.
|
||||||
|
'''
|
||||||
|
check_call(['vgcreate', volume_group, block_device])
|
70
hooks/charmhelpers/contrib/storage/linux/utils.py
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
from stat import S_ISBLK
|
||||||
|
|
||||||
|
from subprocess import (
|
||||||
|
check_call,
|
||||||
|
check_output,
|
||||||
|
call
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def is_block_device(path):
|
||||||
|
'''
|
||||||
|
Confirm device at path is a valid block device node.
|
||||||
|
|
||||||
|
:returns: boolean: True if path is a block device, False if not.
|
||||||
|
'''
|
||||||
|
if not os.path.exists(path):
|
||||||
|
return False
|
||||||
|
return S_ISBLK(os.stat(path).st_mode)
|
||||||
|
|
||||||
|
|
||||||
|
def zap_disk(block_device):
|
||||||
|
'''
|
||||||
|
Clear a block device of partition table. Relies on sgdisk, which is
|
||||||
|
installed as part of the 'gdisk' package in Ubuntu.
|
||||||
|
|
||||||
|
:param block_device: str: Full path of block device to clean.
|
||||||
|
'''
|
||||||
|
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
|
||||||
|
call(['sgdisk', '--zap-all', '--mbrtogpt',
|
||||||
|
'--clear', block_device])
|
||||||
|
dev_end = check_output(['blockdev', '--getsz',
|
||||||
|
block_device]).decode('UTF-8')
|
||||||
|
gpt_end = int(dev_end.split()[0]) - 100
|
||||||
|
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
|
||||||
|
'bs=1M', 'count=1'])
|
||||||
|
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
|
||||||
|
'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
|
||||||
|
|
||||||
|
|
||||||
|
def is_device_mounted(device):
|
||||||
|
'''Given a device path, return True if that device is mounted, and False
|
||||||
|
if it isn't.
|
||||||
|
|
||||||
|
:param device: str: Full path of the device to check.
|
||||||
|
:returns: boolean: True if the path represents a mounted device, False if
|
||||||
|
it doesn't.
|
||||||
|
'''
|
||||||
|
is_partition = bool(re.search(r".*[0-9]+\b", device))
|
||||||
|
out = check_output(['mount']).decode('UTF-8')
|
||||||
|
if is_partition:
|
||||||
|
return bool(re.search(device + r"\b", out))
|
||||||
|
return bool(re.search(device + r"[0-9]+\b", out))
|
15
hooks/charmhelpers/core/__init__.py
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
57
hooks/charmhelpers/core/decorators.py
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#
|
||||||
|
# Copyright 2014 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Edward Hope-Morley <opentastic@gmail.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
    """If the decorated function raises exception exc_type, allow num_retries
    retry attempts before raising the exception.
    """
    def _retry_on_exception_inner_1(f):
        def _retry_on_exception_inner_2(*args, **kwargs):
            retries = num_retries
            multiplier = 1
            while True:
                try:
                    return f(*args, **kwargs)
                except exc_type:
                    if not retries:
                        raise

                    delay = base_delay * multiplier
                    multiplier += 1
                    log("Retrying '%s' %d more times (delay=%s)" %
                        (f.__name__, retries, delay), level=INFO)
                    retries -= 1
                    if delay:
                        time.sleep(delay)

        return _retry_on_exception_inner_2

    return _retry_on_exception_inner_1
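Usage sketch for retry_on_exception (the wrapped command is hypothetical):

from subprocess import CalledProcessError, check_call

@retry_on_exception(3, base_delay=5, exc_type=CalledProcessError)
def restart_service():
    # retried up to 3 times, with 5s, 10s, 15s back-off between attempts
    check_call(['service', 'plumgrid', 'restart'])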
|
134
hooks/charmhelpers/core/fstab.py
Normal file
134
hooks/charmhelpers/core/fstab.py
Normal file
@ -0,0 +1,134 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import io
|
||||||
|
import os
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Fstab(io.FileIO):
|
||||||
|
"""This class extends file in order to implement a file reader/writer
|
||||||
|
for file `/etc/fstab`
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Entry(object):
|
||||||
|
"""Entry class represents a non-comment line on the `/etc/fstab` file
|
||||||
|
"""
|
||||||
|
def __init__(self, device, mountpoint, filesystem,
|
||||||
|
options, d=0, p=0):
|
||||||
|
self.device = device
|
||||||
|
self.mountpoint = mountpoint
|
||||||
|
self.filesystem = filesystem
|
||||||
|
|
||||||
|
if not options:
|
||||||
|
options = "defaults"
|
||||||
|
|
||||||
|
self.options = options
|
||||||
|
self.d = int(d)
|
||||||
|
self.p = int(p)
|
||||||
|
|
||||||
|
def __eq__(self, o):
|
||||||
|
return str(self) == str(o)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return "{} {} {} {} {} {}".format(self.device,
|
||||||
|
self.mountpoint,
|
||||||
|
self.filesystem,
|
||||||
|
self.options,
|
||||||
|
self.d,
|
||||||
|
self.p)
|
||||||
|
|
||||||
|
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
|
||||||
|
|
||||||
|
def __init__(self, path=None):
|
||||||
|
if path:
|
||||||
|
self._path = path
|
||||||
|
else:
|
||||||
|
self._path = self.DEFAULT_PATH
|
||||||
|
super(Fstab, self).__init__(self._path, 'rb+')
|
||||||
|
|
||||||
|
def _hydrate_entry(self, line):
|
||||||
|
# NOTE: use split with no arguments to split on any
|
||||||
|
# whitespace including tabs
|
||||||
|
return Fstab.Entry(*filter(
|
||||||
|
lambda x: x not in ('', None),
|
||||||
|
line.strip("\n").split()))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def entries(self):
|
||||||
|
self.seek(0)
|
||||||
|
for line in self.readlines():
|
||||||
|
line = line.decode('us-ascii')
|
||||||
|
try:
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
yield self._hydrate_entry(line)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_entry_by_attr(self, attr, value):
|
||||||
|
for entry in self.entries:
|
||||||
|
e_attr = getattr(entry, attr)
|
||||||
|
if e_attr == value:
|
||||||
|
return entry
|
||||||
|
return None
|
||||||
|
|
||||||
|
def add_entry(self, entry):
|
||||||
|
if self.get_entry_by_attr('device', entry.device):
|
||||||
|
return False
|
||||||
|
|
||||||
|
self.write((str(entry) + '\n').encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return entry
|
||||||
|
|
||||||
|
def remove_entry(self, entry):
|
||||||
|
self.seek(0)
|
||||||
|
|
||||||
|
lines = [l.decode('us-ascii') for l in self.readlines()]
|
||||||
|
|
||||||
|
found = False
|
||||||
|
for index, line in enumerate(lines):
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
if self._hydrate_entry(line) == entry:
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
return False
|
||||||
|
|
||||||
|
lines.remove(line)
|
||||||
|
|
||||||
|
self.seek(0)
|
||||||
|
self.write(''.join(lines).encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return True
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def remove_by_mountpoint(cls, mountpoint, path=None):
|
||||||
|
fstab = cls(path=path)
|
||||||
|
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
|
||||||
|
if entry:
|
||||||
|
return fstab.remove_entry(entry)
|
||||||
|
return False
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def add(cls, device, mountpoint, filesystem, options=None, path=None):
|
||||||
|
return cls(path=path).add_entry(Fstab.Entry(device,
|
||||||
|
mountpoint, filesystem,
|
||||||
|
options=options))
|
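For orientation (not part of this commit): the Fstab helper above is normally driven through host.fstab_add and host.fstab_remove further down, but it can also be used directly. A minimal sketch, assuming a scratch copy of an fstab file that already exists, rather than the real /etc/fstab:

from charmhelpers.core.fstab import Fstab

# Add an entry; path= is an assumption for the example (the file must exist,
# since Fstab opens it 'rb+'). Omitting path= operates on /etc/fstab.
Fstab.add('/dev/vdb', '/srv/data', 'ext4', options='noatime', path='/tmp/fstab.test')

# Look the entry up by mountpoint and remove it again.
fstab = Fstab(path='/tmp/fstab.test')
entry = fstab.get_entry_by_attr('mountpoint', '/srv/data')
print(str(entry))  # "/dev/vdb /srv/data ext4 noatime 0 0"
Fstab.remove_by_mountpoint('/srv/data', path='/tmp/fstab.test')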
683
hooks/charmhelpers/core/hookenv.py
Normal file
@@ -0,0 +1,683 @@
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"Interactions with the Juju environment"
|
||||||
|
# Copyright 2013 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
from functools import wraps
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
import yaml
|
||||||
|
import subprocess
|
||||||
|
import sys
|
||||||
|
import errno
|
||||||
|
import tempfile
|
||||||
|
from subprocess import CalledProcessError
|
||||||
|
|
||||||
|
import six
|
||||||
|
if not six.PY3:
|
||||||
|
from UserDict import UserDict
|
||||||
|
else:
|
||||||
|
from collections import UserDict
|
||||||
|
|
||||||
|
CRITICAL = "CRITICAL"
|
||||||
|
ERROR = "ERROR"
|
||||||
|
WARNING = "WARNING"
|
||||||
|
INFO = "INFO"
|
||||||
|
DEBUG = "DEBUG"
|
||||||
|
MARKER = object()
|
||||||
|
|
||||||
|
cache = {}
|
||||||
|
|
||||||
|
|
||||||
|
def cached(func):
|
||||||
|
"""Cache return values for multiple executions of func + args
|
||||||
|
|
||||||
|
For example::
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def unit_get(attribute):
|
||||||
|
pass
|
||||||
|
|
||||||
|
unit_get('test')
|
||||||
|
|
||||||
|
will cache the result of unit_get + 'test' for future calls.
|
||||||
|
"""
|
||||||
|
@wraps(func)
|
||||||
|
def wrapper(*args, **kwargs):
|
||||||
|
global cache
|
||||||
|
key = str((func, args, kwargs))
|
||||||
|
try:
|
||||||
|
return cache[key]
|
||||||
|
except KeyError:
|
||||||
|
pass # Drop out of the exception handler scope.
|
||||||
|
res = func(*args, **kwargs)
|
||||||
|
cache[key] = res
|
||||||
|
return res
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def flush(key):
|
||||||
|
"""Flushes any entries from function cache where the
|
||||||
|
key is found in the function+args """
|
||||||
|
flush_list = []
|
||||||
|
for item in cache:
|
||||||
|
if key in item:
|
||||||
|
flush_list.append(item)
|
||||||
|
for item in flush_list:
|
||||||
|
del cache[item]
|
||||||
|
|
||||||
|
|
||||||
|
def log(message, level=None):
|
||||||
|
"""Write a message to the juju log"""
|
||||||
|
command = ['juju-log']
|
||||||
|
if level:
|
||||||
|
command += ['-l', level]
|
||||||
|
if not isinstance(message, six.string_types):
|
||||||
|
message = repr(message)
|
||||||
|
command += [message]
|
||||||
|
# Missing juju-log should not cause failures in unit tests
|
||||||
|
# Send log output to stderr
|
||||||
|
try:
|
||||||
|
subprocess.call(command)
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
if level:
|
||||||
|
message = "{}: {}".format(level, message)
|
||||||
|
message = "juju-log: {}".format(message)
|
||||||
|
print(message, file=sys.stderr)
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
class Serializable(UserDict):
|
||||||
|
"""Wrapper, an object that can be serialized to yaml or json"""
|
||||||
|
|
||||||
|
def __init__(self, obj):
|
||||||
|
# wrap the object
|
||||||
|
UserDict.__init__(self)
|
||||||
|
self.data = obj
|
||||||
|
|
||||||
|
def __getattr__(self, attr):
|
||||||
|
# See if this object has attribute.
|
||||||
|
if attr in ("json", "yaml", "data"):
|
||||||
|
return self.__dict__[attr]
|
||||||
|
# Check for attribute in wrapped object.
|
||||||
|
got = getattr(self.data, attr, MARKER)
|
||||||
|
if got is not MARKER:
|
||||||
|
return got
|
||||||
|
# Proxy to the wrapped object via dict interface.
|
||||||
|
try:
|
||||||
|
return self.data[attr]
|
||||||
|
except KeyError:
|
||||||
|
raise AttributeError(attr)
|
||||||
|
|
||||||
|
def __getstate__(self):
|
||||||
|
# Pickle as a standard dictionary.
|
||||||
|
return self.data
|
||||||
|
|
||||||
|
def __setstate__(self, state):
|
||||||
|
# Unpickle into our wrapper.
|
||||||
|
self.data = state
|
||||||
|
|
||||||
|
def json(self):
|
||||||
|
"""Serialize the object to json"""
|
||||||
|
return json.dumps(self.data)
|
||||||
|
|
||||||
|
def yaml(self):
|
||||||
|
"""Serialize the object to yaml"""
|
||||||
|
return yaml.dump(self.data)
|
||||||
|
|
||||||
|
|
||||||
|
def execution_environment():
|
||||||
|
"""A convenient bundling of the current execution context"""
|
||||||
|
context = {}
|
||||||
|
context['conf'] = config()
|
||||||
|
if relation_id():
|
||||||
|
context['reltype'] = relation_type()
|
||||||
|
context['relid'] = relation_id()
|
||||||
|
context['rel'] = relation_get()
|
||||||
|
context['unit'] = local_unit()
|
||||||
|
context['rels'] = relations()
|
||||||
|
context['env'] = os.environ
|
||||||
|
return context
|
||||||
|
|
||||||
|
|
||||||
|
def in_relation_hook():
|
||||||
|
"""Determine whether we're running in a relation hook"""
|
||||||
|
return 'JUJU_RELATION' in os.environ
|
||||||
|
|
||||||
|
|
||||||
|
def relation_type():
|
||||||
|
"""The scope for the current relation hook"""
|
||||||
|
return os.environ.get('JUJU_RELATION', None)
|
||||||
|
|
||||||
|
|
||||||
|
def relation_id():
|
||||||
|
"""The relation ID for the current relation hook"""
|
||||||
|
return os.environ.get('JUJU_RELATION_ID', None)
|
||||||
|
|
||||||
|
|
||||||
|
def local_unit():
|
||||||
|
"""Local unit ID"""
|
||||||
|
return os.environ['JUJU_UNIT_NAME']
|
||||||
|
|
||||||
|
|
||||||
|
def remote_unit():
|
||||||
|
"""The remote unit for the current relation hook"""
|
||||||
|
return os.environ.get('JUJU_REMOTE_UNIT', None)
|
||||||
|
|
||||||
|
|
||||||
|
def service_name():
|
||||||
|
"""The name service group this unit belongs to"""
|
||||||
|
return local_unit().split('/')[0]
|
||||||
|
|
||||||
|
|
||||||
|
def hook_name():
|
||||||
|
"""The name of the currently executing hook"""
|
||||||
|
return os.path.basename(sys.argv[0])
|
||||||
|
|
||||||
|
|
||||||
|
class Config(dict):
|
||||||
|
"""A dictionary representation of the charm's config.yaml, with some
|
||||||
|
extra features:
|
||||||
|
|
||||||
|
- See which values in the dictionary have changed since the previous hook.
|
||||||
|
- For values that have changed, see what the previous value was.
|
||||||
|
- Store arbitrary data for use in a later hook.
|
||||||
|
|
||||||
|
NOTE: Do not instantiate this object directly - instead call
|
||||||
|
``hookenv.config()``, which will return an instance of :class:`Config`.
|
||||||
|
|
||||||
|
Example usage::
|
||||||
|
|
||||||
|
>>> # inside a hook
|
||||||
|
>>> from charmhelpers.core import hookenv
|
||||||
|
>>> config = hookenv.config()
|
||||||
|
>>> config['foo']
|
||||||
|
'bar'
|
||||||
|
>>> # store a new key/value for later use
|
||||||
|
>>> config['mykey'] = 'myval'
|
||||||
|
|
||||||
|
|
||||||
|
>>> # user runs `juju set mycharm foo=baz`
|
||||||
|
>>> # now we're inside subsequent config-changed hook
|
||||||
|
>>> config = hookenv.config()
|
||||||
|
>>> config['foo']
|
||||||
|
'baz'
|
||||||
|
>>> # test to see if this val has changed since last hook
|
||||||
|
>>> config.changed('foo')
|
||||||
|
True
|
||||||
|
>>> # what was the previous value?
|
||||||
|
>>> config.previous('foo')
|
||||||
|
'bar'
|
||||||
|
>>> # keys/values that we add are preserved across hooks
|
||||||
|
>>> config['mykey']
|
||||||
|
'myval'
|
||||||
|
|
||||||
|
"""
|
||||||
|
CONFIG_FILE_NAME = '.juju-persistent-config'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kw):
|
||||||
|
super(Config, self).__init__(*args, **kw)
|
||||||
|
self.implicit_save = True
|
||||||
|
self._prev_dict = None
|
||||||
|
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
||||||
|
if os.path.exists(self.path):
|
||||||
|
self.load_previous()
|
||||||
|
|
||||||
|
def __getitem__(self, key):
|
||||||
|
"""For regular dict lookups, check the current juju config first,
|
||||||
|
then the previous (saved) copy. This ensures that user-saved values
|
||||||
|
will be returned by a dict lookup.
|
||||||
|
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
return dict.__getitem__(self, key)
|
||||||
|
except KeyError:
|
||||||
|
return (self._prev_dict or {})[key]
|
||||||
|
|
||||||
|
def get(self, key, default=None):
|
||||||
|
try:
|
||||||
|
return self[key]
|
||||||
|
except KeyError:
|
||||||
|
return default
|
||||||
|
|
||||||
|
def keys(self):
|
||||||
|
prev_keys = []
|
||||||
|
if self._prev_dict is not None:
|
||||||
|
prev_keys = self._prev_dict.keys()
|
||||||
|
return list(set(prev_keys + list(dict.keys(self))))
|
||||||
|
|
||||||
|
def load_previous(self, path=None):
|
||||||
|
"""Load previous copy of config from disk.
|
||||||
|
|
||||||
|
In normal usage you don't need to call this method directly - it
|
||||||
|
is called automatically at object initialization.
|
||||||
|
|
||||||
|
:param path:
|
||||||
|
|
||||||
|
File path from which to load the previous config. If `None`,
|
||||||
|
config is loaded from the default location. If `path` is
|
||||||
|
specified, subsequent `save()` calls will write to the same
|
||||||
|
path.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.path = path or self.path
|
||||||
|
with open(self.path) as f:
|
||||||
|
self._prev_dict = json.load(f)
|
||||||
|
|
||||||
|
def changed(self, key):
|
||||||
|
"""Return True if the current value for this key is different from
|
||||||
|
the previous value.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._prev_dict is None:
|
||||||
|
return True
|
||||||
|
return self.previous(key) != self.get(key)
|
||||||
|
|
||||||
|
def previous(self, key):
|
||||||
|
"""Return previous value for this key, or None if there
|
||||||
|
is no previous value.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._prev_dict:
|
||||||
|
return self._prev_dict.get(key)
|
||||||
|
return None
|
||||||
|
|
||||||
|
def save(self):
|
||||||
|
"""Save this config to disk.
|
||||||
|
|
||||||
|
If the charm is using the :mod:`Services Framework <services.base>`
|
||||||
|
or :meth:'@hook <Hooks.hook>' decorator, this
|
||||||
|
is called automatically at the end of successful hook execution.
|
||||||
|
Otherwise, it should be called directly by user code.
|
||||||
|
|
||||||
|
To disable automatic saves, set ``implicit_save=False`` on this
|
||||||
|
instance.
|
||||||
|
|
||||||
|
"""
|
||||||
|
if self._prev_dict:
|
||||||
|
for k, v in six.iteritems(self._prev_dict):
|
||||||
|
if k not in self:
|
||||||
|
self[k] = v
|
||||||
|
with open(self.path, 'w') as f:
|
||||||
|
json.dump(self, f)
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def config(scope=None):
|
||||||
|
"""Juju charm configuration"""
|
||||||
|
config_cmd_line = ['config-get']
|
||||||
|
if scope is not None:
|
||||||
|
config_cmd_line.append(scope)
|
||||||
|
config_cmd_line.append('--format=json')
|
||||||
|
try:
|
||||||
|
config_data = json.loads(
|
||||||
|
subprocess.check_output(config_cmd_line).decode('UTF-8'))
|
||||||
|
if scope is not None:
|
||||||
|
return config_data
|
||||||
|
return Config(config_data)
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_get(attribute=None, unit=None, rid=None):
|
||||||
|
"""Get relation information"""
|
||||||
|
_args = ['relation-get', '--format=json']
|
||||||
|
if rid:
|
||||||
|
_args.append('-r')
|
||||||
|
_args.append(rid)
|
||||||
|
_args.append(attribute or '-')
|
||||||
|
if unit:
|
||||||
|
_args.append(unit)
|
||||||
|
try:
|
||||||
|
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
except CalledProcessError as e:
|
||||||
|
if e.returncode == 2:
|
||||||
|
return None
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def relation_set(relation_id=None, relation_settings=None, **kwargs):
|
||||||
|
"""Set relation information for the current unit"""
|
||||||
|
relation_settings = relation_settings if relation_settings else {}
|
||||||
|
relation_cmd_line = ['relation-set']
|
||||||
|
accepts_file = "--file" in subprocess.check_output(
|
||||||
|
relation_cmd_line + ["--help"])
|
||||||
|
if relation_id is not None:
|
||||||
|
relation_cmd_line.extend(('-r', relation_id))
|
||||||
|
settings = relation_settings.copy()
|
||||||
|
settings.update(kwargs)
|
||||||
|
if accepts_file:
|
||||||
|
# --file was introduced in Juju 1.23.2. Use it by default if
|
||||||
|
# available, since otherwise we'll break if the relation data is
|
||||||
|
# too big. Ideally we should tell relation-set to read the data from
|
||||||
|
# stdin, but that feature is broken in 1.23.2: Bug #1454678.
|
||||||
|
with tempfile.NamedTemporaryFile(delete=False) as settings_file:
|
||||||
|
settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
|
||||||
|
subprocess.check_call(
|
||||||
|
relation_cmd_line + ["--file", settings_file.name])
|
||||||
|
os.remove(settings_file.name)
|
||||||
|
else:
|
||||||
|
for key, value in settings.items():
|
||||||
|
if value is None:
|
||||||
|
relation_cmd_line.append('{}='.format(key))
|
||||||
|
else:
|
||||||
|
relation_cmd_line.append('{}={}'.format(key, value))
|
||||||
|
subprocess.check_call(relation_cmd_line)
|
||||||
|
# Flush cache of any relation-gets for local unit
|
||||||
|
flush(local_unit())
|
||||||
|
|
||||||
|
|
||||||
|
@cached
def relation_ids(reltype=None):
    """A list of relation_ids"""
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:
        relid_cmd_line.append(reltype)
        return json.loads(
            subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
    return []
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def related_units(relid=None):
|
||||||
|
"""A list of related units"""
|
||||||
|
relid = relid or relation_id()
|
||||||
|
units_cmd_line = ['relation-list', '--format=json']
|
||||||
|
if relid is not None:
|
||||||
|
units_cmd_line.extend(('-r', relid))
|
||||||
|
return json.loads(
|
||||||
|
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_for_unit(unit=None, rid=None):
|
||||||
|
"""Get the json represenation of a unit's relation"""
|
||||||
|
unit = unit or remote_unit()
|
||||||
|
relation = relation_get(unit=unit, rid=rid)
|
||||||
|
for key in relation:
|
||||||
|
if key.endswith('-list'):
|
||||||
|
relation[key] = relation[key].split()
|
||||||
|
relation['__unit__'] = unit
|
||||||
|
return relation
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations_for_id(relid=None):
|
||||||
|
"""Get relations of a specific relation ID"""
|
||||||
|
relation_data = []
|
||||||
|
relid = relid or relation_ids()
|
||||||
|
for unit in related_units(relid):
|
||||||
|
unit_data = relation_for_unit(unit, relid)
|
||||||
|
unit_data['__relid__'] = relid
|
||||||
|
relation_data.append(unit_data)
|
||||||
|
return relation_data
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations_of_type(reltype=None):
|
||||||
|
"""Get relations of a specific type"""
|
||||||
|
relation_data = []
|
||||||
|
reltype = reltype or relation_type()
|
||||||
|
for relid in relation_ids(reltype):
|
||||||
|
for relation in relations_for_id(relid):
|
||||||
|
relation['__relid__'] = relid
|
||||||
|
relation_data.append(relation)
|
||||||
|
return relation_data
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def metadata():
|
||||||
|
"""Get the current charm metadata.yaml contents as a python object"""
|
||||||
|
with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
|
||||||
|
return yaml.safe_load(md)
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relation_types():
|
||||||
|
"""Get a list of relation types supported by this charm"""
|
||||||
|
rel_types = []
|
||||||
|
md = metadata()
|
||||||
|
for key in ('provides', 'requires', 'peers'):
|
||||||
|
section = md.get(key)
|
||||||
|
if section:
|
||||||
|
rel_types.extend(section.keys())
|
||||||
|
return rel_types
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def charm_name():
|
||||||
|
"""Get the name of the current charm as is specified on metadata.yaml"""
|
||||||
|
return metadata().get('name')
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def relations():
|
||||||
|
"""Get a nested dictionary of relation data for all related units"""
|
||||||
|
rels = {}
|
||||||
|
for reltype in relation_types():
|
||||||
|
relids = {}
|
||||||
|
for relid in relation_ids(reltype):
|
||||||
|
units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
|
||||||
|
for unit in related_units(relid):
|
||||||
|
reldata = relation_get(unit=unit, rid=relid)
|
||||||
|
units[unit] = reldata
|
||||||
|
relids[relid] = units
|
||||||
|
rels[reltype] = relids
|
||||||
|
return rels
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def is_relation_made(relation, keys='private-address'):
|
||||||
|
'''
|
||||||
|
Determine whether a relation is established by checking for
|
||||||
|
presence of key(s). If a list of keys is provided, they
|
||||||
|
must all be present for the relation to be identified as made
|
||||||
|
'''
|
||||||
|
if isinstance(keys, str):
|
||||||
|
keys = [keys]
|
||||||
|
for r_id in relation_ids(relation):
|
||||||
|
for unit in related_units(r_id):
|
||||||
|
context = {}
|
||||||
|
for k in keys:
|
||||||
|
context[k] = relation_get(k, rid=r_id,
|
||||||
|
unit=unit)
|
||||||
|
if None not in context.values():
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def open_port(port, protocol="TCP"):
|
||||||
|
"""Open a service network port"""
|
||||||
|
_args = ['open-port']
|
||||||
|
_args.append('{}/{}'.format(port, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
def close_port(port, protocol="TCP"):
|
||||||
|
"""Close a service network port"""
|
||||||
|
_args = ['close-port']
|
||||||
|
_args.append('{}/{}'.format(port, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def unit_get(attribute):
|
||||||
|
"""Get the unit ID for the remote unit"""
|
||||||
|
_args = ['unit-get', '--format=json', attribute]
|
||||||
|
try:
|
||||||
|
return json.loads(subprocess.check_output(_args).decode('UTF-8'))
|
||||||
|
except ValueError:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def unit_public_ip():
|
||||||
|
"""Get this unit's public IP address"""
|
||||||
|
return unit_get('public-address')
|
||||||
|
|
||||||
|
|
||||||
|
def unit_private_ip():
|
||||||
|
"""Get this unit's private IP address"""
|
||||||
|
return unit_get('private-address')
|
||||||
|
|
||||||
|
|
||||||
|
class UnregisteredHookError(Exception):
|
||||||
|
"""Raised when an undefined hook is called"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class Hooks(object):
|
||||||
|
"""A convenient handler for hook functions.
|
||||||
|
|
||||||
|
Example::
|
||||||
|
|
||||||
|
hooks = Hooks()
|
||||||
|
|
||||||
|
# register a hook, taking its name from the function name
|
||||||
|
@hooks.hook()
|
||||||
|
def install():
|
||||||
|
pass # your code here
|
||||||
|
|
||||||
|
# register a hook, providing a custom hook name
|
||||||
|
@hooks.hook("config-changed")
|
||||||
|
def config_changed():
|
||||||
|
pass # your code here
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
# execute a hook based on the name the program is called by
|
||||||
|
hooks.execute(sys.argv)
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, config_save=True):
|
||||||
|
super(Hooks, self).__init__()
|
||||||
|
self._hooks = {}
|
||||||
|
self._config_save = config_save
|
||||||
|
|
||||||
|
def register(self, name, function):
|
||||||
|
"""Register a hook"""
|
||||||
|
self._hooks[name] = function
|
||||||
|
|
||||||
|
def execute(self, args):
|
||||||
|
"""Execute a registered hook based on args[0]"""
|
||||||
|
hook_name = os.path.basename(args[0])
|
||||||
|
if hook_name in self._hooks:
|
||||||
|
self._hooks[hook_name]()
|
||||||
|
if self._config_save:
|
||||||
|
cfg = config()
|
||||||
|
if cfg.implicit_save:
|
||||||
|
cfg.save()
|
||||||
|
else:
|
||||||
|
raise UnregisteredHookError(hook_name)
|
||||||
|
|
||||||
|
def hook(self, *hook_names):
|
||||||
|
"""Decorator, registering them as hooks"""
|
||||||
|
def wrapper(decorated):
|
||||||
|
for hook_name in hook_names:
|
||||||
|
self.register(hook_name, decorated)
|
||||||
|
else:
|
||||||
|
self.register(decorated.__name__, decorated)
|
||||||
|
if '_' in decorated.__name__:
|
||||||
|
self.register(
|
||||||
|
decorated.__name__.replace('_', '-'), decorated)
|
||||||
|
return decorated
|
||||||
|
return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
def charm_dir():
|
||||||
|
"""Return the root directory of the current charm"""
|
||||||
|
return os.environ.get('CHARM_DIR')
|
||||||
|
|
||||||
|
|
||||||
|
@cached
|
||||||
|
def action_get(key=None):
|
||||||
|
"""Gets the value of an action parameter, or all key/value param pairs"""
|
||||||
|
cmd = ['action-get']
|
||||||
|
if key is not None:
|
||||||
|
cmd.append(key)
|
||||||
|
cmd.append('--format=json')
|
||||||
|
action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
|
||||||
|
return action_data
|
||||||
|
|
||||||
|
|
||||||
|
def action_set(values):
|
||||||
|
"""Sets the values to be returned after the action finishes"""
|
||||||
|
cmd = ['action-set']
|
||||||
|
for k, v in list(values.items()):
|
||||||
|
cmd.append('{}={}'.format(k, v))
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def action_fail(message):
|
||||||
|
"""Sets the action status to failed and sets the error message.
|
||||||
|
|
||||||
|
The results set by action_set are preserved."""
|
||||||
|
subprocess.check_call(['action-fail', message])
|
||||||
|
|
||||||
|
|
||||||
|
def status_set(workload_state, message):
|
||||||
|
"""Set the workload state with a message
|
||||||
|
|
||||||
|
Use status-set to set the workload state with a message which is visible
|
||||||
|
to the user via juju status. If the status-set command is not found then
|
||||||
|
assume this is juju < 1.23 and juju-log the message instead.
|
||||||
|
|
||||||
|
workload_state -- valid juju workload state.
|
||||||
|
message -- status update message
|
||||||
|
"""
|
||||||
|
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
|
||||||
|
if workload_state not in valid_states:
|
||||||
|
raise ValueError(
|
||||||
|
'{!r} is not a valid workload state'.format(workload_state)
|
||||||
|
)
|
||||||
|
cmd = ['status-set', workload_state, message]
|
||||||
|
try:
|
||||||
|
ret = subprocess.call(cmd)
|
||||||
|
if ret == 0:
|
||||||
|
return
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno != errno.ENOENT:
|
||||||
|
raise
|
||||||
|
log_message = 'status-set failed: {} {}'.format(workload_state,
|
||||||
|
message)
|
||||||
|
log(log_message, level='INFO')
|
||||||
|
|
||||||
|
|
||||||
|
def status_get():
|
||||||
|
"""Retrieve the previously set juju workload state
|
||||||
|
|
||||||
|
If the status-set command is not found then assume this is juju < 1.23 and
|
||||||
|
return 'unknown'
|
||||||
|
"""
|
||||||
|
cmd = ['status-get']
|
||||||
|
try:
|
||||||
|
raw_status = subprocess.check_output(cmd, universal_newlines=True)
|
||||||
|
status = raw_status.rstrip()
|
||||||
|
return status
|
||||||
|
except OSError as e:
|
||||||
|
if e.errno == errno.ENOENT:
|
||||||
|
return 'unknown'
|
||||||
|
else:
|
||||||
|
raise
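hookenv.py above provides the hook dispatch and config machinery the charm's hook scripts build on. A minimal sketch of how a hook script typically wires it together; the option name used here is an assumption for illustration and is not part of this commit:

#!/usr/bin/env python
import sys

from charmhelpers.core import hookenv

hooks = hookenv.Hooks()


@hooks.hook('install')
def install():
    hookenv.log('installing', level=hookenv.INFO)


@hooks.hook('config-changed')
def config_changed():
    config = hookenv.config()
    # 'plumgrid-virtual-ip' is an assumed option name, purely illustrative.
    if config.changed('plumgrid-virtual-ip'):
        hookenv.log('virtual IP changed from {} to {}'.format(
            config.previous('plumgrid-virtual-ip'),
            config['plumgrid-virtual-ip']))


if __name__ == '__main__':
    # Dispatch to the hook matching the name this script was invoked as.
    hooks.execute(sys.argv)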
450
hooks/charmhelpers/core/host.py
Normal file
@@ -0,0 +1,450 @@
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
"""Tools for working with the host system"""
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||||
|
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import pwd
|
||||||
|
import grp
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
import subprocess
|
||||||
|
import hashlib
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from collections import OrderedDict
|
||||||
|
|
||||||
|
import six
|
||||||
|
|
||||||
|
from .hookenv import log
|
||||||
|
from .fstab import Fstab
|
||||||
|
|
||||||
|
|
||||||
|
def service_start(service_name):
|
||||||
|
"""Start a system service"""
|
||||||
|
return service('start', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_stop(service_name):
|
||||||
|
"""Stop a system service"""
|
||||||
|
return service('stop', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_restart(service_name):
|
||||||
|
"""Restart a system service"""
|
||||||
|
return service('restart', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_reload(service_name, restart_on_failure=False):
|
||||||
|
"""Reload a system service, optionally falling back to restart if
|
||||||
|
reload fails"""
|
||||||
|
service_result = service('reload', service_name)
|
||||||
|
if not service_result and restart_on_failure:
|
||||||
|
service_result = service('restart', service_name)
|
||||||
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
|
def service(action, service_name):
|
||||||
|
"""Control a system service"""
|
||||||
|
cmd = ['service', service_name, action]
|
||||||
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
|
def service_running(service):
|
||||||
|
"""Determine whether a system service is running"""
|
||||||
|
try:
|
||||||
|
output = subprocess.check_output(
|
||||||
|
['service', service, 'status'],
|
||||||
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
if ("start/running" in output or "is running" in output):
|
||||||
|
return True
|
||||||
|
else:
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
def service_available(service_name):
|
||||||
|
"""Determine whether a system service is available"""
|
||||||
|
try:
|
||||||
|
subprocess.check_output(
|
||||||
|
['service', service_name, 'status'],
|
||||||
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
return b'unrecognized service' not in e.output
|
||||||
|
else:
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def adduser(username, password=None, shell='/bin/bash', system_user=False):
|
||||||
|
"""Add a user to the system"""
|
||||||
|
try:
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
log('user {0} already exists!'.format(username))
|
||||||
|
except KeyError:
|
||||||
|
log('creating user {0}'.format(username))
|
||||||
|
cmd = ['useradd']
|
||||||
|
if system_user or password is None:
|
||||||
|
cmd.append('--system')
|
||||||
|
else:
|
||||||
|
cmd.extend([
|
||||||
|
'--create-home',
|
||||||
|
'--shell', shell,
|
||||||
|
'--password', password,
|
||||||
|
])
|
||||||
|
cmd.append(username)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def add_group(group_name, system_group=False):
|
||||||
|
"""Add a group to the system"""
|
||||||
|
try:
|
||||||
|
group_info = grp.getgrnam(group_name)
|
||||||
|
log('group {0} already exists!'.format(group_name))
|
||||||
|
except KeyError:
|
||||||
|
log('creating group {0}'.format(group_name))
|
||||||
|
cmd = ['addgroup']
|
||||||
|
if system_group:
|
||||||
|
cmd.append('--system')
|
||||||
|
else:
|
||||||
|
cmd.extend([
|
||||||
|
'--group',
|
||||||
|
])
|
||||||
|
cmd.append(group_name)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
group_info = grp.getgrnam(group_name)
|
||||||
|
return group_info
|
||||||
|
|
||||||
|
|
||||||
|
def add_user_to_group(username, group):
|
||||||
|
"""Add a user to a group"""
|
||||||
|
cmd = [
|
||||||
|
'gpasswd', '-a',
|
||||||
|
username,
|
||||||
|
group
|
||||||
|
]
|
||||||
|
log("Adding user {} to group {}".format(username, group))
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def rsync(from_path, to_path, flags='-r', options=None):
|
||||||
|
"""Replicate the contents of a path"""
|
||||||
|
options = options or ['--delete', '--executability']
|
||||||
|
cmd = ['/usr/bin/rsync', flags]
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append(from_path)
|
||||||
|
cmd.append(to_path)
|
||||||
|
log(" ".join(cmd))
|
||||||
|
return subprocess.check_output(cmd).decode('UTF-8').strip()
|
||||||
|
|
||||||
|
|
||||||
|
def symlink(source, destination):
|
||||||
|
"""Create a symbolic link"""
|
||||||
|
log("Symlinking {} as {}".format(source, destination))
|
||||||
|
cmd = [
|
||||||
|
'ln',
|
||||||
|
'-sf',
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
|
||||||
|
"""Create a directory"""
|
||||||
|
log("Making dir {} {}:{} {:o}".format(path, owner, group,
|
||||||
|
perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
realpath = os.path.abspath(path)
|
||||||
|
path_exists = os.path.exists(realpath)
|
||||||
|
if path_exists and force:
|
||||||
|
if not os.path.isdir(realpath):
|
||||||
|
log("Removing non-directory file {} prior to mkdir()".format(path))
|
||||||
|
os.unlink(realpath)
|
||||||
|
os.makedirs(realpath, perms)
|
||||||
|
elif not path_exists:
|
||||||
|
os.makedirs(realpath, perms)
|
||||||
|
os.chown(realpath, uid, gid)
|
||||||
|
os.chmod(realpath, perms)
|
||||||
|
|
||||||
|
|
||||||
|
def write_file(path, content, owner='root', group='root', perms=0o444):
|
||||||
|
"""Create or overwrite a file with the contents of a byte string."""
|
||||||
|
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
with open(path, 'wb') as target:
|
||||||
|
os.fchown(target.fileno(), uid, gid)
|
||||||
|
os.fchmod(target.fileno(), perms)
|
||||||
|
target.write(content)
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_remove(mp):
|
||||||
|
"""Remove the given mountpoint entry from /etc/fstab
|
||||||
|
"""
|
||||||
|
return Fstab.remove_by_mountpoint(mp)
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_add(dev, mp, fs, options=None):
|
||||||
|
"""Adds the given device entry to the /etc/fstab file
|
||||||
|
"""
|
||||||
|
return Fstab.add(dev, mp, fs, options=options)
|
||||||
|
|
||||||
|
|
||||||
|
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
|
||||||
|
"""Mount a filesystem at a particular mountpoint"""
|
||||||
|
cmd_args = ['mount']
|
||||||
|
if options is not None:
|
||||||
|
cmd_args.extend(['-o', options])
|
||||||
|
cmd_args.extend([device, mountpoint])
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if persist:
|
||||||
|
return fstab_add(device, mountpoint, filesystem, options=options)
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def umount(mountpoint, persist=False):
|
||||||
|
"""Unmount a filesystem"""
|
||||||
|
cmd_args = ['umount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if persist:
|
||||||
|
return fstab_remove(mountpoint)
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def mounts():
|
||||||
|
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
|
||||||
|
with open('/proc/mounts') as f:
|
||||||
|
# [['/mount/point','/dev/path'],[...]]
|
||||||
|
system_mounts = [m[1::-1] for m in [l.strip().split()
|
||||||
|
for l in f.readlines()]]
|
||||||
|
return system_mounts
|
||||||
|
|
||||||
|
|
||||||
|
def file_hash(path, hash_type='md5'):
|
||||||
|
"""
|
||||||
|
Generate a hash checksum of the contents of 'path' or None if not found.
|
||||||
|
|
||||||
|
:param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
|
||||||
|
such as md5, sha1, sha256, sha512, etc.
|
||||||
|
"""
|
||||||
|
if os.path.exists(path):
|
||||||
|
h = getattr(hashlib, hash_type)()
|
||||||
|
with open(path, 'rb') as source:
|
||||||
|
h.update(source.read())
|
||||||
|
return h.hexdigest()
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def check_hash(path, checksum, hash_type='md5'):
|
||||||
|
"""
|
||||||
|
Validate a file using a cryptographic checksum.
|
||||||
|
|
||||||
|
:param str checksum: Value of the checksum used to validate the file.
|
||||||
|
:param str hash_type: Hash algorithm used to generate `checksum`.
|
||||||
|
Can be any hash algorithm supported by :mod:`hashlib`,
|
||||||
|
such as md5, sha1, sha256, sha512, etc.
|
||||||
|
:raises ChecksumError: If the file fails the checksum
|
||||||
|
|
||||||
|
"""
|
||||||
|
actual_checksum = file_hash(path, hash_type)
|
||||||
|
if checksum != actual_checksum:
|
||||||
|
raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
|
||||||
|
|
||||||
|
|
||||||
|
class ChecksumError(ValueError):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
def restart_on_change(restart_map, stopstart=False):
|
||||||
|
"""Restart services based on configuration files changing
|
||||||
|
|
||||||
|
This function is used a decorator, for example::
|
||||||
|
|
||||||
|
@restart_on_change({
|
||||||
|
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
|
||||||
|
})
|
||||||
|
def ceph_client_changed():
|
||||||
|
pass # your code here
|
||||||
|
|
||||||
|
In this example, the cinder-api and cinder-volume services
|
||||||
|
would be restarted if /etc/ceph/ceph.conf is changed by the
|
||||||
|
ceph_client_changed function.
|
||||||
|
"""
|
||||||
|
def wrap(f):
|
||||||
|
def wrapped_f(*args, **kwargs):
|
||||||
|
checksums = {}
|
||||||
|
for path in restart_map:
|
||||||
|
checksums[path] = file_hash(path)
|
||||||
|
f(*args, **kwargs)
|
||||||
|
restarts = []
|
||||||
|
for path in restart_map:
|
||||||
|
if checksums[path] != file_hash(path):
|
||||||
|
restarts += restart_map[path]
|
||||||
|
services_list = list(OrderedDict.fromkeys(restarts))
|
||||||
|
if not stopstart:
|
||||||
|
for service_name in services_list:
|
||||||
|
service('restart', service_name)
|
||||||
|
else:
|
||||||
|
for action in ['stop', 'start']:
|
||||||
|
for service_name in services_list:
|
||||||
|
service(action, service_name)
|
||||||
|
return wrapped_f
|
||||||
|
return wrap
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
|
||||||
|
"""Return /etc/lsb-release in a dict"""
|
||||||
|
d = {}
|
||||||
|
with open('/etc/lsb-release', 'r') as lsb:
|
||||||
|
for l in lsb:
|
||||||
|
k, v = l.split('=')
|
||||||
|
d[k.strip()] = v.strip()
|
||||||
|
return d
|
||||||
|
|
||||||
|
|
||||||
|
def pwgen(length=None):
|
||||||
|
"""Generate a random pasword."""
|
||||||
|
if length is None:
|
||||||
|
# A random length is ok to use a weak PRNG
|
||||||
|
length = random.choice(range(35, 45))
|
||||||
|
alphanumeric_chars = [
|
||||||
|
l for l in (string.ascii_letters + string.digits)
|
||||||
|
if l not in 'l0QD1vAEIOUaeiou']
|
||||||
|
# Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
|
||||||
|
# actual password
|
||||||
|
random_generator = random.SystemRandom()
|
||||||
|
random_chars = [
|
||||||
|
random_generator.choice(alphanumeric_chars) for _ in range(length)]
|
||||||
|
return(''.join(random_chars))
|
||||||
|
|
||||||
|
|
||||||
|
def list_nics(nic_type):
|
||||||
|
'''Return a list of nics of given type(s)'''
|
||||||
|
if isinstance(nic_type, six.string_types):
|
||||||
|
int_types = [nic_type]
|
||||||
|
else:
|
||||||
|
int_types = nic_type
|
||||||
|
interfaces = []
|
||||||
|
for int_type in int_types:
|
||||||
|
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
|
||||||
|
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||||
|
ip_output = (line for line in ip_output if line)
|
||||||
|
for line in ip_output:
|
||||||
|
if line.split()[1].startswith(int_type):
|
||||||
|
matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line)
|
||||||
|
if matched:
|
||||||
|
interface = matched.groups()[0]
|
||||||
|
else:
|
||||||
|
interface = line.split()[1].replace(":", "")
|
||||||
|
interfaces.append(interface)
|
||||||
|
|
||||||
|
return interfaces
|
||||||
|
|
||||||
|
|
||||||
|
def set_nic_mtu(nic, mtu):
|
||||||
|
'''Set MTU on a network interface'''
|
||||||
|
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def get_nic_mtu(nic):
|
||||||
|
cmd = ['ip', 'addr', 'show', nic]
|
||||||
|
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||||
|
mtu = ""
|
||||||
|
for line in ip_output:
|
||||||
|
words = line.split()
|
||||||
|
if 'mtu' in words:
|
||||||
|
mtu = words[words.index("mtu") + 1]
|
||||||
|
return mtu
|
||||||
|
|
||||||
|
|
||||||
|
def get_nic_hwaddr(nic):
|
||||||
|
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
|
||||||
|
ip_output = subprocess.check_output(cmd).decode('UTF-8')
|
||||||
|
hwaddr = ""
|
||||||
|
words = ip_output.split()
|
||||||
|
if 'link/ether' in words:
|
||||||
|
hwaddr = words[words.index('link/ether') + 1]
|
||||||
|
return hwaddr
|
||||||
|
|
||||||
|
|
||||||
|
def cmp_pkgrevno(package, revno, pkgcache=None):
|
||||||
|
'''Compare supplied revno with the revno of the installed package
|
||||||
|
|
||||||
|
* 1 => Installed revno is greater than supplied arg
|
||||||
|
* 0 => Installed revno is the same as supplied arg
|
||||||
|
* -1 => Installed revno is less than supplied arg
|
||||||
|
|
||||||
|
This function imports apt_cache function from charmhelpers.fetch if
|
||||||
|
the pkgcache argument is None. Be sure to add charmhelpers.fetch if
|
||||||
|
you call this function, or pass an apt_pkg.Cache() instance.
|
||||||
|
'''
|
||||||
|
import apt_pkg
|
||||||
|
if not pkgcache:
|
||||||
|
from charmhelpers.fetch import apt_cache
|
||||||
|
pkgcache = apt_cache()
|
||||||
|
pkg = pkgcache[package]
|
||||||
|
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
|
||||||
|
|
||||||
|
|
||||||
|
@contextmanager
|
||||||
|
def chdir(d):
|
||||||
|
cur = os.getcwd()
|
||||||
|
try:
|
||||||
|
yield os.chdir(d)
|
||||||
|
finally:
|
||||||
|
os.chdir(cur)
|
||||||
|
|
||||||
|
|
||||||
|
def chownr(path, owner, group, follow_links=True):
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
if follow_links:
|
||||||
|
chown = os.chown
|
||||||
|
else:
|
||||||
|
chown = os.lchown
|
||||||
|
|
||||||
|
for root, dirs, files in os.walk(path):
|
||||||
|
for name in dirs + files:
|
||||||
|
full = os.path.join(root, name)
|
||||||
|
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
|
||||||
|
if not broken_symlink:
|
||||||
|
chown(full, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
def lchownr(path, owner, group):
|
||||||
|
chownr(path, owner, group, follow_links=False)
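host.py above is mostly thin wrappers around service, useradd, mount and friends; the one piece with non-obvious behaviour is the restart_on_change decorator, which hashes the watched files before and after the wrapped call and only restarts the listed services when a hash changes. A small sketch; the config path and service name are assumptions for illustration:

from charmhelpers.core.host import restart_on_change, write_file


@restart_on_change({
    '/etc/plumgrid/plumgrid.conf': ['plumgrid'],  # assumed file and service
})
def write_config(rendered):
    # The 'plumgrid' service is restarted (or stop/started with
    # stopstart=True) only if the file content actually changes.
    write_file('/etc/plumgrid/plumgrid.conf', rendered.encode('UTF-8'))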
18
hooks/charmhelpers/core/services/__init__.py
Normal file
@@ -0,0 +1,18 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

from .base import *  # NOQA
from .helpers import *  # NOQA
329
hooks/charmhelpers/core/services/base.py
Normal file
@@ -0,0 +1,329 @@
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
from collections import Iterable, OrderedDict
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['ServiceManager', 'ManagerCallback',
|
||||||
|
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
||||||
|
'service_restart', 'service_stop']
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceManager(object):
|
||||||
|
def __init__(self, services=None):
|
||||||
|
"""
|
||||||
|
Register a list of services, given their definitions.
|
||||||
|
|
||||||
|
Service definitions are dicts in the following formats (all keys except
|
||||||
|
'service' are optional)::
|
||||||
|
|
||||||
|
{
|
||||||
|
"service": <service name>,
|
||||||
|
"required_data": <list of required data contexts>,
|
||||||
|
"provided_data": <list of provided data contexts>,
|
||||||
|
"data_ready": <one or more callbacks>,
|
||||||
|
"data_lost": <one or more callbacks>,
|
||||||
|
"start": <one or more callbacks>,
|
||||||
|
"stop": <one or more callbacks>,
|
||||||
|
"ports": <list of ports to manage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
The 'required_data' list should contain dicts of required data (or
|
||||||
|
dependency managers that act like dicts and know how to collect the data).
|
||||||
|
Only when all items in the 'required_data' list are populated are the list
|
||||||
|
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
||||||
|
information.
|
||||||
|
|
||||||
|
The 'provided_data' list should contain relation data providers, most likely
|
||||||
|
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
||||||
|
that will indicate a set of data to set on a given relation.
|
||||||
|
|
||||||
|
The 'data_ready' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
||||||
|
are fired.
|
||||||
|
|
||||||
|
The 'data_lost' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when a 'required_data' item no longer passes
|
||||||
|
`is_ready()`. Each callback will be called with the service name as the
|
||||||
|
only parameter. After all of the 'data_lost' callbacks are called,
|
||||||
|
the 'stop' callbacks are fired.
|
||||||
|
|
||||||
|
The 'start' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when starting the service, after the 'data_ready'
|
||||||
|
callbacks are complete. Each callback will be called with the service
|
||||||
|
name as the only parameter. This defaults to
|
||||||
|
`[host.service_start, services.open_ports]`.
|
||||||
|
|
||||||
|
The 'stop' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when stopping the service. If the service is
|
||||||
|
being stopped because it no longer has all of its 'required_data', this
|
||||||
|
will be called after all of the 'data_lost' callbacks are complete.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
This defaults to `[services.close_ports, host.service_stop]`.
|
||||||
|
|
||||||
|
The 'ports' value should be a list of ports to manage. The default
|
||||||
|
'start' handler will open the ports after the service is started,
|
||||||
|
and the default 'stop' handler will close the ports prior to stopping
|
||||||
|
the service.
|
||||||
|
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
The following registers an Upstart service called bingod that depends on
|
||||||
|
a mongodb relation and which runs a custom `db_migrate` function prior to
|
||||||
|
restarting the service, and a Runit service called spadesd::
|
||||||
|
|
||||||
|
manager = services.ServiceManager([
|
||||||
|
{
|
||||||
|
'service': 'bingod',
|
||||||
|
'ports': [80, 443],
|
||||||
|
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
||||||
|
'data_ready': [
|
||||||
|
services.template(source='bingod.conf'),
|
||||||
|
services.template(source='bingod.ini',
|
||||||
|
target='/etc/bingod.ini',
|
||||||
|
owner='bingo', perms=0400),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'service': 'spadesd',
|
||||||
|
'data_ready': services.template(source='spadesd_run.j2',
|
||||||
|
target='/etc/sv/spadesd/run',
|
||||||
|
perms=0555),
|
||||||
|
'start': runit_start,
|
||||||
|
'stop': runit_stop,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
manager.manage()
|
||||||
|
"""
|
||||||
|
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||||
|
self._ready = None
|
||||||
|
self.services = OrderedDict()
|
||||||
|
for service in services or []:
|
||||||
|
service_name = service['service']
|
||||||
|
self.services[service_name] = service
|
||||||
|
|
||||||
|
def manage(self):
|
||||||
|
"""
|
||||||
|
Handle the current hook by doing The Right Thing with the registered services.
|
||||||
|
"""
|
||||||
|
hook_name = hookenv.hook_name()
|
||||||
|
if hook_name == 'stop':
|
||||||
|
self.stop_services()
|
||||||
|
else:
|
||||||
|
self.provide_data()
|
||||||
|
self.reconfigure_services()
|
||||||
|
cfg = hookenv.config()
|
||||||
|
if cfg.implicit_save:
|
||||||
|
cfg.save()
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Set the relation data for each provider in the ``provided_data`` list.
|
||||||
|
|
||||||
|
A provider must have a `name` attribute, which indicates which relation
|
||||||
|
to set data on, and a `provide_data()` method, which returns a dict of
|
||||||
|
data to set.
|
||||||
|
"""
|
||||||
|
hook_name = hookenv.hook_name()
|
||||||
|
for service in self.services.values():
|
||||||
|
for provider in service.get('provided_data', []):
|
||||||
|
if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
|
||||||
|
data = provider.provide_data()
|
||||||
|
_ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
|
||||||
|
if _ready:
|
||||||
|
hookenv.relation_set(None, data)
|
||||||
|
|
||||||
|
def reconfigure_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Update all files for one or more registered services, and,
|
||||||
|
if ready, optionally restart them.
|
||||||
|
|
||||||
|
If no service names are given, reconfigures all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
if self.is_ready(service_name):
|
||||||
|
self.fire_event('data_ready', service_name)
|
||||||
|
self.fire_event('start', service_name, default=[
|
||||||
|
service_restart,
|
||||||
|
manage_ports])
|
||||||
|
self.save_ready(service_name)
|
||||||
|
else:
|
||||||
|
if self.was_ready(service_name):
|
||||||
|
self.fire_event('data_lost', service_name)
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
self.save_lost(service_name)
|
||||||
|
|
||||||
|
def stop_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Stop one or more registered services, by name.
|
||||||
|
|
||||||
|
If no service names are given, stops all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
|
||||||
|
def get_service(self, service_name):
|
||||||
|
"""
|
||||||
|
Given the name of a registered service, return its service definition.
|
||||||
|
"""
|
||||||
|
service = self.services.get(service_name)
|
||||||
|
if not service:
|
||||||
|
raise KeyError('Service not registered: %s' % service_name)
|
||||||
|
return service
|
||||||
|
|
||||||
|
def fire_event(self, event_name, service_name, default=None):
|
||||||
|
"""
|
||||||
|
Fire a data_ready, data_lost, start, or stop event on a given service.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
callbacks = service.get(event_name, default)
|
||||||
|
if not callbacks:
|
||||||
|
return
|
||||||
|
if not isinstance(callbacks, Iterable):
|
||||||
|
callbacks = [callbacks]
|
||||||
|
for callback in callbacks:
|
||||||
|
if isinstance(callback, ManagerCallback):
|
||||||
|
callback(self, service_name, event_name)
|
||||||
|
else:
|
||||||
|
callback(service_name)
|
||||||
|
|
||||||
|
def is_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Determine if a registered service is ready, by checking its 'required_data'.
|
||||||
|
|
||||||
|
A 'required_data' item can be any mapping type, and is considered ready
|
||||||
|
if `bool(item)` evaluates as True.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
reqs = service.get('required_data', [])
|
||||||
|
return all(bool(req) for req in reqs)
|
||||||
|
|
||||||
|
def _load_ready_file(self):
|
||||||
|
if self._ready is not None:
|
||||||
|
return
|
||||||
|
if os.path.exists(self._ready_file):
|
||||||
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready


class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager` The `ServiceManager` instance
        * `service_name` The name of the service it's being triggered for
        * `event_name` The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()


class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)


def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)


def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)


# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()
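For orientation, a minimal sketch of how a charm typically wires these pieces together in its hooks code. The service name, port, relation context, and template file below are illustrative placeholders rather than anything defined in this commit; only the charmhelpers imports are taken from the code being added.

# Hypothetical hooks/services.py built on the framework above.
from charmhelpers.core.services.base import ServiceManager
from charmhelpers.core.services import helpers


def manage():
    manager = ServiceManager([
        {
            'service': 'example-daemon',        # placeholder init service name
            'ports': [8080],                    # opened on start, closed on stop
            'required_data': [helpers.MysqlRelation()],
            'data_ready': [
                # Render a config file once the mysql relation is complete.
                helpers.render_template(
                    source='example.conf',
                    target='/etc/example/example.conf'),
            ],
        },
    ])
    manager.manage()


if __name__ == '__main__':
    manage()

Calling manager.manage() from each hook lets the framework decide whether to start, stop, or reconfigure the service based on which required_data contexts are ready.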
267
hooks/charmhelpers/core/services/helpers.py
Normal file
267
hooks/charmhelpers/core/services/helpers.py
Normal file
@ -0,0 +1,267 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core import templating
|
||||||
|
|
||||||
|
from charmhelpers.core.services.base import ManagerCallback
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['RelationContext', 'TemplateCallback',
|
||||||
|
'render_template', 'template']
|
||||||
|
|
||||||
|
|
||||||
|
class RelationContext(dict):
|
||||||
|
"""
|
||||||
|
Base class for a context generator that gets relation data from juju.
|
||||||
|
|
||||||
|
Subclasses must provide the attributes `name`, which is the name of the
|
||||||
|
interface of interest, `interface`, which is the type of the interface of
|
||||||
|
interest, and `required_keys`, which is the set of keys required for the
|
||||||
|
relation to be considered complete. The data for all interfaces matching
|
||||||
|
the `name` attribute that are complete will be used to populate the dictionary
|
||||||
|
values (see `get_data`, below).
|
||||||
|
|
||||||
|
The generated context will be namespaced under the relation :attr:`name`,
|
||||||
|
to prevent potential naming conflicts.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = None
|
||||||
|
interface = None
|
||||||
|
|
||||||
|
def __init__(self, name=None, additional_required_keys=None):
|
||||||
|
if not hasattr(self, 'required_keys'):
|
||||||
|
self.required_keys = []
|
||||||
|
|
||||||
|
if name is not None:
|
||||||
|
self.name = name
|
||||||
|
if additional_required_keys:
|
||||||
|
self.required_keys.extend(additional_required_keys)
|
||||||
|
self.get_data()
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the required_keys are available.
|
||||||
|
"""
|
||||||
|
return self.is_ready()
|
||||||
|
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return super(RelationContext, self).__repr__()
|
||||||
|
|
||||||
|
def is_ready(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the `required_keys` are available from any units.
|
||||||
|
"""
|
||||||
|
ready = len(self.get(self.name, [])) > 0
|
||||||
|
if not ready:
|
||||||
|
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
|
||||||
|
return ready
|
||||||
|
|
||||||
|
def _is_ready(self, unit_data):
|
||||||
|
"""
|
||||||
|
Helper method that tests a set of relation data and returns True if
|
||||||
|
all of the `required_keys` are present.
|
||||||
|
"""
|
||||||
|
return set(unit_data.keys()).issuperset(set(self.required_keys))
|
||||||
|
|
||||||
|
def get_data(self):
|
||||||
|
"""
|
||||||
|
Retrieve the relation data for each unit involved in a relation and,
|
||||||
|
if complete, store it in a list under `self[self.name]`. This
|
||||||
|
is automatically called when the RelationContext is instantiated.
|
||||||
|
|
||||||
|
The units are sorted lexicographically first by the service ID, then by
|
||||||
|
the unit ID. Thus, if an interface has two other services, 'db:1'
|
||||||
|
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
|
||||||
|
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
|
||||||
|
set of data, the relation data for the units will be stored in the
|
||||||
|
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
|
||||||
|
|
||||||
|
If you only care about a single unit on the relation, you can just
|
||||||
|
access it as `{{ interface[0]['key'] }}`. However, if you can at all
|
||||||
|
support multiple units on a relation, you should iterate over the list,
|
||||||
|
like::
|
||||||
|
|
||||||
|
{% for unit in interface -%}
|
||||||
|
{{ unit['key'] }}{% if not loop.last %},{% endif %}
|
||||||
|
{%- endfor %}
|
||||||
|
|
||||||
|
Note that since all sets of relation data from all related services and
|
||||||
|
units are in a single list, if you need to know which service or unit a
|
||||||
|
set of data came from, you'll need to extend this class to preserve
|
||||||
|
that information.
|
||||||
|
"""
|
||||||
|
if not hookenv.relation_ids(self.name):
|
||||||
|
return
|
||||||
|
|
||||||
|
ns = self.setdefault(self.name, [])
|
||||||
|
for rid in sorted(hookenv.relation_ids(self.name)):
|
||||||
|
for unit in sorted(hookenv.related_units(rid)):
|
||||||
|
reldata = hookenv.relation_get(rid=rid, unit=unit)
|
||||||
|
if self._is_ready(reldata):
|
||||||
|
ns.append(reldata)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Return data to be relation_set for this interface.
|
||||||
|
"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class MysqlRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `mysql` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'db'
|
||||||
|
interface = 'mysql'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'user', 'password', 'database']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class HttpRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `http` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'website'
|
||||||
|
interface = 'http'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'port']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
return {
|
||||||
|
'host': hookenv.unit_get('private-address'),
|
||||||
|
'port': 80,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class RequiredConfig(dict):
|
||||||
|
"""
|
||||||
|
Data context that loads config options with one or more mandatory options.
|
||||||
|
|
||||||
|
Once the required options have been changed from their default values, all
|
||||||
|
config options will be available, namespaced under `config` to prevent
|
||||||
|
potential naming conflicts (for example, between a config option and a
|
||||||
|
relation property).
|
||||||
|
|
||||||
|
:param list *args: List of options that must be changed from their default values.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
self.required_options = args
|
||||||
|
self['config'] = hookenv.config()
|
||||||
|
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
|
||||||
|
self.config = yaml.load(fp).get('options', {})
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
for option in self.required_options:
|
||||||
|
if option not in self['config']:
|
||||||
|
return False
|
||||||
|
current_value = self['config'][option]
|
||||||
|
default_value = self.config[option].get('default')
|
||||||
|
if current_value == default_value:
|
||||||
|
return False
|
||||||
|
if current_value in (None, '') and default_value in (None, ''):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def __nonzero__(self):
|
||||||
|
return self.__bool__()
|
||||||
|
|
||||||
|
|
||||||
|
class StoredContext(dict):
|
||||||
|
"""
|
||||||
|
A data context that always returns the data that it was first created with.
|
||||||
|
|
||||||
|
This is useful to do a one-time generation of things like passwords, that
|
||||||
|
will thereafter use the same value that was originally generated, instead
|
||||||
|
of generating a new value each time it is run.
|
||||||
|
"""
|
||||||
|
def __init__(self, file_name, config_data):
|
||||||
|
"""
|
||||||
|
If the file exists, populate `self` with the data from the file.
|
||||||
|
Otherwise, populate with the given data and persist it to the file.
|
||||||
|
"""
|
||||||
|
if os.path.exists(file_name):
|
||||||
|
self.update(self.read_context(file_name))
|
||||||
|
else:
|
||||||
|
self.store_context(file_name, config_data)
|
||||||
|
self.update(config_data)
|
||||||
|
|
||||||
|
def store_context(self, file_name, config_data):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'w') as file_stream:
|
||||||
|
os.fchmod(file_stream.fileno(), 0o600)
|
||||||
|
yaml.dump(config_data, file_stream)
|
||||||
|
|
||||||
|
def read_context(self, file_name):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'r') as file_stream:
|
||||||
|
data = yaml.load(file_stream)
|
||||||
|
if not data:
|
||||||
|
raise OSError("%s is empty" % file_name)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateCallback(ManagerCallback):
|
||||||
|
"""
|
||||||
|
Callback class that will render a Jinja2 template, for use as a ready
|
||||||
|
action.
|
||||||
|
|
||||||
|
:param str source: The template source file, relative to
|
||||||
|
`$CHARM_DIR/templates`
|
||||||
|
|
||||||
|
:param str target: The target to write the rendered template to
|
||||||
|
:param str owner: The owner of the rendered file
|
||||||
|
:param str group: The group of the rendered file
|
||||||
|
:param int perms: The permissions of the rendered file
|
||||||
|
"""
|
||||||
|
def __init__(self, source, target,
|
||||||
|
owner='root', group='root', perms=0o444):
|
||||||
|
self.source = source
|
||||||
|
self.target = target
|
||||||
|
self.owner = owner
|
||||||
|
self.group = group
|
||||||
|
self.perms = perms
|
||||||
|
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
service = manager.get_service(service_name)
|
||||||
|
context = {}
|
||||||
|
for ctx in service.get('required_data', []):
|
||||||
|
context.update(ctx)
|
||||||
|
templating.render(self.source, self.target, context,
|
||||||
|
self.owner, self.group, self.perms)
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience aliases for templates
|
||||||
|
render_template = template = TemplateCallback
|
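As a hedged illustration of how charms usually consume RelationContext, here is a custom context for a made-up 'backend' interface; the interface name and required keys are assumptions for the example, and instantiating it only makes sense inside a Juju hook environment.

from charmhelpers.core.services.helpers import RelationContext


class BackendRelation(RelationContext):
    """Context for a hypothetical 'backend' interface."""
    name = 'backend'                  # relation name as it would appear in metadata.yaml
    interface = 'backend'
    required_keys = ['hostname', 'port']


# Inside a hook: the context is falsy until every required key is present on
# at least one remote unit; complete unit data is namespaced under `name`.
ctx = BackendRelation()
if ctx:
    first = ctx['backend'][0]
    print(first['hostname'], first['port'])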
42
hooks/charmhelpers/core/strutils.py
Normal file
42
hooks/charmhelpers/core/strutils.py
Normal file
@ -0,0 +1,42 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

import six


def bool_from_string(value):
    """Interpret string value as boolean.

    Returns True if value translates to True otherwise False.
    """
    if isinstance(value, six.string_types):
        value = six.text_type(value)
    else:
        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
        raise ValueError(msg)

    value = value.strip().lower()

    if value in ['y', 'yes', 'true', 't', 'on']:
        return True
    elif value in ['n', 'no', 'false', 'f', 'off']:
        return False

    msg = "Unable to interpret string value '%s' as boolean" % (value)
    raise ValueError(msg)
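A few illustrative calls against bool_from_string, matching the behaviour defined above (the specific values and assertions are only examples):

from charmhelpers.core.strutils import bool_from_string

assert bool_from_string('Yes') is True      # case-insensitive, surrounding whitespace ignored
assert bool_from_string(' off ') is False
try:
    bool_from_string('maybe')               # unrecognised strings raise ValueError
except ValueError:
    pass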
56
hooks/charmhelpers/core/sysctl.py
Normal file
56
hooks/charmhelpers/core/sysctl.py
Normal file
@ -0,0 +1,56 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

import yaml

from subprocess import check_call

from charmhelpers.core.hookenv import (
    log,
    DEBUG,
    ERROR,
)

__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'


def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
    try:
        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
    except yaml.YAMLError:
        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
            level=ERROR)
        return

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
            fd.write("{}={}\n".format(key, value))

    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
        level=DEBUG)

    check_call(["sysctl", "-p", sysctl_file])
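Note that create() takes the settings as a YAML string, not a dict. A minimal sketch of a call from a hook follows; the keys, values, and target path are arbitrary examples, and the `sysctl -p` step requires root.

from charmhelpers.core.sysctl import create

create("{ net.ipv4.ip_forward: 1, vm.swappiness: 10 }",
       "/etc/sysctl.d/50-example-charm.conf")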
68
hooks/charmhelpers/core/templating.py
Normal file
68
hooks/charmhelpers/core/templating.py
Normal file
@ -0,0 +1,68 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def render(source, target, context, owner='root', group='root',
|
||||||
|
perms=0o444, templates_dir=None, encoding='UTF-8'):
|
||||||
|
"""
|
||||||
|
Render a template.
|
||||||
|
|
||||||
|
The `source` path, if not absolute, is relative to the `templates_dir`.
|
||||||
|
|
||||||
|
The `target` path should be absolute.
|
||||||
|
|
||||||
|
The context should be a dict containing the values to be replaced in the
|
||||||
|
template.
|
||||||
|
|
||||||
|
The `owner`, `group`, and `perms` options will be passed to `write_file`.
|
||||||
|
|
||||||
|
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
|
||||||
|
|
||||||
|
Note: Using this requires python-jinja2; if it is not installed, calling
|
||||||
|
this will attempt to use charmhelpers.fetch.apt_install to install it.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from charmhelpers.fetch import apt_install
|
||||||
|
except ImportError:
|
||||||
|
hookenv.log('Could not import jinja2, and could not import '
|
||||||
|
'charmhelpers.fetch to install it',
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise
|
||||||
|
apt_install('python-jinja2', fatal=True)
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
|
||||||
|
if templates_dir is None:
|
||||||
|
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
|
||||||
|
loader = Environment(loader=FileSystemLoader(templates_dir))
|
||||||
|
try:
|
||||||
|
source = source
|
||||||
|
template = loader.get_template(source)
|
||||||
|
except exceptions.TemplateNotFound as e:
|
||||||
|
hookenv.log('Could not load template %s from %s.' %
|
||||||
|
(source, templates_dir),
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise e
|
||||||
|
content = template.render(context)
|
||||||
|
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
|
||||||
|
host.write_file(target, content.encode(encoding), owner, group, perms)
|
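A short sketch of render() as called from a hook; the template name, target path, and context keys are placeholders. The template is resolved against $CHARM_DIR/templates unless templates_dir is passed, and python-jinja2 is apt-installed on demand if it is missing.

from charmhelpers.core.templating import render

render(
    source='example.conf',                       # hypothetical templates/example.conf
    target='/etc/example/example.conf',
    context={'listen_port': 8080, 'workers': 4},
    owner='root', group='root', perms=0o644)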
477
hooks/charmhelpers/core/unitdata.py
Normal file
477
hooks/charmhelpers/core/unitdata.py
Normal file
@ -0,0 +1,477 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
#
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Kapil Thangavelu <kapil.foss@gmail.com>
|
||||||
|
#
|
||||||
|
"""
|
||||||
|
Intro
|
||||||
|
-----
|
||||||
|
|
||||||
|
A simple way to store state in units. This provides a key value
|
||||||
|
storage with support for versioned, transactional operation,
|
||||||
|
and can calculate deltas from previous values to simplify unit logic
|
||||||
|
when processing changes.
|
||||||
|
|
||||||
|
|
||||||
|
Hook Integration
|
||||||
|
----------------
|
||||||
|
|
||||||
|
There are several extant frameworks for hook execution, including
|
||||||
|
|
||||||
|
- charmhelpers.core.hookenv.Hooks
|
||||||
|
- charmhelpers.core.services.ServiceManager
|
||||||
|
|
||||||
|
The storage classes are framework agnostic; one simple integration is
|
||||||
|
via the HookData contextmanager. It will record the current hook
|
||||||
|
execution environment (including relation data, config data, etc.),
|
||||||
|
set up a transaction and allow easy access to the changes from
|
||||||
|
previously seen values. One consequence of the integration is the
|
||||||
|
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
||||||
|
'charm_revisions') for their respective values.
|
||||||
|
|
||||||
|
Here's a fully worked integration example using hookenv.Hooks::
|
||||||
|
|
||||||
|
from charmhelper.core import hookenv, unitdata
|
||||||
|
|
||||||
|
hook_data = unitdata.HookData()
|
||||||
|
db = unitdata.kv()
|
||||||
|
hooks = hookenv.Hooks()
|
||||||
|
|
||||||
|
@hooks.hook
|
||||||
|
def config_changed():
|
||||||
|
# Print all changes to configuration from previously seen
|
||||||
|
# values.
|
||||||
|
for changed, (prev, cur) in hook_data.conf.items():
|
||||||
|
print('config changed', changed,
|
||||||
|
'previous value', prev,
|
||||||
|
'current value', cur)
|
||||||
|
|
||||||
|
# Get some unit-specific bookkeeping
|
||||||
|
if not db.get('pkg_key'):
|
||||||
|
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||||
|
db.set('pkg_key', key)
|
||||||
|
|
||||||
|
# Directly access all charm config as a mapping.
|
||||||
|
conf = db.getrange('config', True)
|
||||||
|
|
||||||
|
# Directly access all relation data as a mapping
|
||||||
|
rels = db.getrange('rels', True)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
with hook_data():
|
||||||
|
hook.execute()
|
||||||
|
|
||||||
|
|
||||||
|
A more basic integration is via the hook_scope context manager which simply
|
||||||
|
manages transaction scope (and records hook name, and timestamp)::
|
||||||
|
|
||||||
|
>>> from unitdata import kv
|
||||||
|
>>> db = kv()
|
||||||
|
>>> with db.hook_scope('install'):
|
||||||
|
... # do work, in transactional scope.
|
||||||
|
... db.set('x', 1)
|
||||||
|
>>> db.get('x')
|
||||||
|
1
|
||||||
|
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
Values are automatically json de/serialized to preserve basic typing
|
||||||
|
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
||||||
|
|
||||||
|
Individual values can be manipulated via get/set::
|
||||||
|
|
||||||
|
>>> kv.set('y', True)
|
||||||
|
>>> kv.get('y')
|
||||||
|
True
|
||||||
|
|
||||||
|
# We can set complex values (dicts, lists) as a single key.
|
||||||
|
>>> kv.set('config', {'a': 1, 'b': True})
|
||||||
|
|
||||||
|
# Also supports returning dictionaries as a record which
|
||||||
|
# provides attribute access.
|
||||||
|
>>> config = kv.get('config', record=True)
|
||||||
|
>>> config.b
|
||||||
|
True
|
||||||
|
|
||||||
|
|
||||||
|
Groups of keys can be manipulated with update/getrange::
|
||||||
|
|
||||||
|
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
||||||
|
>>> kv.getrange('gui.', strip=True)
|
||||||
|
{'z': 1, 'y': 2}
|
||||||
|
|
||||||
|
When updating values, it's very helpful to understand which values
|
||||||
|
have actually changed and how they have changed. The storage
|
||||||
|
provides a delta method to provide for this::
|
||||||
|
|
||||||
|
>>> data = {'debug': True, 'option': 2}
|
||||||
|
>>> delta = kv.delta(data, 'config.')
|
||||||
|
>>> delta.debug.previous
|
||||||
|
None
|
||||||
|
>>> delta.debug.current
|
||||||
|
True
|
||||||
|
>>> delta
|
||||||
|
{'debug': (None, True), 'option': (None, 2)}
|
||||||
|
|
||||||
|
Note the delta method does not persist the actual change, it needs to
|
||||||
|
be explicitly saved via 'update' method::
|
||||||
|
|
||||||
|
>>> kv.update(data, 'config.')
|
||||||
|
|
||||||
|
Values modified in the context of a hook scope retain historical values
|
||||||
|
associated to the hookname.
|
||||||
|
|
||||||
|
>>> with db.hook_scope('config-changed'):
|
||||||
|
... db.set('x', 42)
|
||||||
|
>>> db.gethistory('x')
|
||||||
|
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
||||||
|
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import contextlib
|
||||||
|
import datetime
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import sqlite3
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Storage(object):
|
||||||
|
"""Simple key value database for local unit state within charms.
|
||||||
|
|
||||||
|
Modifications are automatically committed at hook exit. That's
|
||||||
|
currently regardless of exit code.
|
||||||
|
|
||||||
|
To support dicts, lists, integer, floats, and booleans values
|
||||||
|
are automatically json encoded/decoded.
|
||||||
|
"""
|
||||||
|
def __init__(self, path=None):
|
||||||
|
self.db_path = path
|
||||||
|
if path is None:
|
||||||
|
self.db_path = os.path.join(
|
||||||
|
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
|
||||||
|
self.conn = sqlite3.connect('%s' % self.db_path)
|
||||||
|
self.cursor = self.conn.cursor()
|
||||||
|
self.revision = None
|
||||||
|
self._closed = False
|
||||||
|
self._init()
|
||||||
|
|
||||||
|
def close(self):
|
||||||
|
if self._closed:
|
||||||
|
return
|
||||||
|
self.flush(False)
|
||||||
|
self.cursor.close()
|
||||||
|
self.conn.close()
|
||||||
|
self._closed = True
|
||||||
|
|
||||||
|
def _scoped_query(self, stmt, params=None):
|
||||||
|
if params is None:
|
||||||
|
params = []
|
||||||
|
return stmt, params
|
||||||
|
|
||||||
|
def get(self, key, default=None, record=False):
|
||||||
|
self.cursor.execute(
|
||||||
|
*self._scoped_query(
|
||||||
|
'select data from kv where key=?', [key]))
|
||||||
|
result = self.cursor.fetchone()
|
||||||
|
if not result:
|
||||||
|
return default
|
||||||
|
if record:
|
||||||
|
return Record(json.loads(result[0]))
|
||||||
|
return json.loads(result[0])
|
||||||
|
|
||||||
|
def getrange(self, key_prefix, strip=False):
|
||||||
|
stmt = "select key, data from kv where key like '%s%%'" % key_prefix
|
||||||
|
self.cursor.execute(*self._scoped_query(stmt))
|
||||||
|
result = self.cursor.fetchall()
|
||||||
|
|
||||||
|
if not result:
|
||||||
|
return None
|
||||||
|
if not strip:
|
||||||
|
key_prefix = ''
|
||||||
|
return dict([
|
||||||
|
(k[len(key_prefix):], json.loads(v)) for k, v in result])
|
||||||
|
|
||||||
|
def update(self, mapping, prefix=""):
|
||||||
|
for k, v in mapping.items():
|
||||||
|
self.set("%s%s" % (prefix, k), v)
|
||||||
|
|
||||||
|
def unset(self, key):
|
||||||
|
self.cursor.execute('delete from kv where key=?', [key])
|
||||||
|
if self.revision and self.cursor.rowcount:
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into kv_revisions values (?, ?, ?)',
|
||||||
|
[key, self.revision, json.dumps('DELETED')])
|
||||||
|
|
||||||
|
def set(self, key, value):
|
||||||
|
serialized = json.dumps(value)
|
||||||
|
|
||||||
|
self.cursor.execute(
|
||||||
|
'select data from kv where key=?', [key])
|
||||||
|
exists = self.cursor.fetchone()
|
||||||
|
|
||||||
|
# Skip mutations to the same value
|
||||||
|
if exists:
|
||||||
|
if exists[0] == serialized:
|
||||||
|
return value
|
||||||
|
|
||||||
|
if not exists:
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into kv (key, data) values (?, ?)',
|
||||||
|
(key, serialized))
|
||||||
|
else:
|
||||||
|
self.cursor.execute('''
|
||||||
|
update kv
|
||||||
|
set data = ?
|
||||||
|
where key = ?''', [serialized, key])
|
||||||
|
|
||||||
|
# Save
|
||||||
|
if not self.revision:
|
||||||
|
return value
|
||||||
|
|
||||||
|
self.cursor.execute(
|
||||||
|
'select 1 from kv_revisions where key=? and revision=?',
|
||||||
|
[key, self.revision])
|
||||||
|
exists = self.cursor.fetchone()
|
||||||
|
|
||||||
|
if not exists:
|
||||||
|
self.cursor.execute(
|
||||||
|
'''insert into kv_revisions (
|
||||||
|
revision, key, data) values (?, ?, ?)''',
|
||||||
|
(self.revision, key, serialized))
|
||||||
|
else:
|
||||||
|
self.cursor.execute(
|
||||||
|
'''
|
||||||
|
update kv_revisions
|
||||||
|
set data = ?
|
||||||
|
where key = ?
|
||||||
|
and revision = ?''',
|
||||||
|
[serialized, key, self.revision])
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
def delta(self, mapping, prefix):
|
||||||
|
"""
|
||||||
|
return a delta containing values that have changed.
|
||||||
|
"""
|
||||||
|
previous = self.getrange(prefix, strip=True)
|
||||||
|
if not previous:
|
||||||
|
pk = set()
|
||||||
|
else:
|
||||||
|
pk = set(previous.keys())
|
||||||
|
ck = set(mapping.keys())
|
||||||
|
delta = DeltaSet()
|
||||||
|
|
||||||
|
# added
|
||||||
|
for k in ck.difference(pk):
|
||||||
|
delta[k] = Delta(None, mapping[k])
|
||||||
|
|
||||||
|
# removed
|
||||||
|
for k in pk.difference(ck):
|
||||||
|
delta[k] = Delta(previous[k], None)
|
||||||
|
|
||||||
|
# changed
|
||||||
|
for k in pk.intersection(ck):
|
||||||
|
c = mapping[k]
|
||||||
|
p = previous[k]
|
||||||
|
if c != p:
|
||||||
|
delta[k] = Delta(p, c)
|
||||||
|
|
||||||
|
return delta
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def hook_scope(self, name=""):
|
||||||
|
"""Scope all future interactions to the current hook execution
|
||||||
|
revision."""
|
||||||
|
assert not self.revision
|
||||||
|
self.cursor.execute(
|
||||||
|
'insert into hooks (hook, date) values (?, ?)',
|
||||||
|
(name or sys.argv[0],
|
||||||
|
datetime.datetime.utcnow().isoformat()))
|
||||||
|
self.revision = self.cursor.lastrowid
|
||||||
|
try:
|
||||||
|
yield self.revision
|
||||||
|
self.revision = None
|
||||||
|
except:
|
||||||
|
self.flush(False)
|
||||||
|
self.revision = None
|
||||||
|
raise
|
||||||
|
else:
|
||||||
|
self.flush()
|
||||||
|
|
||||||
|
def flush(self, save=True):
|
||||||
|
if save:
|
||||||
|
self.conn.commit()
|
||||||
|
elif self._closed:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
self.conn.rollback()
|
||||||
|
|
||||||
|
def _init(self):
|
||||||
|
self.cursor.execute('''
|
||||||
|
create table if not exists kv (
|
||||||
|
key text,
|
||||||
|
data text,
|
||||||
|
primary key (key)
|
||||||
|
)''')
|
||||||
|
self.cursor.execute('''
|
||||||
|
create table if not exists kv_revisions (
|
||||||
|
key text,
|
||||||
|
revision integer,
|
||||||
|
data text,
|
||||||
|
primary key (key, revision)
|
||||||
|
)''')
|
||||||
|
self.cursor.execute('''
|
||||||
|
create table if not exists hooks (
|
||||||
|
version integer primary key autoincrement,
|
||||||
|
hook text,
|
||||||
|
date text
|
||||||
|
)''')
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
def gethistory(self, key, deserialize=False):
|
||||||
|
self.cursor.execute(
|
||||||
|
'''
|
||||||
|
select kv.revision, kv.key, kv.data, h.hook, h.date
|
||||||
|
from kv_revisions kv,
|
||||||
|
hooks h
|
||||||
|
where kv.key=?
|
||||||
|
and kv.revision = h.version
|
||||||
|
''', [key])
|
||||||
|
if deserialize is False:
|
||||||
|
return self.cursor.fetchall()
|
||||||
|
return map(_parse_history, self.cursor.fetchall())
|
||||||
|
|
||||||
|
def debug(self, fh=sys.stderr):
|
||||||
|
self.cursor.execute('select * from kv')
|
||||||
|
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||||
|
self.cursor.execute('select * from kv_revisions')
|
||||||
|
pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_history(d):
|
||||||
|
return (d[0], d[1], json.loads(d[2]), d[3],
|
||||||
|
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
|
||||||
|
|
||||||
|
|
||||||
|
class HookData(object):
|
||||||
|
"""Simple integration for existing hook exec frameworks.
|
||||||
|
|
||||||
|
Records all unit information, and stores deltas for processing
|
||||||
|
by the hook.
|
||||||
|
|
||||||
|
Sample::
|
||||||
|
|
||||||
|
from charmhelper.core import hookenv, unitdata
|
||||||
|
|
||||||
|
changes = unitdata.HookData()
|
||||||
|
db = unitdata.kv()
|
||||||
|
hooks = hookenv.Hooks()
|
||||||
|
|
||||||
|
@hooks.hook
|
||||||
|
def config_changed():
|
||||||
|
# View all changes to configuration
|
||||||
|
for changed, (prev, cur) in changes.conf.items():
|
||||||
|
print('config changed', changed,
|
||||||
|
'previous value', prev,
|
||||||
|
'current value', cur)
|
||||||
|
|
||||||
|
# Get some unit-specific bookkeeping
|
||||||
|
if not db.get('pkg_key'):
|
||||||
|
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||||
|
db.set('pkg_key', key)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
with changes():
|
||||||
|
hook.execute()
|
||||||
|
|
||||||
|
"""
|
||||||
|
def __init__(self):
|
||||||
|
self.kv = kv()
|
||||||
|
self.conf = None
|
||||||
|
self.rels = None
|
||||||
|
|
||||||
|
@contextlib.contextmanager
|
||||||
|
def __call__(self):
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
hook_name = hookenv.hook_name()
|
||||||
|
|
||||||
|
with self.kv.hook_scope(hook_name):
|
||||||
|
self._record_charm_version(hookenv.charm_dir())
|
||||||
|
delta_config, delta_relation = self._record_hook(hookenv)
|
||||||
|
yield self.kv, delta_config, delta_relation
|
||||||
|
|
||||||
|
def _record_charm_version(self, charm_dir):
|
||||||
|
# Record revisions.. charm revisions are meaningless
|
||||||
|
# to charm authors as they don't control the revision.
|
||||||
|
# so logic dependent on revision is not particularly
|
||||||
|
# useful, however it is useful for debugging analysis.
|
||||||
|
charm_rev = open(
|
||||||
|
os.path.join(charm_dir, 'revision')).read().strip()
|
||||||
|
charm_rev = charm_rev or '0'
|
||||||
|
revs = self.kv.get('charm_revisions', [])
|
||||||
|
if charm_rev not in revs:
|
||||||
|
revs.append(charm_rev.strip() or '0')
|
||||||
|
self.kv.set('charm_revisions', revs)
|
||||||
|
|
||||||
|
def _record_hook(self, hookenv):
|
||||||
|
data = hookenv.execution_environment()
|
||||||
|
self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
|
||||||
|
self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
|
||||||
|
self.kv.set('env', dict(data['env']))
|
||||||
|
self.kv.set('unit', data['unit'])
|
||||||
|
self.kv.set('relid', data.get('relid'))
|
||||||
|
return conf_delta, rels_delta
|
||||||
|
|
||||||
|
|
||||||
|
class Record(dict):
|
||||||
|
|
||||||
|
__slots__ = ()
|
||||||
|
|
||||||
|
def __getattr__(self, k):
|
||||||
|
if k in self:
|
||||||
|
return self[k]
|
||||||
|
raise AttributeError(k)
|
||||||
|
|
||||||
|
|
||||||
|
class DeltaSet(Record):
|
||||||
|
|
||||||
|
__slots__ = ()
|
||||||
|
|
||||||
|
|
||||||
|
Delta = collections.namedtuple('Delta', ['previous', 'current'])
|
||||||
|
|
||||||
|
|
||||||
|
_KV = None
|
||||||
|
|
||||||
|
|
||||||
|
def kv():
|
||||||
|
global _KV
|
||||||
|
if _KV is None:
|
||||||
|
_KV = Storage()
|
||||||
|
return _KV
|
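A small self-contained illustration of the Storage API outside of a hook; the explicit database path and the keys are arbitrary, chosen so the snippet can run without a Juju environment.

from charmhelpers.core import unitdata

db = unitdata.Storage('/tmp/example-unit-state.db')   # explicit path just for the example
with db.hook_scope('example-hook'):                   # commits on clean exit
    db.set('app.version', '1.2.3')
    db.update({'debug': True, 'workers': 4}, prefix='config.')

print(db.get('app.version'))                # '1.2.3'
print(db.getrange('config.', strip=True))   # {'debug': True, 'workers': 4}
db.close()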
439
hooks/charmhelpers/fetch/__init__.py
Normal file
439
hooks/charmhelpers/fetch/__init__.py
Normal file
@ -0,0 +1,439 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import importlib
|
||||||
|
from tempfile import NamedTemporaryFile
|
||||||
|
import time
|
||||||
|
from yaml import safe_load
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
lsb_release
|
||||||
|
)
|
||||||
|
import subprocess
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
config,
|
||||||
|
log,
|
||||||
|
)
|
||||||
|
import os
|
||||||
|
|
||||||
|
import six
|
||||||
|
if six.PY3:
|
||||||
|
from urllib.parse import urlparse, urlunparse
|
||||||
|
else:
|
||||||
|
from urlparse import urlparse, urlunparse
|
||||||
|
|
||||||
|
|
||||||
|
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
|
||||||
|
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
|
||||||
|
"""
|
||||||
|
PROPOSED_POCKET = """# Proposed
|
||||||
|
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
|
||||||
|
"""
|
||||||
|
CLOUD_ARCHIVE_POCKETS = {
|
||||||
|
# Folsom
|
||||||
|
'folsom': 'precise-updates/folsom',
|
||||||
|
'precise-folsom': 'precise-updates/folsom',
|
||||||
|
'precise-folsom/updates': 'precise-updates/folsom',
|
||||||
|
'precise-updates/folsom': 'precise-updates/folsom',
|
||||||
|
'folsom/proposed': 'precise-proposed/folsom',
|
||||||
|
'precise-folsom/proposed': 'precise-proposed/folsom',
|
||||||
|
'precise-proposed/folsom': 'precise-proposed/folsom',
|
||||||
|
# Grizzly
|
||||||
|
'grizzly': 'precise-updates/grizzly',
|
||||||
|
'precise-grizzly': 'precise-updates/grizzly',
|
||||||
|
'precise-grizzly/updates': 'precise-updates/grizzly',
|
||||||
|
'precise-updates/grizzly': 'precise-updates/grizzly',
|
||||||
|
'grizzly/proposed': 'precise-proposed/grizzly',
|
||||||
|
'precise-grizzly/proposed': 'precise-proposed/grizzly',
|
||||||
|
'precise-proposed/grizzly': 'precise-proposed/grizzly',
|
||||||
|
# Havana
|
||||||
|
'havana': 'precise-updates/havana',
|
||||||
|
'precise-havana': 'precise-updates/havana',
|
||||||
|
'precise-havana/updates': 'precise-updates/havana',
|
||||||
|
'precise-updates/havana': 'precise-updates/havana',
|
||||||
|
'havana/proposed': 'precise-proposed/havana',
|
||||||
|
'precise-havana/proposed': 'precise-proposed/havana',
|
||||||
|
'precise-proposed/havana': 'precise-proposed/havana',
|
||||||
|
# Icehouse
|
||||||
|
'icehouse': 'precise-updates/icehouse',
|
||||||
|
'precise-icehouse': 'precise-updates/icehouse',
|
||||||
|
'precise-icehouse/updates': 'precise-updates/icehouse',
|
||||||
|
'precise-updates/icehouse': 'precise-updates/icehouse',
|
||||||
|
'icehouse/proposed': 'precise-proposed/icehouse',
|
||||||
|
'precise-icehouse/proposed': 'precise-proposed/icehouse',
|
||||||
|
'precise-proposed/icehouse': 'precise-proposed/icehouse',
|
||||||
|
# Juno
|
||||||
|
'juno': 'trusty-updates/juno',
|
||||||
|
'trusty-juno': 'trusty-updates/juno',
|
||||||
|
'trusty-juno/updates': 'trusty-updates/juno',
|
||||||
|
'trusty-updates/juno': 'trusty-updates/juno',
|
||||||
|
'juno/proposed': 'trusty-proposed/juno',
|
||||||
|
'trusty-juno/proposed': 'trusty-proposed/juno',
|
||||||
|
'trusty-proposed/juno': 'trusty-proposed/juno',
|
||||||
|
# Kilo
|
||||||
|
'kilo': 'trusty-updates/kilo',
|
||||||
|
'trusty-kilo': 'trusty-updates/kilo',
|
||||||
|
'trusty-kilo/updates': 'trusty-updates/kilo',
|
||||||
|
'trusty-updates/kilo': 'trusty-updates/kilo',
|
||||||
|
'kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'trusty-kilo/proposed': 'trusty-proposed/kilo',
|
||||||
|
'trusty-proposed/kilo': 'trusty-proposed/kilo',
|
||||||
|
}
|
||||||
|
|
||||||
|
# The order of this list is very important. Handlers should be listed from
|
||||||
|
# least- to most-specific URL matching.
|
||||||
|
FETCH_HANDLERS = (
|
||||||
|
'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
|
||||||
|
'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
|
||||||
|
'charmhelpers.fetch.giturl.GitUrlFetchHandler',
|
||||||
|
)
|
||||||
|
|
||||||
|
APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
|
||||||
|
APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
|
||||||
|
APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
|
||||||
|
|
||||||
|
|
||||||
|
class SourceConfigError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class UnhandledSource(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class AptLockError(Exception):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
class BaseFetchHandler(object):
|
||||||
|
|
||||||
|
"""Base class for FetchHandler implementations in fetch plugins"""
|
||||||
|
|
||||||
|
def can_handle(self, source):
|
||||||
|
"""Returns True if the source can be handled. Otherwise returns
|
||||||
|
a string explaining why it cannot"""
|
||||||
|
return "Wrong source type"
|
||||||
|
|
||||||
|
def install(self, source):
|
||||||
|
"""Try to download and unpack the source. Return the path to the
|
||||||
|
unpacked files or raise UnhandledSource."""
|
||||||
|
raise UnhandledSource("Wrong source type {}".format(source))
|
||||||
|
|
||||||
|
def parse_url(self, url):
|
||||||
|
return urlparse(url)
|
||||||
|
|
||||||
|
def base_url(self, url):
|
||||||
|
"""Return url without querystring or fragment"""
|
||||||
|
parts = list(self.parse_url(url))
|
||||||
|
parts[4:] = ['' for i in parts[4:]]
|
||||||
|
return urlunparse(parts)
|
||||||
|
|
||||||
|
|
||||||
|
def filter_installed_packages(packages):
|
||||||
|
"""Returns a list of packages that require installation"""
|
||||||
|
cache = apt_cache()
|
||||||
|
_pkgs = []
|
||||||
|
for package in packages:
|
||||||
|
try:
|
||||||
|
p = cache[package]
|
||||||
|
p.current_ver or _pkgs.append(package)
|
||||||
|
except KeyError:
|
||||||
|
log('Package {} has no installation candidate.'.format(package),
|
||||||
|
level='WARNING')
|
||||||
|
_pkgs.append(package)
|
||||||
|
return _pkgs
|
||||||
|
|
||||||
|
|
||||||
|
def apt_cache(in_memory=True):
|
||||||
|
"""Build and return an apt cache"""
|
||||||
|
from apt import apt_pkg
|
||||||
|
apt_pkg.init()
|
||||||
|
if in_memory:
|
||||||
|
apt_pkg.config.set("Dir::Cache::pkgcache", "")
|
||||||
|
apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
|
||||||
|
return apt_pkg.Cache()
|
||||||
|
|
||||||
|
|
||||||
|
def apt_install(packages, options=None, fatal=False):
|
||||||
|
"""Install one or more packages"""
|
||||||
|
if options is None:
|
||||||
|
options = ['--option=Dpkg::Options::=--force-confold']
|
||||||
|
|
||||||
|
cmd = ['apt-get', '--assume-yes']
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append('install')
|
||||||
|
if isinstance(packages, six.string_types):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Installing {} with options: {}".format(packages,
|
||||||
|
options))
|
||||||
|
_run_apt_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_upgrade(options=None, fatal=False, dist=False):
|
||||||
|
"""Upgrade all packages"""
|
||||||
|
if options is None:
|
||||||
|
options = ['--option=Dpkg::Options::=--force-confold']
|
||||||
|
|
||||||
|
cmd = ['apt-get', '--assume-yes']
|
||||||
|
cmd.extend(options)
|
||||||
|
if dist:
|
||||||
|
cmd.append('dist-upgrade')
|
||||||
|
else:
|
||||||
|
cmd.append('upgrade')
|
||||||
|
log("Upgrading with options: {}".format(options))
|
||||||
|
_run_apt_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_update(fatal=False):
|
||||||
|
"""Update local apt cache"""
|
||||||
|
cmd = ['apt-get', 'update']
|
||||||
|
_run_apt_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_purge(packages, fatal=False):
|
||||||
|
"""Purge one or more packages"""
|
||||||
|
cmd = ['apt-get', '--assume-yes', 'purge']
|
||||||
|
if isinstance(packages, six.string_types):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Purging {}".format(packages))
|
||||||
|
_run_apt_command(cmd, fatal)
|
||||||
|
|
||||||
|
|
||||||
|
def apt_hold(packages, fatal=False):
|
||||||
|
"""Hold one or more packages"""
|
||||||
|
cmd = ['apt-mark', 'hold']
|
||||||
|
if isinstance(packages, six.string_types):
|
||||||
|
cmd.append(packages)
|
||||||
|
else:
|
||||||
|
cmd.extend(packages)
|
||||||
|
log("Holding {}".format(packages))
|
||||||
|
|
||||||
|
if fatal:
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
else:
|
||||||
|
subprocess.call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def add_source(source, key=None):
|
||||||
|
"""Add a package source to this system.
|
||||||
|
|
||||||
|
@param source: a URL or sources.list entry, as supported by
|
||||||
|
add-apt-repository(1). Examples::
|
||||||
|
|
||||||
|
ppa:charmers/example
|
||||||
|
deb https://stub:key@private.example.com/ubuntu trusty main
|
||||||
|
|
||||||
|
In addition:
|
||||||
|
'proposed:' may be used to enable the standard 'proposed'
|
||||||
|
pocket for the release.
|
||||||
|
'cloud:' may be used to activate official cloud archive pockets,
|
||||||
|
such as 'cloud:icehouse'
|
||||||
|
'distro' may be used as a noop
|
||||||
|
|
||||||
|
@param key: A key to be added to the system's APT keyring and used
|
||||||
|
to verify the signatures on packages. Ideally, this should be an
|
||||||
|
ASCII format GPG public key including the block headers. A GPG key
|
||||||
|
id may also be used, but be aware that only insecure protocols are
|
||||||
|
available to retrieve the actual public key from a public keyserver,
|
||||||
|
placing your Juju environment at risk. ppa and cloud archive keys
|
||||||
|
are securely added automatically, so should not be provided.
|
||||||
|
"""
|
||||||
|
if source is None:
|
||||||
|
log('Source is not present. Skipping')
|
||||||
|
return
|
||||||
|
|
||||||
|
if (source.startswith('ppa:') or
|
||||||
|
source.startswith('http') or
|
||||||
|
source.startswith('deb ') or
|
||||||
|
source.startswith('cloud-archive:')):
|
||||||
|
subprocess.check_call(['add-apt-repository', '--yes', source])
|
||||||
|
elif source.startswith('cloud:'):
|
||||||
|
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
|
||||||
|
fatal=True)
|
||||||
|
pocket = source.split(':')[-1]
|
||||||
|
if pocket not in CLOUD_ARCHIVE_POCKETS:
|
||||||
|
raise SourceConfigError(
|
||||||
|
'Unsupported cloud: source option %s' %
|
||||||
|
pocket)
|
||||||
|
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
|
||||||
|
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
|
||||||
|
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
|
||||||
|
elif source == 'proposed':
|
||||||
|
release = lsb_release()['DISTRIB_CODENAME']
|
||||||
|
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
|
||||||
|
apt.write(PROPOSED_POCKET.format(release))
|
||||||
|
elif source == 'distro':
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
log("Unknown source: {!r}".format(source))
|
||||||
|
|
||||||
|
if key:
|
||||||
|
if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
|
||||||
|
with NamedTemporaryFile('w+') as key_file:
|
||||||
|
key_file.write(key)
|
||||||
|
key_file.flush()
|
||||||
|
key_file.seek(0)
|
||||||
|
subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
|
||||||
|
else:
|
||||||
|
# Note that hkp: is in no way a secure protocol. Using a
|
||||||
|
# GPG key id is pointless from a security POV unless you
|
||||||
|
# absolutely trust your network and DNS.
|
||||||
|
subprocess.check_call(['apt-key', 'adv', '--keyserver',
|
||||||
|
'hkp://keyserver.ubuntu.com:80', '--recv',
|
||||||
|
key])
|
||||||
|
|
||||||
|
|
||||||
|
def configure_sources(update=False,
|
||||||
|
sources_var='install_sources',
|
||||||
|
keys_var='install_keys'):
|
||||||
|
"""
|
||||||
|
Configure multiple sources from charm configuration.
|
||||||
|
|
||||||
|
The lists are encoded as yaml fragments in the configuration.
|
||||||
|
The fragment needs to be included as a string. Sources and their
|
||||||
|
corresponding keys are of the types supported by add_source().
|
||||||
|
|
||||||
|
Example config:
|
||||||
|
install_sources: |
|
||||||
|
- "ppa:foo"
|
||||||
|
- "http://example.com/repo precise main"
|
||||||
|
install_keys: |
|
||||||
|
- null
|
||||||
|
- "a1b2c3d4"
|
||||||
|
|
||||||
|
Note that 'null' (a.k.a. None) should not be quoted.
|
||||||
|
"""
|
||||||
|
sources = safe_load((config(sources_var) or '').strip()) or []
|
||||||
|
keys = safe_load((config(keys_var) or '').strip()) or None
|
||||||
|
|
||||||
|
if isinstance(sources, six.string_types):
|
||||||
|
sources = [sources]
|
||||||
|
|
||||||
|
if keys is None:
|
||||||
|
for source in sources:
|
||||||
|
add_source(source, None)
|
||||||
|
else:
|
||||||
|
if isinstance(keys, six.string_types):
|
||||||
|
keys = [keys]
|
||||||
|
|
||||||
|
if len(sources) != len(keys):
|
||||||
|
raise SourceConfigError(
|
||||||
|
'Install sources and keys lists are different lengths')
|
||||||
|
for source, key in zip(sources, keys):
|
||||||
|
add_source(source, key)
|
||||||
|
if update:
|
||||||
|
apt_update(fatal=True)
|
||||||
|
|
||||||
|
|
||||||
|
def install_remote(source, *args, **kwargs):
|
||||||
|
"""
|
||||||
|
Install a file tree from a remote source
|
||||||
|
|
||||||
|
The specified source should be a url of the form:
|
||||||
|
scheme://[host]/path[#[option=value][&...]]
|
||||||
|
|
||||||
|
Schemes supported are based on this module's submodules.
|
||||||
|
Options supported are submodule-specific.
|
||||||
|
Additional arguments are passed through to the submodule.
|
||||||
|
|
||||||
|
For example::
|
||||||
|
|
||||||
|
dest = install_remote('http://example.com/archive.tgz',
|
||||||
|
checksum='deadbeef',
|
||||||
|
hash_type='sha1')
|
||||||
|
|
||||||
|
This will download `archive.tgz`, validate it using SHA1 and, if
|
||||||
|
the file is ok, extract it and return the directory in which it
|
||||||
|
was extracted. If the checksum fails, it will raise
|
||||||
|
:class:`charmhelpers.core.host.ChecksumError`.
|
||||||
|
"""
|
||||||
|
# We ONLY check for True here because can_handle may return a string
|
||||||
|
# explaining why it can't handle a given source.
|
||||||
|
handlers = [h for h in plugins() if h.can_handle(source) is True]
|
||||||
|
installed_to = None
|
||||||
|
for handler in handlers:
|
||||||
|
try:
|
||||||
|
installed_to = handler.install(source, *args, **kwargs)
|
||||||
|
except UnhandledSource:
|
||||||
|
pass
|
||||||
|
if not installed_to:
|
||||||
|
raise UnhandledSource("No handler found for source {}".format(source))
|
||||||
|
return installed_to
|
||||||
|
|
||||||
|
|
||||||
|
def install_from_config(config_var_name):
|
||||||
|
charm_config = config()
|
||||||
|
source = charm_config[config_var_name]
|
||||||
|
return install_remote(source)
|
||||||
|
|
||||||
|
|
||||||
|
def plugins(fetch_handlers=None):
|
||||||
|
if not fetch_handlers:
|
||||||
|
fetch_handlers = FETCH_HANDLERS
|
||||||
|
plugin_list = []
|
||||||
|
for handler_name in fetch_handlers:
|
||||||
|
package, classname = handler_name.rsplit('.', 1)
|
||||||
|
try:
|
||||||
|
handler_class = getattr(
|
||||||
|
importlib.import_module(package),
|
||||||
|
classname)
|
||||||
|
plugin_list.append(handler_class())
|
||||||
|
except (ImportError, AttributeError):
|
||||||
|
# Skip missing plugins so that they can be omitted from
|
||||||
|
# installation if desired
|
||||||
|
log("FetchHandler {} not found, skipping plugin".format(
|
||||||
|
handler_name))
|
||||||
|
return plugin_list
|
||||||
|
|
||||||
|
|
||||||
|
def _run_apt_command(cmd, fatal=False):
|
||||||
|
"""
|
||||||
|
Run an APT command, checking output and retrying if the fatal flag is set
|
||||||
|
to True.
|
||||||
|
|
||||||
|
:param: cmd: str: The apt command to run.
|
||||||
|
:param: fatal: bool: Whether the command's output should be checked and
|
||||||
|
retried.
|
||||||
|
"""
|
||||||
|
env = os.environ.copy()
|
||||||
|
|
||||||
|
if 'DEBIAN_FRONTEND' not in env:
|
||||||
|
env['DEBIAN_FRONTEND'] = 'noninteractive'
|
||||||
|
|
||||||
|
if fatal:
|
||||||
|
retry_count = 0
|
||||||
|
result = None
|
||||||
|
|
||||||
|
# If the command is considered "fatal", we need to retry if the apt
|
||||||
|
# lock was not acquired.
|
||||||
|
|
||||||
|
while result is None or result == APT_NO_LOCK:
|
||||||
|
try:
|
||||||
|
result = subprocess.check_call(cmd, env=env)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
retry_count = retry_count + 1
|
||||||
|
if retry_count > APT_NO_LOCK_RETRY_COUNT:
|
||||||
|
raise
|
||||||
|
result = e.returncode
|
||||||
|
log("Couldn't acquire DPKG lock. Will retry in {} seconds."
|
||||||
|
"".format(APT_NO_LOCK_RETRY_DELAY))
|
||||||
|
time.sleep(APT_NO_LOCK_RETRY_DELAY)
|
||||||
|
|
||||||
|
else:
|
||||||
|
subprocess.call(cmd, env=env)
|
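The usual install-hook sequence built from these helpers, as a hedged sketch; the package name is a placeholder, and configure_sources assumes install_sources/install_keys options exist in the charm's config.yaml.

from charmhelpers import fetch

# Add any configured apt sources and refresh the package cache.
fetch.configure_sources(update=True)

# Install only what is not already present; fatal=True raises on failure.
missing = fetch.filter_installed_packages(['example-package'])
if missing:
    fetch.apt_install(missing, fatal=True)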
161
hooks/charmhelpers/fetch/archiveurl.py
Normal file
161
hooks/charmhelpers/fetch/archiveurl.py
Normal file
@ -0,0 +1,161 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# This file is part of charm-helpers.
|
||||||
|
#
|
||||||
|
# charm-helpers is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU Lesser General Public License version 3 as
|
||||||
|
# published by the Free Software Foundation.
|
||||||
|
#
|
||||||
|
# charm-helpers is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU Lesser General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU Lesser General Public License
|
||||||
|
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import hashlib
|
||||||
|
import re
|
||||||
|
|
||||||
|
from charmhelpers.fetch import (
|
||||||
|
BaseFetchHandler,
|
||||||
|
UnhandledSource
|
||||||
|
)
|
||||||
|
from charmhelpers.payload.archive import (
|
||||||
|
get_archive_handler,
|
||||||
|
extract,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.host import mkdir, check_hash
|
||||||
|
|
||||||
|
import six
|
||||||
|
if six.PY3:
|
||||||
|
from urllib.request import (
|
||||||
|
build_opener, install_opener, urlopen, urlretrieve,
|
||||||
|
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
||||||
|
)
|
||||||
|
from urllib.parse import urlparse, urlunparse, parse_qs
|
||||||
|
from urllib.error import URLError
|
||||||
|
else:
|
||||||
|
from urllib import urlretrieve
|
||||||
|
from urllib2 import (
|
||||||
|
build_opener, install_opener, urlopen,
|
||||||
|
HTTPPasswordMgrWithDefaultRealm, HTTPBasicAuthHandler,
|
||||||
|
URLError
|
||||||
|
)
|
||||||
|
from urlparse import urlparse, urlunparse, parse_qs
|
||||||
|
|
||||||
|
|
||||||
|
def splituser(host):
|
||||||
|
'''urllib.splituser(), but six's support of this seems broken'''
|
||||||
|
_userprog = re.compile('^(.*)@(.*)$')
|
||||||
|
match = _userprog.match(host)
|
||||||
|
if match:
|
||||||
|
return match.group(1, 2)
|
||||||
|
return None, host
|
||||||
|
|
||||||
|
|
||||||
|
def splitpasswd(user):
|
||||||
|
'''urllib.splitpasswd(), but six's support of this is missing'''
|
||||||
|
_passwdprog = re.compile('^([^:]*):(.*)$', re.S)
|
||||||
|
match = _passwdprog.match(user)
|
||||||
|
if match:
|
||||||
|
return match.group(1, 2)
|
||||||
|
return user, None
|
||||||
|
|
||||||
|
|
||||||
|
class ArchiveUrlFetchHandler(BaseFetchHandler):
|
||||||
|
"""
|
||||||
|
Handler to download archive files from arbitrary URLs.
|
||||||
|
|
||||||
|
Can fetch from http, https, ftp, and file URLs.
|
||||||
|
|
||||||
|
Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.
|
||||||
|
|
||||||
|
Installs the contents of the archive in $CHARM_DIR/fetched/.
|
||||||
|
"""
|
||||||
|
def can_handle(self, source):
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
|
||||||
|
return "Wrong source type"
|
||||||
|
if get_archive_handler(self.base_url(source)):
|
||||||
|
return True
|
||||||
|
return False
|
||||||
|
|
||||||
|
def download(self, source, dest):
|
||||||
|
"""
|
||||||
|
Download an archive file.
|
||||||
|
|
||||||
|
:param str source: URL pointing to an archive file.
|
||||||
|
:param str dest: Local path location to download archive file to.
|
||||||
|
"""
|
||||||
|
# propagate all exceptions
|
||||||
|
# URLError, OSError, etc
|
||||||
|
proto, netloc, path, params, query, fragment = urlparse(source)
|
||||||
|
if proto in ('http', 'https'):
|
||||||
|
auth, barehost = splituser(netloc)
|
||||||
|
if auth is not None:
|
||||||
|
source = urlunparse((proto, barehost, path, params, query, fragment))
|
||||||
|
username, password = splitpasswd(auth)
|
||||||
|
passman = HTTPPasswordMgrWithDefaultRealm()
|
||||||
|
# Realm is set to None in add_password to force the username and password
|
||||||
|
# to be used whatever the realm
|
||||||
|
passman.add_password(None, source, username, password)
|
||||||
|
authhandler = HTTPBasicAuthHandler(passman)
|
||||||
|
opener = build_opener(authhandler)
|
||||||
|
install_opener(opener)
|
||||||
|
response = urlopen(source)
|
||||||
|
try:
|
||||||
|
with open(dest, 'w') as dest_file:
|
||||||
|
dest_file.write(response.read())
|
||||||
|
except Exception as e:
|
||||||
|
if os.path.isfile(dest):
|
||||||
|
os.unlink(dest)
|
||||||
|
raise e
|
||||||
|
|
||||||
|
# Mandatory file validation via Sha1 or MD5 hashing.
|
||||||
|
def download_and_validate(self, url, hashsum, validate="sha1"):
|
||||||
|
tempfile, headers = urlretrieve(url)
|
||||||
|
check_hash(tempfile, hashsum, validate)
|
||||||
|
return tempfile
|
||||||
|
|
||||||
|
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
|
||||||
|
"""
|
||||||
|
Download and install an archive file, with optional checksum validation.
|
||||||
|
|
||||||
|
The checksum can also be given on the `source` URL's fragment.
|
||||||
|
For example::
|
||||||
|
|
||||||
|
handler.install('http://example.com/file.tgz#sha1=deadbeef')
|
||||||
|
|
||||||
|
:param str source: URL pointing to an archive file.
|
||||||
|
:param str dest: Local destination path to install to. If not given,
|
||||||
|
installs to `$CHARM_DIR/archives/archive_file_name`.
|
||||||
|
:param str checksum: If given, validate the archive file after download.
|
||||||
|
:param str hash_type: Algorithm used to generate `checksum`.
|
||||||
|
Can be any hash alrgorithm supported by :mod:`hashlib`,
|
||||||
|
such as md5, sha1, sha256, sha512, etc.
|
||||||
|
|
||||||
|
"""
|
||||||
|
url_parts = self.parse_url(source)
|
||||||
|
dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
|
||||||
|
if not os.path.exists(dest_dir):
|
||||||
|
mkdir(dest_dir, perms=0o755)
|
||||||
|
dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
|
||||||
|
try:
|
||||||
|
self.download(source, dld_file)
|
||||||
|
except URLError as e:
|
||||||
|
raise UnhandledSource(e.reason)
|
||||||
|
except OSError as e:
|
||||||
|
raise UnhandledSource(e.strerror)
|
||||||
|
options = parse_qs(url_parts.fragment)
|
||||||
|
for key, value in options.items():
|
||||||
|
if not six.PY3:
|
||||||
|
algorithms = hashlib.algorithms
|
||||||
|
else:
|
||||||
|
algorithms = hashlib.algorithms_available
|
||||||
|
if key in algorithms:
|
||||||
|
check_hash(dld_file, value, key)
|
||||||
|
if checksum:
|
||||||
|
check_hash(dld_file, checksum, hash_type)
|
||||||
|
return extract(dld_file, dest)
|
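As a rough usage sketch for the handler above (not part of the synced file; it assumes CHARM_DIR is set by the Juju hook environment, and the URL and sha1 value are placeholders taken from the docstring example rather than anything this charm fetches):

# Hypothetical usage sketch, for illustration only.
import os
os.environ.setdefault('CHARM_DIR', '/tmp/charm-demo')  # normally set by Juju

from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

handler = ArchiveUrlFetchHandler()
# The sha1 fragment (or the checksum/hash_type arguments) triggers check_hash()
# before the archive is extracted under $CHARM_DIR/fetched/.
extracted_path = handler.install('http://example.com/file.tgz#sha1=deadbeef')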
78
hooks/charmhelpers/fetch/bzrurl.py
Normal file
@ -0,0 +1,78 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import os
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('bzrlib does not support Python3')

try:
    from bzrlib.branch import Branch
    from bzrlib import bzrdir, workingtree, errors
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-bzrlib")
    from bzrlib.branch import Branch
    from bzrlib import bzrdir, workingtree, errors


class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('bzr+ssh', 'lp'):
            return False
        else:
            return True

    def branch(self, source, dest):
        url_parts = self.parse_url(source)
        # If we use lp:branchname scheme we need to load plugins
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))
        if url_parts.scheme == "lp":
            from bzrlib.plugin import load_plugins
            load_plugins()
        try:
            local_branch = bzrdir.BzrDir.create_branch_convenience(dest)
        except errors.AlreadyControlDirError:
            local_branch = Branch.open(dest)
        try:
            remote_branch = Branch.open(source)
            remote_branch.push(local_branch)
            tree = workingtree.WorkingTree.open(dest)
            tree.update()
        except Exception as e:
            raise e

    def install(self, source):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.branch(source, dest_dir)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
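For context, a minimal sketch of how this handler might be exercised (assumptions: a Python 2 interpreter, since bzrlib rejects Python 3 above, and an illustrative lp: branch rather than one this charm actually pulls):

# Hypothetical usage sketch, for illustration only (Python 2).
from charmhelpers.fetch.bzrurl import BzrUrlFetchHandler

handler = BzrUrlFetchHandler()
source = 'lp:charm-helpers'  # placeholder branch URL
if handler.can_handle(source):
    # Branches into $CHARM_DIR/fetched/<branch_name> and returns that path.
    fetched_dir = handler.install(source)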
71
hooks/charmhelpers/fetch/giturl.py
Normal file
@ -0,0 +1,71 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import os
from charmhelpers.fetch import (
    BaseFetchHandler,
    UnhandledSource
)
from charmhelpers.core.host import mkdir

import six
if six.PY3:
    raise ImportError('GitPython does not support Python 3')

try:
    from git import Repo
except ImportError:
    from charmhelpers.fetch import apt_install
    apt_install("python-git")
    from git import Repo

from git.exc import GitCommandError  # noqa E402


class GitUrlFetchHandler(BaseFetchHandler):
    """Handler for git branches via generic and github URLs"""
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        # TODO (mattyw) no support for ssh git@ yet
        if url_parts.scheme not in ('http', 'https', 'git'):
            return False
        else:
            return True

    def clone(self, source, dest, branch):
        if not self.can_handle(source):
            raise UnhandledSource("Cannot handle {}".format(source))

        repo = Repo.clone_from(source, dest)
        repo.git.checkout(branch)

    def install(self, source, branch="master", dest=None):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        if dest:
            dest_dir = os.path.join(dest, branch_name)
        else:
            dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                    branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0o755)
        try:
            self.clone(source, dest_dir, branch)
        except GitCommandError as e:
            raise UnhandledSource(e.message)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
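Similarly, a minimal sketch for the git handler (again Python 2 only per the guard above; the repository URL and branch are placeholders, not sources this charm clones):

# Hypothetical usage sketch, for illustration only (Python 2).
from charmhelpers.fetch.giturl import GitUrlFetchHandler

handler = GitUrlFetchHandler()
source = 'https://github.com/example/some-repo'  # placeholder URL
if handler.can_handle(source):
    # Clones into $CHARM_DIR/fetched/<repo_name> and checks out the branch.
    fetched_dir = handler.install(source, branch='master')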
17
hooks/charmhelpers/payload/__init__.py
Normal file
@ -0,0 +1,17 @@
# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

"Tools for working with files injected into a charm just before deployment."
66
hooks/charmhelpers/payload/execd.py
Normal file
@ -0,0 +1,66 @@
#!/usr/bin/env python

# Copyright 2014-2015 Canonical Limited.
#
# This file is part of charm-helpers.
#
# charm-helpers is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3 as
# published by the Free Software Foundation.
#
# charm-helpers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import os
import sys
import subprocess
from charmhelpers.core import hookenv


def default_execd_dir():
    return os.path.join(os.environ['CHARM_DIR'], 'exec.d')


def execd_module_paths(execd_dir=None):
    """Generate a list of full paths to modules within execd_dir."""
    if not execd_dir:
        execd_dir = default_execd_dir()

    if not os.path.exists(execd_dir):
        return

    for subpath in os.listdir(execd_dir):
        module = os.path.join(execd_dir, subpath)
        if os.path.isdir(module):
            yield module


def execd_submodule_paths(command, execd_dir=None):
    """Generate a list of full paths to the specified command within execd_dir.
    """
    for module_path in execd_module_paths(execd_dir):
        path = os.path.join(module_path, command)
        if os.access(path, os.X_OK) and os.path.isfile(path):
            yield path


def execd_run(command, execd_dir=None, die_on_error=False, stderr=None):
    """Run command for each module within execd_dir which defines it."""
    for submodule_path in execd_submodule_paths(command, execd_dir):
        try:
            subprocess.check_call(submodule_path, shell=True, stderr=stderr)
        except subprocess.CalledProcessError as e:
            hookenv.log("Error ({}) running {}. Output: {}".format(
                e.returncode, e.cmd, e.output))
            if die_on_error:
                sys.exit(e.returncode)


def execd_preinstall(execd_dir=None):
    """Run charm-pre-install for each module within execd_dir."""
    execd_run('charm-pre-install', execd_dir=execd_dir)
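execd_preinstall() gives charm authors a pre-install escape hatch: every executable named charm-pre-install sitting one directory below $CHARM_DIR/exec.d is run before the charm's own install steps. A hypothetical layout and call (directory names below are illustrative, not part of this charm):

# Hypothetical layout, for illustration only:
#
#   $CHARM_DIR/
#       exec.d/
#           vendor-tweaks/
#               charm-pre-install   (any executable file)
#
# An install hook would then run every such script with:
from charmhelpers.payload.execd import execd_preinstall
execd_preinstall()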