Updated restart_pg

Junaid Ali 2016-03-21 21:30:34 +05:00
commit cf875680fd
8 changed files with 130 additions and 50 deletions


@@ -7,7 +7,7 @@ virtualenv:
netaddr jinja2
lint: virtualenv
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests
.venv/bin/flake8 --exclude hooks/charmhelpers hooks unit_tests tests --ignore E402
@charm proof
unit_test: virtualenv
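The new --ignore E402 is presumably needed because this change leaves executable statements ahead of a later module-level import in the unit tests (see the test file hunk at the end of this diff). A minimal sketch of the pattern flake8 would otherwise flag, taken from that test file:

from mock import MagicMock, patch

with patch('charmhelpers.core.hookenv.config') as config:
    config.return_value = 'neutron'   # executable code at module level...
    import pg_dir_utils as utils

utils.restart_map = MagicMock()

import pg_dir_hooks as hooks          # ...so flake8 reports E402 for this import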


@@ -3,6 +3,9 @@
# This file contains the class that generates context
# for PLUMgrid template files.
import re
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.core.hookenv import (
config,
unit_get,
@@ -12,13 +15,15 @@ from charmhelpers.core.hookenv import (
related_units,
relation_get,
)
from charmhelpers.contrib.network.ip import is_ip
from charmhelpers.contrib.openstack import context
from charmhelpers.contrib.openstack.utils import get_host_ip
from charmhelpers.contrib.network.ip import get_address_in_network
from charmhelpers.contrib.network.ip import (
is_ip,
get_address_in_network,
)
import re
from socket import gethostname as get_unit_hostname
from socket import (
gethostname,
getfqdn
)
def _pg_dir_ips():
@@ -72,6 +77,7 @@ class PGDirContext(context.NeutronContext):
pg_dir_ips = _pg_dir_ips()
pg_dir_ips.append(str(get_address_in_network(network=None,
fallback=get_host_ip(unit_get('private-address')))))
pg_dir_ips = sorted(pg_dir_ips)
pg_ctxt['director_ips'] = pg_dir_ips
pg_dir_ips_string = ''
single_ip = True
@@ -87,8 +93,10 @@ class PGDirContext(context.NeutronContext):
pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
else:
raise ValueError('Invalid IP Provided')
unit_hostname = get_unit_hostname()
pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
unit_hostname = gethostname()
pg_ctxt['pg_hostname'] = unit_hostname
pg_ctxt['pg_fqdn'] = getfqdn()
from pg_dir_utils import get_mgmt_interface, get_fabric_interface
pg_ctxt['interface'] = get_mgmt_interface()
pg_ctxt['fabric_interface'] = get_fabric_interface()
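The added sorted() call makes the director list deterministic regardless of relation ordering; a small illustration using the addresses that appear in the updated unit tests below:

pg_dir_ips = ['192.168.100.202', '192.168.100.203']   # addresses from the peer relation
pg_dir_ips.append('192.168.100.201')                   # this unit's own address
pg_dir_ips = sorted(pg_dir_ips)
# director_ips        -> ['192.168.100.201', '192.168.100.202', '192.168.100.203']
# director_ips_string -> '192.168.100.201,192.168.100.202,192.168.100.203'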


@@ -7,6 +7,7 @@
import sys
import time
from charmhelpers.core.hookenv import (
Hooks,
UnregisteredHookError,
@@ -32,7 +33,8 @@ from pg_dir_utils import (
ensure_mtu,
add_lcm_key,
post_pg_license,
fabric_interface_changed
fabric_interface_changed,
load_iptables
)
hooks = Hooks()
@@ -44,6 +46,7 @@ def install():
'''
Install hook is run when the charm is first deployed on a node.
'''
load_iptables()
configure_sources(update=True)
pkgs = determine_packages()
for pkg in pkgs:
@@ -115,6 +118,15 @@ def start():
time.sleep(15)
@hooks.hook('upgrade-charm')
def upgrade_charm():
'''
This hook is run when the charm is upgraded
'''
load_iptables()
CONFIGS.write_all()
@hooks.hook('stop')
def stop():
'''


@@ -2,8 +2,18 @@
# This file contains functions used by the hooks to deploy PLUMgrid Director.
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
import pg_dir_context
import subprocess
import time
import os
import json
from collections import OrderedDict
from socket import gethostname as get_unit_hostname
from copy import deepcopy
from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute
from charmhelpers.contrib.openstack import templating
from charmhelpers.core.host import set_nic_mtu
from charmhelpers.contrib.storage.linux.ceph import modprobe
from charmhelpers.core.hookenv import (
log,
config,
@@ -17,32 +27,23 @@ from charmhelpers.contrib.network.ip import (
is_address_in_network,
get_iface_addr
)
from charmhelpers.fetch import (
apt_cache
)
from charmhelpers.contrib.openstack import templating
from charmhelpers.core.host import set_nic_mtu
from collections import OrderedDict
from charmhelpers.contrib.storage.linux.ceph import modprobe
from charmhelpers.contrib.openstack.utils import (
os_release,
)
from charmhelpers.core.host import (
service_start,
service_stop,
service_running
)
from charmhelpers.fetch import (
apt_cache,
apt_install
)
from charmhelpers.contrib.openstack.utils import (
os_release,
)
from socket import gethostname as get_unit_hostname
import pg_dir_context
import subprocess
import time
import os
import json
LXC_CONF = '/etc/libvirt/lxc.conf'
TEMPLATES = 'templates/'
PG_LXC_DATA_PATH = '/var/lib/libvirt/filesystems/plumgrid-data'
PG_LXC_PATH = '/var/lib/libvirt/filesystems/plumgrid'
PG_CONF = '%s/conf/pg/plumgrid.conf' % PG_LXC_DATA_PATH
PG_KA_CONF = '%s/conf/etc/keepalived.conf' % PG_LXC_DATA_PATH
PG_DEF_CONF = '%s/conf/pg/nginx.conf' % PG_LXC_DATA_PATH
@@ -52,7 +53,6 @@ PG_IFCS_CONF = '%s/conf/pg/ifcs.conf' % PG_LXC_DATA_PATH
AUTH_KEY_PATH = '%s/root/.ssh/authorized_keys' % PG_LXC_DATA_PATH
TEMP_LICENSE_FILE = '/tmp/license'
BASE_RESOURCE_MAP = OrderedDict([
(PG_KA_CONF, {
'services': ['plumgrid'],
@@ -141,19 +141,20 @@ def restart_pg():
'''
Stops and Starts PLUMgrid service after flushing iptables.
'''
service_stop('plumgrid')
time.sleep(2)
_exec_cmd(cmd=['iptables', '-F'])
if not service_start('plumgrid'):
if not service_start('libvirt-bin'):
raise ValueError("libvirt-bin service couldn't be started")
stop_pg()
service_start('plumgrid')
time.sleep(3)
if not service_running('plumgrid'):
if service_running('libvirt-bin'):
raise ValueError("plumgrid service couldn't be started")
else:
# wait for 3 secs so that libvirt-bin can be completely up and
# start the plumgrid service
time.sleep(3)
if not service_start('plumgrid'):
raise ValueError("plumgrid service couldn't be started")
time.sleep(5)
if service_start('libvirt-bin'):
time.sleep(3)
if not service_running('plumgrid'):
raise ValueError("plumgrid service couldn't be started")
else:
raise ValueError("libvirt-bin service couldn't be started")
time.sleep(3)
def stop_pg():
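For reference, the updated restart_pg after this commit reads roughly as follows (reconstructed from the added lines above; indentation is inferred):

def restart_pg():
    '''
    Stops and Starts PLUMgrid service after flushing iptables.
    '''
    stop_pg()
    service_start('plumgrid')
    time.sleep(3)
    if not service_running('plumgrid'):
        # plumgrid runs inside a libvirt LXC container; if libvirt-bin itself
        # is down, bring it up first and retry
        if service_running('libvirt-bin'):
            raise ValueError("plumgrid service couldn't be started")
        else:
            if service_start('libvirt-bin'):
                time.sleep(3)
                if not service_running('plumgrid'):
                    raise ValueError("plumgrid service couldn't be started")
            else:
                raise ValueError("libvirt-bin service couldn't be started")
    time.sleep(3)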
@@ -176,7 +177,7 @@ def remove_iovisor():
Removes iovisor kernel module.
'''
_exec_cmd(cmd=['rmmod', 'iovisor'],
error_msg='Error Loading IOVisor Kernel Module')
error_msg='Error Removing IOVisor Kernel Module')
time.sleep(1)
@@ -335,8 +336,7 @@ def post_pg_license():
'plumgrid:plumgrid',
LICENSE_POST_PATH,
'-d',
json.dumps(license)
]
json.dumps(license)]
licence_get_cmd = [PG_CURL, '-u', 'plumgrid:plumgrid', LICENSE_GET_PATH]
try:
old_license = subprocess.check_output(licence_get_cmd)
@@ -350,3 +350,55 @@ def post_pg_license():
log('No change in PLUMgrid License')
return 0
return 1
def load_iptables():
'''
Loads iptables rules to allow all PLUMgrid communication.
'''
network = get_cidr_from_iface(get_mgmt_interface())
if network:
_exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'tcp',
'-j', 'ACCEPT', '-s', network, '-d',
network, '-m', 'state', '--state', 'NEW'])
_exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'udp', '-j',
'ACCEPT', '-s', network, '-d', network,
'-m', 'state', '--state', 'NEW'])
_exec_cmd(['sudo', 'iptables', '-I', 'INPUT', '-s', network,
'-d', '224.0.0.18/32', '-j', 'ACCEPT'])
_exec_cmd(['sudo', 'iptables', '-I', 'INPUT', '-p', 'vrrp', '-j',
'ACCEPT'])
_exec_cmd(['sudo', 'iptables', '-A', 'INPUT', '-p', 'tcp', '-j',
'ACCEPT', '-d', config('plumgrid-virtual-ip'), '-m',
'state', '--state', 'NEW'])
apt_install('iptables-persistent')
def get_cidr_from_iface(interface):
'''
Determines Network CIDR from interface.
'''
if not interface:
return None
apt_install('ohai')
try:
os_info = subprocess.check_output(['ohai', '-l', 'fatal'])
except OSError:
log('Unable to get operating system information')
return None
try:
os_info_json = json.loads(os_info)
except ValueError:
log('Unable to determine network')
return None
device = os_info_json['network']['interfaces'].get(interface)
if device is not None:
if device.get('routes'):
routes = device['routes']
for net in routes:
if 'scope' in net:
return net.get('destination')
else:
return None
else:
return None
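get_cidr_from_iface shells out to ohai and walks its JSON for the interface's routes; a minimal sketch of the shape the function expects (interface name taken from the unit tests, the CIDR itself is an assumed example):

os_info_json = {
    'network': {
        'interfaces': {
            'juju-br0': {
                'routes': [
                    {'destination': '192.168.100.0/24', 'scope': 'link'},
                ],
            },
        },
    },
}
# get_cidr_from_iface('juju-br0') returns the 'destination' of the first route
# that carries a 'scope' key -- here '192.168.100.0/24' -- which load_iptables()
# then uses for the director-to-director ACCEPT rules.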

hooks/upgrade-charm Symbolic link

@@ -0,0 +1 @@
pg_dir_hooks.py
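The symlink is what routes Juju's upgrade-charm event to the @hooks.hook('upgrade-charm') handler added above, so existing deployments also pick up the new iptables rules on charm upgrade, not only fresh installs. The charm presumably relies on the standard charmhelpers dispatch, sketched below; the __main__ block itself is not part of this diff:

import sys
from charmhelpers.core.hookenv import Hooks, UnregisteredHookError, log

hooks = Hooks()

@hooks.hook('upgrade-charm')
def upgrade_charm():
    # body as in the hunk above: load_iptables(); CONFIGS.write_all()
    pass

if __name__ == '__main__':
    try:
        # Hooks.execute() picks the handler from the basename of the invoked
        # file, i.e. the name of the symlink Juju ran ('upgrade-charm' here).
        hooks.execute(sys.argv)
    except UnregisteredHookError as e:
        log('Unknown hook {} - skipping.'.format(e))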


@@ -1,5 +1,5 @@
127.0.0.1 localhost
127.0.1.1 {{ pg_hostname }}
127.0.1.1 {{ pg_fqdn }} {{ pg_hostname }}
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
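Listing the FQDN before the short hostname follows the usual /etc/hosts convention (canonical name first, then aliases). A quick rendering check with jinja2 (already a test dependency in the Makefile), using the hostname values from the updated unit tests:

from jinja2 import Template

line = Template('127.0.1.1 {{ pg_fqdn }} {{ pg_hostname }}').render(
    pg_fqdn='node0.maas', pg_hostname='node0')
# line == '127.0.1.1 node0.maas node0'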


@@ -8,7 +8,8 @@ TO_PATCH = [
'config',
'unit_get',
'get_host_ip',
'get_unit_hostname',
'gethostname',
'getfqdn'
]
@@ -71,7 +72,8 @@ class PGDirContextTest(CharmTestCase):
_npa.side_effect = mock_npa
_unit_get.return_value = '192.168.100.201'
_unit_priv_ip.return_value = '192.168.100.201'
self.get_unit_hostname.return_value = 'node0'
self.gethostname.return_value = 'node0'
self.getfqdn.return_value = 'node0.maas'
self.get_host_ip.return_value = '192.168.100.201'
_pg_dir_ips.return_value = ['192.168.100.202', '192.168.100.203']
_mgmt_int.return_value = 'juju-br0'
@@ -87,14 +89,15 @@ class PGDirContextTest(CharmTestCase):
'neutron_url': 'https://None:9696',
'virtual_ip': '192.168.100.250',
'pg_hostname': 'node0',
'pg_fqdn': 'node0.maas',
'interface': 'juju-br0',
'fabric_interface': 'juju-br0',
'label': 'node0',
'fabric_mode': 'host',
'virtual_router_id': '250',
'director_ips': ['192.168.100.202', '192.168.100.203',
'192.168.100.201'],
'director_ips': ['192.168.100.201', '192.168.100.202',
'192.168.100.203'],
'director_ips_string':
'192.168.100.202,192.168.100.203,192.168.100.201',
'192.168.100.201,192.168.100.202,192.168.100.203',
}
self.assertEquals(expect, napi_ctxt())


@@ -1,5 +1,7 @@
from mock import MagicMock, patch, call
from test_utils import CharmTestCase
with patch('charmhelpers.core.hookenv.config') as config:
config.return_value = 'neutron'
import pg_dir_utils as utils
@@ -9,6 +11,7 @@ _map = utils.restart_map
utils.register_configs = MagicMock()
utils.restart_map = MagicMock()
import pg_dir_hooks as hooks
utils.register_configs = _reg
@@ -28,7 +31,8 @@ TO_PATCH = [
'add_lcm_key',
'determine_packages',
'post_pg_license',
'config'
'config',
'load_iptables'
]
NEUTRON_CONF_DIR = "/etc/neutron"
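Each name in TO_PATCH is patched out on the module under test by CharmTestCase, so the hooks can run without touching the real system. A purely illustrative test method (hypothetical name and assertion, assuming the usual CharmTestCase behaviour of exposing each patched name as a mock attribute on self):

def test_install_hook_loads_iptables(self):       # hypothetical test inside the CharmTestCase subclass
    hooks.hooks.execute(['hooks/install'])         # dispatch the install hook
    self.load_iptables.assert_called_once_with()   # mock provided via the new TO_PATCH entry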