Adding fabric/data network support

This commit is contained in:
Bilal Baqar 2015-11-20 20:26:17 -08:00
parent 9b14998e5d
commit b9a6d93d6e
5 changed files with 82 additions and 24 deletions

View File

@ -11,6 +11,24 @@ options:
type: string
default: 'juju-br0'
description: The interface connected to the PLUMgrid Management network.
os-data-network:
type: string
default:
description: |
The IP address and netmask of the OpenStack Data network (e.g.,
192.168.0.0/24)
.
This network will be used for tenant network traffic in overlay
networks.
fabric-interfaces:
default: 'MANAGEMENT'
type: string
description: |
Interfaces that will provide fabric connectivity on the director nodes.
Provided in the form of a JSON string. These interfaces have to be connected
to the os-data-network specified in the config. Default value is MANAGEMENT which
will configure the management interface as the fabric interface on each
director.
network-device-mtu:
type: string
default: '1580'

View File

@ -83,8 +83,9 @@ class PGDirContext(context.NeutronContext):
pg_ctxt['director_ips_string'] = pg_dir_ips_string
pg_ctxt['virtual_ip'] = conf['plumgrid-virtual-ip']
pg_ctxt['pg_hostname'] = "pg-director"
from pg_dir_utils import get_mgmt_interface
from pg_dir_utils import get_mgmt_interface, get_fabric_interface
pg_ctxt['interface'] = get_mgmt_interface()
pg_ctxt['fabric_interface'] = get_fabric_interface()
pg_ctxt['label'] = get_unit_hostname()
pg_ctxt['fabric_mode'] = 'host'
virtual_ip_array = re.split('\.', conf['plumgrid-virtual-ip'])

View File

@ -13,7 +13,9 @@ from charmhelpers.contrib.network.ip import (
get_iface_from_addr,
get_bridges,
get_bridge_nics,
is_ip
is_ip,
is_address_in_network,
get_iface_addr
)
from charmhelpers.fetch import (
apt_cache
@ -29,6 +31,7 @@ from charmhelpers.core.host import (
service_start,
service_stop,
)
from socket import gethostname as get_unit_hostname
import pg_dir_context
import subprocess
import time
@ -169,22 +172,23 @@ def remove_iovisor():
time.sleep(1)
def interface_exists(interface):
    '''
    Checks if a network interface exists on the node.

    :param interface: name of the network interface (e.g. 'eth0').
    :returns: True if `ip link show <interface>` exits successfully,
        False otherwise.
    '''
    try:
        # Discard command output; only the exit status matters. Using a
        # context manager ensures the devnull file handle is closed
        # (the previous open(os.devnull, 'w') leaked a descriptor per call).
        with open(os.devnull, 'w') as devnull:
            subprocess.check_call(['ip', 'link', 'show', interface],
                                  stdout=devnull,
                                  stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        # Non-zero exit status: the interface does not exist.
        return False
    except OSError:
        # The 'ip' binary itself could not be executed; report the
        # interface as absent instead of crashing the hook.
        return False
    return True
def get_mgmt_interface():
'''
Returns the managment interface.
'''
def interface_exists(interface):
'''
Checks if interface exists on node.
'''
try:
subprocess.check_call(['ip', 'link', 'show', interface],
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return False
return True
mgmt_interface = config('mgmt-interface')
if interface_exists(mgmt_interface):
return mgmt_interface
@ -194,17 +198,49 @@ def get_mgmt_interface():
return get_iface_from_addr(unit_get('private-address'))
def get_fabric_interface():
    '''
    Returns the fabric interface for this node.

    The 'fabric-interfaces' config option is either the literal string
    'MANAGEMENT' (use the management interface as the fabric interface)
    or a JSON object mapping unit hostname -> interface name, with an
    optional 'DEFAULT' key used as a fallback for hosts not listed.

    :returns: name of the fabric interface.
    :raises ValueError: if the config JSON is invalid, no interface is
        configured for this node, the configured interface does not
        exist, or its address is not on the os-data-network.
    '''
    fabric_interfaces = config('fabric-interfaces')
    if fabric_interfaces == 'MANAGEMENT':
        return get_mgmt_interface()
    try:
        all_fabric_interfaces = json.loads(fabric_interfaces)
    except ValueError:
        raise ValueError('Invalid json provided for fabric interfaces')
    hostname = get_unit_hostname()
    if hostname in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces[hostname]
    elif 'DEFAULT' in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces['DEFAULT']
    else:
        raise ValueError('No fabric interface provided for node')
    # Guard clauses replace the original nested if/else pyramid, whose
    # trailing "return node_fabric_interface" was unreachable (every
    # branch either returned or raised before reaching it).
    if not interface_exists(node_fabric_interface):
        log('Provided fabric interface %s does not exist'
            % node_fabric_interface)
        raise ValueError('Provided fabric interface does not exist')
    # The interface's primary address must fall inside the configured
    # OpenStack data network.
    if not is_address_in_network(config('os-data-network'),
                                 get_iface_addr(node_fabric_interface)[0]):
        raise ValueError('Fabric interface not in fabric network')
    return node_fabric_interface
def ensure_mtu():
    '''
    Ensures the required MTU on the underlying networking of the node.

    Applies the configured 'network-device-mtu' to the fabric interface
    and, when the fabric interface is a bridge, to every NIC attached to
    the bridge as well (a bridge's effective MTU is limited by its ports).

    NOTE(review): the stripped diff view showed both the pre-change
    (mgmt_interface) and post-change (fabric_interface) statements merged;
    this is the coherent post-commit version using the fabric interface.
    '''
    interface_mtu = config('network-device-mtu')
    fabric_interface = get_fabric_interface()
    if fabric_interface in get_bridges():
        # Set the MTU on each port of the bridge first.
        attached_interfaces = get_bridge_nics(fabric_interface)
        for interface in attached_interfaces:
            set_nic_mtu(interface, interface_mtu)
    set_nic_mtu(fabric_interface, interface_mtu)
def _exec_cmd(cmd=None, error_msg='Command exited with ERRORs', fatal=False):

View File

@ -1,2 +1,2 @@
{{ interface }} = fabric_core host
{{ fabric_interface }} = fabric_core host

View File

@ -46,11 +46,12 @@ class PGDirContextTest(CharmTestCase):
@patch.object(charmhelpers.contrib.openstack.context, 'unit_private_ip')
@patch.object(context, '_pg_dir_ips')
@patch.object(utils, 'get_mgmt_interface')
def test_neutroncc_context_api_rel(self, _mgmt_int, _pg_dir_ips,
_unit_priv_ip, _npa, _ens_pkgs,
_save_ff, _https, _is_clus,
_unit_get, _config, _runits, _rids,
_rget):
@patch.object(utils, 'get_fabric_interface')
def test_neutroncc_context_api_rel(self, _fabric_int, _mgmt_int,
_pg_dir_ips, _unit_priv_ip, _npa,
_ens_pkgs, _save_ff, _https,
_is_clus, _unit_get, _config,
_runits, _rids, _rget):
def mock_npa(plugin, section, manager):
if section == "driver":
return "neutron.randomdriver"
@ -74,6 +75,7 @@ class PGDirContextTest(CharmTestCase):
self.get_host_ip.return_value = '192.168.100.201'
_pg_dir_ips.return_value = ['192.168.100.202', '192.168.100.203']
_mgmt_int.return_value = 'juju-br0'
_fabric_int.return_value = 'juju-br0'
napi_ctxt = context.PGDirContext()
expect = {
'config': 'neutron.randomconfig',
@ -86,6 +88,7 @@ class PGDirContextTest(CharmTestCase):
'virtual_ip': '192.168.100.250',
'pg_hostname': 'pg-director',
'interface': 'juju-br0',
'fabric_interface': 'juju-br0',
'label': 'node0',
'fabric_mode': 'host',
'virtual_router_id': '250',