1) Fixed '-' vs '_' inconsistency in configuration parameter names
2) Set external_network_id in all l3_agent.inis for a given service group
3) Write out configs on cluster-relation-{joined,changed,departed}
4) Add code to ensure that neutron objects are only relocated to other nodes in the same service group
5) Tests for the above
commit 96f4b40c1b
README.md | 23
@@ -49,6 +49,29 @@ The gateway provides two key services; L3 network routing and DHCP services.
 
 These are both required in a fully functional Neutron Openstack deployment.
 
+If multiple floating pools are needed then an L3 agent (which corresponds to
+a quantum-gateway for the sake of this charm) is needed for each one. Each
+gateway needs to be deployed as a separate service so that the external
+network id can be set differently for each gateway e.g.
+
+    juju deploy quantum-gateway quantum-gateway-extnet1
+    juju add-relation quantum-gateway-extnet1 mysql
+    juju add-relation quantum-gateway-extnet1 rabbitmq-server
+    juju add-relation quantum-gateway-extnet1 nova-cloud-controller
+    juju deploy quantum-gateway quantum-gateway-extnet2
+    juju add-relation quantum-gateway-extnet2 mysql
+    juju add-relation quantum-gateway-extnet2 rabbitmq-server
+    juju add-relation quantum-gateway-extnet2 nova-cloud-controller
+
+Create extnet1 and extnet2 via the neutron client and take a note of their ids
+
+    juju set quantum-gateway-extnet1 "run-internal-router=leader"
+    juju set quantum-gateway-extnet2 "run-internal-router=none"
+    juju set quantum-gateway-extnet1 "external-network-id=<extnet1 id>"
+    juju set quantum-gateway-extnet2 "external-network-id=<extnet2 id>"
+
+See upstream [Neutron multi extnet](http://docs.openstack.org/trunk/config-reference/content/adv_cfg_l3_agent_multi_extnet.html)
+
 TODO
 ----
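The README's "create extnet1 and extnet2 via the neutron client" step is left to the operator; a minimal sketch, assuming the neutron CLI of this release series (these commands are illustrative and not part of this commit):

    neutron net-create extnet1 --router:external=True
    neutron net-create extnet2 --router:external=True
    neutron net-list    # note each network's id for the juju set calls above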
config.yaml | 17
@@ -25,6 +25,23 @@ options:
         - deb http://my.archive.com/ubuntu main|KEYID
         .
         Note that quantum/neutron is only supported >= Folsom.
+  run-internal-router:
+    type: string
+    default: all
+    description: |
+      Optional configuration to support how the L3 agent option
+      handle_internal_only_routers is configured.
+      all => Set to be true everywhere
+      none => Set to be false everywhere
+      leader => Set to be true on one node (the leader) and false everywhere
+        else.
+      Use leader and none when configuring multiple floating pools
+  external-network-id:
+    type: string
+    description: |
+      Optional configuration to set the external-network-id. Only needed when
+      configuring multiple external networks and should be used in conjunction
+      with run-internal-router.
   rabbit-user:
     type: string
     default: neutron
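Since run-internal-router and external-network-id are intended to be used together when running multiple floating pools, a typical two-pool deployment sets them pairwise per service (a sketch restating the README example; juju 1.x accepts several key=value pairs in one juju set, and the <extnet id> values are placeholders):

    juju set quantum-gateway-extnet1 "run-internal-router=leader" "external-network-id=<extnet1 id>"
    juju set quantum-gateway-extnet2 "run-internal-router=none" "external-network-id=<extnet2 id>"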
hooks/cluster-relation-broken (symbolic link) | 1

@@ -0,0 +1 @@
+quantum_hooks.py
hooks/quantum_contexts.py

@@ -20,6 +20,9 @@ from charmhelpers.contrib.openstack.context import (
 from charmhelpers.contrib.openstack.utils import (
     get_os_codename_install_source
 )
+from charmhelpers.contrib.hahelpers.cluster import (
+    eligible_leader
+)
 
 DB_USER = "quantum"
 QUANTUM_DB = "quantum"

@@ -99,6 +102,23 @@ class NetworkServiceContext(OSContextGenerator):
         return {}
 
 
+class L3AgentContext(OSContextGenerator):
+    def __call__(self):
+        ctxt = {}
+        if config('run-internal-router') == 'leader':
+            ctxt['handle_internal_only_router'] = eligible_leader(None)
+
+        if config('run-internal-router') == 'all':
+            ctxt['handle_internal_only_router'] = True
+
+        if config('run-internal-router') == 'none':
+            ctxt['handle_internal_only_router'] = False
+
+        if config('external-network-id'):
+            ctxt['ext_net_id'] = config('external-network-id')
+        return ctxt
+
+
 class ExternalPortContext(OSContextGenerator):
     def __call__(self):
         if config('ext-port'):
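The decision table that L3AgentContext encodes, written out as a standalone sketch (not charm code; is_leader stands in for eligible_leader(None), and the two config options are passed explicitly):

    def l3_agent_context(run_internal_router, external_network_id, is_leader):
        # Mirror of L3AgentContext.__call__ with its dependencies inlined.
        ctxt = {}
        if run_internal_router == 'leader':
            ctxt['handle_internal_only_router'] = is_leader
        if run_internal_router == 'all':
            ctxt['handle_internal_only_router'] = True
        if run_internal_router == 'none':
            ctxt['handle_internal_only_router'] = False
        if external_network_id:
            ctxt['ext_net_id'] = external_network_id
        return ctxt

    # The same cases the new unit tests cover:
    assert l3_agent_context('none', '', False) == \
        {'handle_internal_only_router': False}
    assert l3_agent_context('leader', 'netid', True) == \
        {'handle_internal_only_router': True, 'ext_net_id': 'netid'}
    assert l3_agent_context('all', 'netid', True) == \
        {'handle_internal_only_router': True, 'ext_net_id': 'netid'}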
hooks/quantum_hooks.py

@@ -16,7 +16,8 @@ from charmhelpers.fetch import (
 )
 from charmhelpers.core.host import (
     restart_on_change,
-    lsb_release
+    lsb_release,
+    service_stop
 )
 from charmhelpers.contrib.hahelpers.cluster import (
     eligible_leader

@@ -111,7 +112,9 @@ def amqp_joined(relation_id=None):
 
 
 @hooks.hook('shared-db-relation-changed',
-            'amqp-relation-changed')
+            'amqp-relation-changed',
+            'cluster-relation-changed',
+            'cluster-relation-joined')
 @restart_on_change(restart_map())
 def db_amqp_changed():
     CONFIGS.write_all()

@@ -126,6 +129,7 @@ def nm_changed():
 
 
 @hooks.hook("cluster-relation-departed")
+@restart_on_change(restart_map())
 def cluster_departed():
     if config('plugin') == 'nvp':
         log('Unable to re-assign agent resources for failed nodes with nvp',

@@ -133,6 +137,13 @@ def cluster_departed():
         return
     if eligible_leader(None):
         reassign_agent_resources()
+    CONFIGS.write_all()
+
+
+@hooks.hook('cluster-relation-broken')
+@hooks.hook('stop')
+def stop():
+    service_stop('neutron-l3-agent')
 
 
 if __name__ == '__main__':
hooks/quantum_utils.py

@@ -2,6 +2,8 @@ from charmhelpers.core.host import service_running
 from charmhelpers.core.hookenv import (
     log,
     config,
+    relations_of_type,
+    unit_private_ip,
 )
 from charmhelpers.fetch import (
     apt_install,

@@ -27,6 +29,7 @@ from quantum_contexts import (
     networking_name,
     QuantumGatewayContext,
     NetworkServiceContext,
+    L3AgentContext,
     QuantumSharedDBContext,
     ExternalPortContext,
 )

@@ -209,7 +212,8 @@ NEUTRON_OVS_CONFIG_FILES = {
                       'neutron-plugin-openvswitch-agent']
     },
     NEUTRON_L3_AGENT_CONF: {
-        'hook_contexts': [NetworkServiceContext()],
+        'hook_contexts': [NetworkServiceContext(),
+                          L3AgentContext()],
         'services': ['neutron-l3-agent']
     },
     # TODO: Check to see if this is actually required

@@ -315,6 +319,10 @@ def reassign_agent_resources():
         auth_url=auth_url,
         region_name=env['region'])
 
+    partner_gateways = [unit_private_ip().split('.')[0]]
+    for partner_gateway in relations_of_type(reltype='cluster'):
+        partner_gateways.append(partner_gateway['private-address'].split('.')[0])
+
     agents = quantum.list_agents(agent_type=DHCP_AGENT)
     dhcp_agents = []
     l3_agents = []

@@ -327,7 +335,8 @@ def reassign_agent_resources():
                                         agent['id'])['networks']:
                 networks[network['id']] = agent['id']
         else:
-            dhcp_agents.append(agent['id'])
+            if agent['host'].split('.')[0] in partner_gateways:
+                dhcp_agents.append(agent['id'])
 
     agents = quantum.list_agents(agent_type=L3_AGENT)
     routers = {}

@@ -339,7 +348,13 @@ def reassign_agent_resources():
                                        agent['id'])['routers']:
                 routers[router['id']] = agent['id']
         else:
-            l3_agents.append(agent['id'])
+            if agent['host'].split('.')[0] in partner_gateways:
+                l3_agents.append(agent['id'])
+
+    if len(dhcp_agents) == 0 or len(l3_agents) == 0:
+        log('Unable to relocate resources, there are %s dhcp_agents and %s \
+             l3_agents in this cluster' % (len(dhcp_agents), len(l3_agents)))
+        return
 
     index = 0
     for router_id in routers:
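The service-group scoping above (item 4 of the commit message) hinges on short-hostname comparison: the unit's own private address plus its cluster peers' addresses form the partner list, and an agent only becomes a relocation target if its host matches. A standalone sketch (assuming, as the tests do, that private addresses are hostnames like 'cluster2-machine1.internal'):

    def partner_short_names(local_address, peer_addresses):
        # Everything before the first '.' identifies the machine.
        partners = [local_address.split('.')[0]]
        partners += [addr.split('.')[0] for addr in peer_addresses]
        return partners

    partners = partner_short_names(
        'cluster2-machine1.internal',
        ['cluster2-machine2.internal', 'cluster2-machine3.internal'])
    # An agent hosted in another service group is never a target:
    assert 'cluster1-machine1.internal'.split('.')[0] not in partners
    assert 'cluster2-machine3.internal'.split('.')[0] in partners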
templates/l3_agent.ini

@@ -7,3 +7,7 @@ admin_user = {{ service_username }}
 admin_password = {{ service_password }}
 root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
 ovs_use_veth = True
+handle_internal_only_routers = {{ handle_internal_only_router }}
+{% if ext_net_id %}
+gateway_external_network_id = {{ ext_net_id }}
+{% endif %}
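With the README's example settings, the new template lines would render roughly as follows (illustrative output, with <extnet1 id>/<extnet2 id> as created earlier). On the quantum-gateway-extnet1 leader:

    handle_internal_only_routers = True
    gateway_external_network_id = <extnet1 id>

And on quantum-gateway-extnet2 units (run-internal-router=none):

    handle_internal_only_routers = False
    gateway_external_network_id = <extnet2 id>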
unit_tests/test_quantum_contexts.py

@@ -15,6 +15,7 @@ TO_PATCH = [
     'unit_get',
     'apt_install',
     'get_os_codename_install_source',
+    'eligible_leader',
 ]
 
 

@@ -141,6 +142,36 @@ class TestExternalPortContext(CharmTestCase):
                          {'ext_port': 'eth1010'})
 
 
+class TestL3AgentContext(CharmTestCase):
+    def setUp(self):
+        super(TestL3AgentContext, self).setUp(quantum_contexts,
+                                              TO_PATCH)
+        self.config.side_effect = self.test_config.get
+
+    def test_no_ext_netid(self):
+        self.test_config.set('run-internal-router', 'none')
+        self.test_config.set('external-network-id', '')
+        self.eligible_leader.return_value = False
+        self.assertEquals(quantum_contexts.L3AgentContext()(),
+                          {'handle_internal_only_router': False})
+
+    def test_hior_leader(self):
+        self.test_config.set('run-internal-router', 'leader')
+        self.test_config.set('external-network-id', 'netid')
+        self.eligible_leader.return_value = True
+        self.assertEquals(quantum_contexts.L3AgentContext()(),
+                          {'handle_internal_only_router': True,
+                           'ext_net_id': 'netid'})
+
+    def test_hior_all(self):
+        self.test_config.set('run-internal-router', 'all')
+        self.test_config.set('external-network-id', 'netid')
+        self.eligible_leader.return_value = True
+        self.assertEquals(quantum_contexts.L3AgentContext()(),
+                          {'handle_internal_only_router': True,
+                           'ext_net_id': 'netid'})
+
+
 class TestQuantumGatewayContext(CharmTestCase):
     def setUp(self):
         super(TestQuantumGatewayContext, self).setUp(quantum_contexts,
unit_tests/test_quantum_hooks.py

@@ -34,7 +34,8 @@ TO_PATCH = [
     'reassign_agent_resources',
     'get_common_package',
     'execd_preinstall',
-    'lsb_release'
+    'lsb_release',
+    'service_stop',
 ]
 
 

@@ -157,3 +158,7 @@ class TestQuantumHooks(CharmTestCase):
         self.eligible_leader.return_value = True
         self._call_hook('cluster-relation-departed')
         self.reassign_agent_resources.assert_called()
+
+    def test_stop(self):
+        self._call_hook('stop')
+        self.service_stop.assert_called_with('neutron-l3-agent')
unit_tests/test_quantum_utils.py

@@ -6,6 +6,7 @@ templating.OSConfigRenderer = MagicMock()
 
 import quantum_utils
 
+
 try:
     import neutronclient
 except ImportError:

@@ -17,6 +18,7 @@ from test_utils import (
 
 import charmhelpers.core.hookenv as hookenv
 
+
 TO_PATCH = [
     'config',
     'get_os_codename_install_source',

@@ -31,7 +33,9 @@ TO_PATCH = [
     'headers_package',
     'full_restart',
     'service_running',
-    'NetworkServiceContext'
+    'NetworkServiceContext',
+    'unit_private_ip',
+    'relations_of_type',
 ]
@@ -235,44 +239,103 @@ agents_all_alive = {
     'DHCP Agent': {
         'agents': [
             {'alive': True,
+             'host': 'cluster1-machine1.internal',
              'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
             {'alive': True,
+             'host': 'cluster1-machine2.internal',
              'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
             {'alive': True,
-             'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'}
+             'host': 'cluster2-machine1.internal',
+             'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
         ]
     },
     'L3 Agent': {
         'agents': [
             {'alive': True,
+             'host': 'cluster1-machine1.internal',
              'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
             {'alive': True,
+             'host': 'cluster1-machine2.internal',
              'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
             {'alive': True,
-             'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'}
+             'host': 'cluster2-machine1.internal',
+             'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
         ]
     }
 }
 
-agents_some_dead = {
+agents_some_dead_cl1 = {
     'DHCP Agent': {
         'agents': [
-            {'alive': True,
+            {'alive': False,
+             'host': 'cluster1-machine1.internal',
              'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
-            {'alive': False,
+            {'alive': True,
+             'host': 'cluster2-machine1.internal',
              'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
             {'alive': True,
-             'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'}
+             'host': 'cluster2-machine2.internal',
+             'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
         ]
     },
     'L3 Agent': {
         'agents': [
-            {'alive': True,
+            {'alive': False,
+             'host': 'cluster1-machine1.internal',
              'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
             {'alive': True,
+             'host': 'cluster2-machine1.internal',
              'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
-            {'alive': False,
-             'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'}
+            {'alive': True,
+             'host': 'cluster2-machine2.internal',
+             'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
         ]
     }
 }
+
+agents_some_dead_cl2 = {
+    'DHCP Agent': {
+        'agents': [
+            {'alive': True,
+             'host': 'cluster1-machine1.internal',
+             'id': '3e3550f2-38cc-11e3-9617-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine1.internal',
+             'id': '53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7'},
+            {'alive': False,
+             'host': 'cluster2-machine2.internal',
+             'id': '92b8b6bc-38ce-11e3-8537-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': 'ebdcc950-51c8-11e3-a804-1c6f65b044df'},
+        ]
+    },
+    'L3 Agent': {
+        'agents': [
+            {'alive': True,
+             'host': 'cluster1-machine1.internal',
+             'id': '7128198e-38ce-11e3-ba78-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine1.internal',
+             'id': '72453824-38ce-11e3-938e-3c970e8b1cf7'},
+            {'alive': False,
+             'host': 'cluster2-machine2.internal',
+             'id': '84a04126-38ce-11e3-9449-3c970e8b1cf7'},
+            {'alive': True,
+             'host': 'cluster2-machine3.internal',
+             'id': '00f4268a-51c9-11e3-9177-1c6f65b044df'},
+        ]
+    }
+}
@@ -291,6 +354,9 @@ l3_agent_routers = {
     ]
 }
 
+cluster1 = ['cluster1-machine1.internal']
+cluster2 = ['cluster2-machine1.internal', 'cluster2-machine2.internal',
+            'cluster2-machine3.internal']
 
 class TestQuantumAgentReallocation(CharmTestCase):
     def setUp(self):
@@ -327,12 +393,14 @@ class TestQuantumAgentReallocation(CharmTestCase):
         self.NetworkServiceContext.return_value = \
             DummyNetworkServiceContext(return_value=network_context)
         dummy_client = MagicMock()
-        dummy_client.list_agents.side_effect = agents_some_dead.itervalues()
+        dummy_client.list_agents.side_effect = agents_some_dead_cl2.itervalues()
         dummy_client.list_networks_on_dhcp_agent.return_value = \
             dhcp_agent_networks
         dummy_client.list_routers_on_l3_agent.return_value = \
             l3_agent_routers
         _client.return_value = dummy_client
+        self.unit_private_ip.return_value = 'cluster2-machine1.internal'
+        self.relations_of_type.return_value = [{'private-address': 'cluster2-machine3.internal'}]
         quantum_utils.reassign_agent_resources()
 
         # Ensure routers removed from dead l3 agent
@@ -343,19 +411,37 @@ class TestQuantumAgentReallocation(CharmTestCase):
                   router_id='baz')], any_order=True)
         # and re-assigned across the remaining two live agents
         dummy_client.add_router_to_l3_agent.assert_has_calls(
-            [call(l3_agent='7128198e-38ce-11e3-ba78-3c970e8b1cf7',
-                  body={'router_id': 'bong'}),
+            [call(l3_agent='00f4268a-51c9-11e3-9177-1c6f65b044df',
+                  body={'router_id': 'baz'}),
              call(l3_agent='72453824-38ce-11e3-938e-3c970e8b1cf7',
-                  body={'router_id': 'baz'})], any_order=True)
+                  body={'router_id': 'bong'})], any_order=True)
         # Ensure networks removed from dead dhcp agent
         dummy_client.remove_network_from_dhcp_agent.assert_has_calls(
-            [call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7',
+            [call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7',
                   network_id='foo'),
-             call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7',
+             call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7',
                   network_id='bar')], any_order=True)
         # and re-assigned across the remaining two live agents
         dummy_client.add_network_to_dhcp_agent.assert_has_calls(
-            [call(dhcp_agent='3e3550f2-38cc-11e3-9617-3c970e8b1cf7',
+            [call(dhcp_agent='53d6eefc-38cc-11e3-b3c8-3c970e8b1cf7',
                   body={'network_id': 'foo'}),
-             call(dhcp_agent='92b8b6bc-38ce-11e3-8537-3c970e8b1cf7',
+             call(dhcp_agent='ebdcc950-51c8-11e3-a804-1c6f65b044df',
                   body={'network_id': 'bar'})], any_order=True)
 
+    @patch('neutronclient.v2_0.client.Client')
+    def test_agents_down_relocation_impossible(self, _client):
+        self.NetworkServiceContext.return_value = \
+            DummyNetworkServiceContext(return_value=network_context)
+        dummy_client = MagicMock()
+        dummy_client.list_agents.side_effect = agents_some_dead_cl1.itervalues()
+        dummy_client.list_networks_on_dhcp_agent.return_value = \
+            dhcp_agent_networks
+        dummy_client.list_routers_on_l3_agent.return_value = \
+            l3_agent_routers
+        _client.return_value = dummy_client
+        self.unit_private_ip.return_value = 'cluster1-machine1.internal'
+        self.relations_of_type.return_value = []
+        quantum_utils.reassign_agent_resources()
+        self.log.assert_called()
+        assert not dummy_client.remove_router_from_l3_agent.called
+        assert not dummy_client.remove_network_from_dhcp_agent.called