Move router operations to xjob
1. What is the problem

Currently router operations are all done in the Tricircle plugin in a
synchronous way. Now we are going to support one top network spreading
into different availability zones, so not only the Tricircle plugin but
also the Nova_apigw needs to handle router operations. Also, having one
top network spread across several availability zones means that we may
need to operate routers in several bottom pods at one time, so it's
better to handle these operations in an asynchronous way.

2. What is the solution to the problem

Reconstruct the code to move router operations to a new helper module
and add a new type of job to set up bottom routers, so both the
Tricircle plugin and the Nova_apigw can operate bottom routers via xjob.

3. What features need to be implemented in the Tricircle to realize
the solution

A new helper module is added, with most of its code moved from the
Tricircle plugin. Also, a new type of job is added.

Change-Id: Ie5a89628a65c4d7cbcb2acd56bafe682580da2c6
parent 9adc57e57f
commit 6be7ed7b44
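For orientation, the new router-setup job is identified by a combined ID that packs the bottom pod, router and network IDs into one string; the RPC client encodes it and the job handler decodes it again (see XJobAPI.setup_bottom_router and XManager.setup_bottom_router in the diff below). A minimal sketch of that round trip, using hypothetical placeholder IDs:

    # Sketch only: the IDs are placeholders; the real values are the bottom
    # pod ID, the top router ID and the top network ID.
    JT_ROUTER_SETUP = 'router_setup'

    pod_id = 'pod-uuid'
    router_id = 'router-uuid'
    net_id = 'net-uuid'

    # encoding side (xrpcapi.XJobAPI.setup_bottom_router)
    combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
    payload = {JT_ROUTER_SETUP: combine_id}

    # decoding side (xmanager.XManager.setup_bottom_router)
    b_pod_id, t_router_id, t_net_id = payload[JT_ROUTER_SETUP].split('#')
    assert (b_pod_id, t_router_id, t_net_id) == (pod_id, router_id, net_id)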
@@ -71,6 +71,7 @@ TOP = 'top'
 
 # job type
 JT_ROUTER = 'router'
+JT_ROUTER_SETUP = 'router_setup'
 JT_PORT_DELETE = 'port_delete'
 
 # network type
@@ -120,7 +120,7 @@ class NeutronResourceHandle(ResourceHandle):
     support_resource = {'network': LIST | CREATE | DELETE | GET,
                         'subnet': LIST | CREATE | DELETE | GET,
                         'port': LIST | CREATE | DELETE | GET,
-                        'router': LIST | CREATE | ACTION | UPDATE,
+                        'router': LIST | CREATE | ACTION | GET | UPDATE,
                         'security_group': LIST | CREATE | GET,
                         'security_group_rule': LIST | CREATE | DELETE,
                         'floatingip': LIST | CREATE | UPDATE | DELETE}
@@ -75,6 +75,12 @@ class XJobAPI(object):
     def test_rpc(self, ctxt, payload):
         return self.client.call(ctxt, 'test_rpc', payload=payload)
 
+    def setup_bottom_router(self, ctxt, net_id, router_id, pod_id):
+        combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
+        self.client.prepare(exchange='openstack').cast(
+            ctxt, 'setup_bottom_router',
+            payload={constants.JT_ROUTER_SETUP: combine_id})
+
     def configure_extra_routes(self, ctxt, router_id):
         # NOTE(zhiyuan) this RPC is called by plugin in Neutron server, whose
         # control exchange is "neutron", however, we starts xjob without
@@ -152,6 +152,12 @@ def update_resource(context, model, pk_value, update_dict):
     return res_obj.to_dict()
 
 
+def update_resources(context, model, filters, update_dict):
+    query = context.session.query(model)
+    query = _filter_query(model, query, filters)
+    query.update(update_dict, synchronize_session=False)
+
+
 class DictBase(object):
     attributes = []
 
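The new update_resources helper applies one update to every row matched by a filter list. A hedged usage sketch, mirroring the call the router-setup job makes later in this diff to repair a routing entry (the model and session come from the diff; the IDs are placeholders):

    # Sketch only: assumes a tricircle context `ctx` with an active DB
    # session, plus the core and models modules shown elsewhere in this diff.
    from tricircle.db import core
    from tricircle.db import models

    def fix_routing_entry(ctx, b_port_id, b_pod_id, t_port_id):
        # Point the routing entry for a bottom port back at the right top port.
        with ctx.session.begin():
            core.update_resources(
                ctx, models.ResourceRouting,
                [{'key': 'bottom_id', 'comparator': 'eq', 'value': b_port_id},
                 {'key': 'pod_id', 'comparator': 'eq', 'value': b_pod_id}],
                {'top_id': t_port_id})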
@@ -259,7 +259,7 @@ def upgrade(migrate_engine):
         sql.Column('timestamp', sql.TIMESTAMP,
                    server_default=sql.text('CURRENT_TIMESTAMP')),
         sql.Column('status', sql.String(length=36)),
-        sql.Column('resource_id', sql.String(length=36)),
+        sql.Column('resource_id', sql.String(length=127)),
         sql.Column('extra_id', sql.String(length=36)),
         migrate.UniqueConstraint(
             'type', 'status', 'resource_id', 'extra_id',
@@ -468,5 +468,5 @@ class Job(core.ModelBase, core.DictBase):
     timestamp = sql.Column('timestamp', sql.TIMESTAMP,
                            server_default=sql.text('CURRENT_TIMESTAMP'))
     status = sql.Column('status', sql.String(length=36))
-    resource_id = sql.Column('resource_id', sql.String(length=36))
+    resource_id = sql.Column('resource_id', sql.String(length=127))
     extra_id = sql.Column('extra_id', sql.String(length=36))
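The wider resource_id column appears to follow from the combined job ID format above: three UUID-style IDs joined by '#' no longer fit in 36 characters. A quick worst-case check, assuming standard 36-character UUIDs:

    # Three 36-char UUIDs plus two '#' separators.
    assert 3 * 36 + 2 == 110  # fits in String(length=127), not in length=36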
tricircle/network/helper.py (new file, 295 lines)
@@ -0,0 +1,295 @@
|
||||
# Copyright 2015 Huawei Technologies Co., Ltd.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron_lib import constants
|
||||
|
||||
from neutron.extensions import external_net
|
||||
import neutron.plugins.common.constants as p_constants
|
||||
|
||||
import tricircle.common.client as t_client
|
||||
import tricircle.common.constants as t_constants
|
||||
import tricircle.common.lock_handle as t_lock
|
||||
from tricircle.common import utils
|
||||
|
||||
|
||||
class NetworkHelper(object):
|
||||
def __init__(self, call_obj=None):
|
||||
self.clients = {}
|
||||
self.call_obj = call_obj
|
||||
|
||||
@staticmethod
|
||||
def _transfer_network_type(network_type):
|
||||
network_type_map = {t_constants.NT_SHARED_VLAN: p_constants.TYPE_VLAN}
|
||||
return network_type_map.get(network_type, network_type)
|
||||
|
||||
def _get_client(self, pod_name=None):
|
||||
if not pod_name:
|
||||
if t_constants.TOP not in self.clients:
|
||||
self.clients[t_constants.TOP] = t_client.Client()
|
||||
return self.clients[t_constants.TOP]
|
||||
if pod_name not in self.clients:
|
||||
self.clients[pod_name] = t_client.Client(pod_name)
|
||||
return self.clients[pod_name]
|
||||
|
||||
# operate top resource
|
||||
def _prepare_top_element_by_call(self, t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
|
||||
return getattr(super(self.call_obj.__class__, self.call_obj),
|
||||
'get_%ss' % _type_)(q_ctx_,
|
||||
filters={'name': [ele_['id']]})
|
||||
|
||||
def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
|
||||
if _type_ == t_constants.RT_NETWORK:
|
||||
# for network, we call TricirclePlugin's own create_network to
|
||||
# handle network segment
|
||||
return self.call_obj.create_network(q_ctx_, body_)
|
||||
else:
|
||||
return getattr(super(self.call_obj.__class__, self.call_obj),
|
||||
'create_%s' % _type_)(q_ctx_, body_)
|
||||
|
||||
return t_lock.get_or_create_element(
|
||||
t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
|
||||
def _prepare_top_element_by_client(self, t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
|
||||
client = self._get_client()
|
||||
return client.list_resources(_type_, t_ctx_,
|
||||
[{'key': 'name', 'comparator': 'eq',
|
||||
'value': ele_['id']}])
|
||||
|
||||
def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
|
||||
client = self._get_client()
|
||||
return client.create_resources(_type_, t_ctx_, body_)
|
||||
|
||||
assert _type == 'port'
|
||||
# currently only top port is possible to be created via client, other
|
||||
# top resources should be created directly by plugin
|
||||
return t_lock.get_or_create_element(
|
||||
t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
|
||||
def prepare_top_element(self, t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
"""Get or create shared top networking resource
|
||||
|
||||
:param t_ctx: tricircle context
|
||||
:param q_ctx: neutron context
|
||||
:param project_id: project id
|
||||
:param pod: dict of top pod
|
||||
:param ele: dict with "id" as key and distinctive identifier as value
|
||||
:param _type: type of the resource
|
||||
:param body: request body to create resource
|
||||
:return: boolean value indicating whether the resource is newly
|
||||
created or already exists and id of the resource
|
||||
"""
|
||||
if self.call_obj:
|
||||
return self._prepare_top_element_by_call(
|
||||
t_ctx, q_ctx, project_id, pod, ele, _type, body)
|
||||
else:
|
||||
return self._prepare_top_element_by_client(
|
||||
t_ctx, q_ctx, project_id, pod, ele, _type, body)
|
||||
|
||||
def get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
|
||||
t_net_id, b_router_id, b_port_id, is_ew):
|
||||
"""Get or create top bridge interface
|
||||
|
||||
:param t_ctx: tricircle context
|
||||
:param q_ctx: neutron context
|
||||
:param project_id: project id
|
||||
:param pod: dict of top pod
|
||||
:param t_net_id: top bridge network id
|
||||
:param b_router_id: bottom router id
|
||||
:param b_port_id: needed when creating bridge interface for south-
|
||||
north network, id of the internal port bound to floating ip
|
||||
:param is_ew: create the bridge interface for east-west network or
|
||||
south-north network
|
||||
:return: bridge interface id
|
||||
"""
|
||||
if is_ew:
|
||||
port_name = t_constants.ew_bridge_port_name % (project_id,
|
||||
b_router_id)
|
||||
else:
|
||||
port_name = t_constants.ns_bridge_port_name % (project_id,
|
||||
b_router_id,
|
||||
b_port_id)
|
||||
port_ele = {'id': port_name}
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': port_name,
|
||||
'network_id': t_net_id,
|
||||
'device_id': '',
|
||||
'device_owner': ''
|
||||
}
|
||||
}
|
||||
if self.call_obj:
|
||||
port_body['port'].update(
|
||||
{'mac_address': constants.ATTR_NOT_SPECIFIED,
|
||||
'fixed_ips': constants.ATTR_NOT_SPECIFIED})
|
||||
_, port_id = self.prepare_top_element(
|
||||
t_ctx, q_ctx, project_id, pod, port_ele, 'port', port_body)
|
||||
return port_id
|
||||
|
||||
# operate bottom resource
|
||||
def prepare_bottom_element(self, t_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
"""Get or create bottom networking resource based on top resource
|
||||
|
||||
:param t_ctx: tricircle context
|
||||
:param project_id: project id
|
||||
:param pod: dict of bottom pod
|
||||
:param ele: dict of top resource
|
||||
:param _type: type of the resource
|
||||
:param body: request body to create resource
|
||||
:return: boolean value indicating whether the resource is newly
|
||||
created or already exists and id of the resource
|
||||
"""
|
||||
def list_resources(t_ctx_, q_ctx, pod_, ele_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
if _type_ == t_constants.RT_NETWORK:
|
||||
value = utils.get_bottom_network_name(ele_)
|
||||
else:
|
||||
value = ele_['id']
|
||||
return client.list_resources(_type_, t_ctx_,
|
||||
[{'key': 'name', 'comparator': 'eq',
|
||||
'value': value}])
|
||||
|
||||
def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.create_resources(_type_, t_ctx_, body_)
|
||||
|
||||
return t_lock.get_or_create_element(
|
||||
t_ctx, None, # we don't need neutron context, so pass None
|
||||
project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
|
||||
def get_bottom_elements(self, t_ctx, project_id, pod,
|
||||
t_net, t_subnet, t_port):
|
||||
"""Get or create bottom network, subnet and port
|
||||
|
||||
:param t_ctx: tricircle context
|
||||
:param project_id: project id
|
||||
:param pod: dict of bottom pod
|
||||
:param t_net: dict of top network
|
||||
:param t_subnet: dict of top subnet
|
||||
:param t_port: dict of top port
|
||||
:return: bottom port id
|
||||
"""
|
||||
net_body = {
|
||||
'network': {
|
||||
'tenant_id': project_id,
|
||||
'name': utils.get_bottom_network_name(t_net),
|
||||
'admin_state_up': True
|
||||
}
|
||||
}
|
||||
_, net_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
subnet_body = {
|
||||
'subnet': {
|
||||
'network_id': net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': t_subnet['ip_version'],
|
||||
'cidr': t_subnet['cidr'],
|
||||
'gateway_ip': t_subnet['gateway_ip'],
|
||||
'allocation_pools': t_subnet['allocation_pools'],
|
||||
'enable_dhcp': t_subnet['enable_dhcp'],
|
||||
'tenant_id': project_id
|
||||
}
|
||||
}
|
||||
_, subnet_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
port_body = {
|
||||
'port': {
|
||||
'network_id': net_id,
|
||||
'name': t_port['id'],
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}],
|
||||
'mac_address': t_port['mac_address']
|
||||
}
|
||||
}
|
||||
_, port_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
return port_id
|
||||
|
||||
def get_bottom_bridge_elements(self, t_ctx, project_id,
|
||||
pod, t_net, is_external, t_subnet, t_port):
|
||||
"""Get or create bottom bridge port
|
||||
|
||||
:param t_ctx: tricircle context
|
||||
:param project_id: project id
|
||||
:param pod: dict of bottom pod
|
||||
:param t_net: dict of top bridge network
|
||||
:param is_external: whether the bottom network should be created as
|
||||
an external network, this is True for south-north case
|
||||
:param t_subnet: dict of top bridge subnet
|
||||
:param t_port: dict of top bridge port
|
||||
:return:
|
||||
"""
|
||||
net_body = {'network': {
|
||||
'tenant_id': project_id,
|
||||
'name': t_net['id'],
|
||||
'provider:network_type': self._transfer_network_type(
|
||||
t_net['provider:network_type']),
|
||||
'provider:physical_network': t_net['provider:physical_network'],
|
||||
'provider:segmentation_id': t_net['provider:segmentation_id'],
|
||||
'admin_state_up': True}}
|
||||
if is_external:
|
||||
net_body['network'][external_net.EXTERNAL] = True
|
||||
_, b_net_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
|
||||
subnet_body = {'subnet': {'network_id': b_net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': 4,
|
||||
'cidr': t_subnet['cidr'],
|
||||
'enable_dhcp': False,
|
||||
'tenant_id': project_id}}
|
||||
# In the pod hosting external network, where ns bridge network is used
|
||||
# as an internal network, need to allocate ip address from .3 because
|
||||
# .2 is used by the router gateway port in the pod hosting servers,
|
||||
# where ns bridge network is used as an external network.
|
||||
# if t_subnet['name'].startswith('ns_bridge_') and not is_external:
|
||||
# prefix = t_subnet['cidr'][:t_subnet['cidr'].rindex('.')]
|
||||
# subnet_body['subnet']['allocation_pools'] = [
|
||||
# {'start': prefix + '.3', 'end': prefix + '.254'}]
|
||||
_, b_subnet_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
|
||||
if t_port:
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': t_port['id'],
|
||||
'network_id': b_net_id,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': b_subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}]
|
||||
}
|
||||
}
|
||||
is_new, b_port_id = self.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
|
||||
return is_new, b_port_id, b_subnet_id, b_net_id
|
||||
else:
|
||||
return None, None, b_subnet_id, b_net_id
|
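Since the NetworkHelper above is the piece both the plugin and xjob now call into, a brief usage sketch may help; it mirrors the call the router-setup job makes later in this diff (the context and pod objects are assumed to come from the existing tricircle context and db_api code):

    # Sketch only: ctx is a tricircle context, b_pod a bottom pod dict from
    # db_api, and t_router a top router dict fetched via the top client.
    from tricircle.network import helper

    def ensure_bottom_router(ctx, project_id, b_pod, t_router):
        net_helper = helper.NetworkHelper()  # no plugin object: client mode
        router_body = {'router': {'name': t_router['id'],
                                  'distributed': False}}
        # Returns (is_new, bottom_router_id); safe to call repeatedly because
        # it reuses an existing bottom router with the same name.
        return net_helper.prepare_bottom_element(
            ctx, project_id, b_pod, t_router, 'router', router_body)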
@ -18,7 +18,6 @@ import oslo_log.helpers as log_helpers
|
||||
from oslo_log import log
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import constants
|
||||
from neutron.common import exceptions
|
||||
from neutron.db import common_db_mixin
|
||||
from neutron.db import db_base_plugin_v2
|
||||
@ -36,6 +35,7 @@ from neutron.extensions import external_net
|
||||
from neutron.extensions import l3
|
||||
from neutron.extensions import providernet as provider
|
||||
import neutron.plugins.common.constants as p_constants
|
||||
from neutron_lib import constants
|
||||
import neutronclient.common.exceptions as q_cli_exceptions
|
||||
|
||||
from sqlalchemy import sql
|
||||
@ -48,13 +48,12 @@ import tricircle.common.exceptions as t_exceptions
|
||||
from tricircle.common.i18n import _
|
||||
from tricircle.common.i18n import _LE
|
||||
from tricircle.common.i18n import _LI
|
||||
import tricircle.common.lock_handle as t_lock
|
||||
from tricircle.common import utils
|
||||
from tricircle.common import xrpcapi
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
import tricircle.network.exceptions as t_network_exc
|
||||
from tricircle.network import helper
|
||||
from tricircle.network import managers
|
||||
from tricircle.network import security_groups
|
||||
|
||||
@ -88,6 +87,10 @@ cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
NON_VM_PORT_TYPES = [constants.DEVICE_OWNER_ROUTER_INTF,
|
||||
constants.DEVICE_OWNER_ROUTER_GW,
|
||||
constants.DEVICE_OWNER_DHCP]
|
||||
|
||||
|
||||
class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
security_groups.TricircleSecurityGroupMixin,
|
||||
@ -122,6 +125,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
self._setup_rpc()
|
||||
self.type_manager = managers.TricircleTypeManager()
|
||||
self.type_manager.initialize()
|
||||
self.helper = helper.NetworkHelper(self)
|
||||
|
||||
def _setup_rpc(self):
|
||||
self.endpoints = []
|
||||
@ -359,21 +363,26 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
|
||||
def delete_port(self, context, port_id, l3_port_check=True):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
try:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, port_id, t_constants.RT_PORT)
|
||||
if mappings:
|
||||
pod_name = mappings[0][0]['pod_name']
|
||||
bottom_port_id = mappings[0][1]
|
||||
self._get_client(pod_name).delete_ports(
|
||||
t_ctx, bottom_port_id)
|
||||
except Exception:
|
||||
raise
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id',
|
||||
'comparator': 'eq',
|
||||
'value': port_id}])
|
||||
port = super(TricirclePlugin, self).get_port(context, port_id)
|
||||
# NOTE(zhiyuan) for none vm ports like router interfaces and dhcp
|
||||
# ports, we just remove records in top pod and leave deletion of
|
||||
# ports and routing entries in bottom pods to xjob
|
||||
if port.get('device_owner') not in NON_VM_PORT_TYPES:
|
||||
try:
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, port_id, t_constants.RT_PORT)
|
||||
if mappings:
|
||||
pod_name = mappings[0][0]['pod_name']
|
||||
bottom_port_id = mappings[0][1]
|
||||
self._get_client(pod_name).delete_ports(
|
||||
t_ctx, bottom_port_id)
|
||||
except Exception:
|
||||
raise
|
||||
with t_ctx.session.begin():
|
||||
core.delete_resources(t_ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'top_id',
|
||||
'comparator': 'eq',
|
||||
'value': port_id}])
|
||||
super(TricirclePlugin, self).delete_port(context, port_id)
|
||||
|
||||
def get_port(self, context, port_id, fields=None):
|
||||
@ -490,6 +499,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
for port in ports:
|
||||
if port['id'] not in bottom_top_map:
|
||||
continue
|
||||
if port.get('device_owner') in NON_VM_PORT_TYPES:
|
||||
continue
|
||||
port['id'] = bottom_top_map[port['id']]
|
||||
TricirclePlugin._map_port_from_bottom_to_top(port, bottom_top_map)
|
||||
port_list.append(port)
|
||||
@ -563,6 +574,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
def get_ports(self, context, filters=None, fields=None, sorts=None,
|
||||
limit=None, marker=None, page_reverse=False):
|
||||
t_ctx = t_context.get_context_from_neutron_context(context)
|
||||
|
||||
non_vm_ports = super(TricirclePlugin, self).get_ports(
|
||||
context, {'device_owner': NON_VM_PORT_TYPES}, ['id'])
|
||||
non_vm_port_ids = set([port['id'] for port in non_vm_ports])
|
||||
|
||||
with t_ctx.session.begin():
|
||||
bottom_top_map = {}
|
||||
top_bottom_map = {}
|
||||
@ -575,6 +591,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
route_filters, [])
|
||||
|
||||
for route in routes:
|
||||
if route['top_id'] in non_vm_port_ids:
|
||||
continue
|
||||
if route['bottom_id']:
|
||||
bottom_top_map[route['bottom_id']] = route['top_id']
|
||||
if route['resource_type'] == t_constants.RT_PORT:
|
||||
@ -668,45 +686,13 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
|
||||
def _prepare_top_element(self, t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
|
||||
return getattr(super(TricirclePlugin, self),
|
||||
'get_%ss' % _type_)(q_ctx_,
|
||||
filters={'name': [ele_['id']]})
|
||||
|
||||
def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
|
||||
if _type_ == t_constants.RT_NETWORK:
|
||||
# for network, we call TricirclePlugin's own create_network to
|
||||
# handle network segment
|
||||
return self.create_network(q_ctx_, body_)
|
||||
else:
|
||||
return getattr(super(TricirclePlugin, self),
|
||||
'create_%s' % _type_)(q_ctx_, body_)
|
||||
|
||||
return t_lock.get_or_create_element(
|
||||
t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
return self.helper.prepare_top_element(
|
||||
t_ctx, q_ctx, project_id, pod, ele, _type, body)
|
||||
|
||||
def _prepare_bottom_element(self, t_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
def list_resources(t_ctx_, q_ctx, pod_, ele_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
if _type_ == t_constants.RT_NETWORK:
|
||||
value = utils.get_bottom_network_name(ele_)
|
||||
else:
|
||||
value = ele_['id']
|
||||
return client.list_resources(_type_, t_ctx_,
|
||||
[{'key': 'name', 'comparator': 'eq',
|
||||
'value': value}])
|
||||
|
||||
def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
|
||||
client = self._get_client(pod_['pod_name'])
|
||||
return client.create_resources(_type_, t_ctx_, body_)
|
||||
|
||||
return t_lock.get_or_create_element(
|
||||
t_ctx, None, # we don't need neutron context, so pass None
|
||||
project_id, pod, ele, _type, body,
|
||||
list_resources, create_resources)
|
||||
return self.helper.prepare_bottom_element(
|
||||
t_ctx, project_id, pod, ele, _type, body)
|
||||
|
||||
def _get_bridge_subnet_pool_id(self, t_ctx, q_ctx, project_id, pod, is_ew):
|
||||
if is_ew:
|
||||
@ -783,68 +769,14 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
|
||||
def _get_bottom_elements(self, t_ctx, project_id, pod,
|
||||
t_net, t_subnet, t_port):
|
||||
net_body = {
|
||||
'network': {
|
||||
'tenant_id': project_id,
|
||||
'name': utils.get_bottom_network_name(t_net),
|
||||
'admin_state_up': True
|
||||
}
|
||||
}
|
||||
_, net_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
subnet_body = {
|
||||
'subnet': {
|
||||
'network_id': net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': t_subnet['ip_version'],
|
||||
'cidr': t_subnet['cidr'],
|
||||
'gateway_ip': t_subnet['gateway_ip'],
|
||||
'allocation_pools': t_subnet['allocation_pools'],
|
||||
'enable_dhcp': t_subnet['enable_dhcp'],
|
||||
'tenant_id': project_id
|
||||
}
|
||||
}
|
||||
_, subnet_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
port_body = {
|
||||
'port': {
|
||||
'network_id': net_id,
|
||||
'name': t_port['id'],
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}],
|
||||
'mac_address': t_port['mac_address']
|
||||
}
|
||||
}
|
||||
_, port_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
return port_id
|
||||
return self.helper.get_bottom_elements(
|
||||
t_ctx, project_id, pod, t_net, t_subnet, t_port)
|
||||
|
||||
def _get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
|
||||
t_net_id, b_router_id, b_port_id, is_ew):
|
||||
if is_ew:
|
||||
port_name = t_constants.ew_bridge_port_name % (project_id,
|
||||
b_router_id)
|
||||
else:
|
||||
port_name = t_constants.ns_bridge_port_name % (project_id,
|
||||
b_router_id,
|
||||
b_port_id)
|
||||
port_ele = {'id': port_name}
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': port_name,
|
||||
'network_id': t_net_id,
|
||||
'device_id': '',
|
||||
'device_owner': '',
|
||||
'mac_address': attributes.ATTR_NOT_SPECIFIED,
|
||||
'fixed_ips': attributes.ATTR_NOT_SPECIFIED
|
||||
}
|
||||
}
|
||||
_, port_id = self._prepare_top_element(
|
||||
t_ctx, q_ctx, project_id, pod, port_ele, 'port', port_body)
|
||||
port_id = self.helper.get_bridge_interface(t_ctx, q_ctx, project_id,
|
||||
pod, t_net_id, b_router_id,
|
||||
b_port_id, is_ew)
|
||||
return super(TricirclePlugin, self).get_port(q_ctx, port_id)
|
||||
|
||||
@staticmethod
|
||||
@ -855,55 +787,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
def _get_bottom_bridge_elements(self, q_ctx, project_id,
|
||||
pod, t_net, is_external, t_subnet, t_port):
|
||||
t_ctx = t_context.get_context_from_neutron_context(q_ctx)
|
||||
|
||||
net_body = {'network': {
|
||||
'tenant_id': project_id,
|
||||
'name': t_net['id'],
|
||||
'provider:network_type': self._transfer_network_type(
|
||||
t_net['provider:network_type']),
|
||||
'provider:physical_network': t_net['provider:physical_network'],
|
||||
'provider:segmentation_id': t_net['provider:segmentation_id'],
|
||||
'admin_state_up': True}}
|
||||
if is_external:
|
||||
net_body['network'][external_net.EXTERNAL] = True
|
||||
_, b_net_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_net, 'network', net_body)
|
||||
|
||||
subnet_body = {'subnet': {'network_id': b_net_id,
|
||||
'name': t_subnet['id'],
|
||||
'ip_version': 4,
|
||||
'cidr': t_subnet['cidr'],
|
||||
'enable_dhcp': False,
|
||||
'tenant_id': project_id}}
|
||||
# In the pod hosting external network, where ns bridge network is used
|
||||
# as an internal network, need to allocate ip address from .3 because
|
||||
# .2 is used by the router gateway port in the pod hosting servers,
|
||||
# where ns bridge network is used as an external network.
|
||||
# if t_subnet['name'].startswith('ns_bridge_') and not is_external:
|
||||
# prefix = t_subnet['cidr'][:t_subnet['cidr'].rindex('.')]
|
||||
# subnet_body['subnet']['allocation_pools'] = [
|
||||
# {'start': prefix + '.3', 'end': prefix + '.254'}]
|
||||
_, b_subnet_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
|
||||
|
||||
if t_port:
|
||||
port_body = {
|
||||
'port': {
|
||||
'tenant_id': project_id,
|
||||
'admin_state_up': True,
|
||||
'name': t_port['id'],
|
||||
'network_id': b_net_id,
|
||||
'fixed_ips': [
|
||||
{'subnet_id': b_subnet_id,
|
||||
'ip_address': t_port['fixed_ips'][0]['ip_address']}]
|
||||
}
|
||||
}
|
||||
is_new, b_port_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, pod, t_port, 'port', port_body)
|
||||
|
||||
return is_new, b_port_id, b_subnet_id, b_net_id
|
||||
else:
|
||||
return None, None, b_subnet_id, b_net_id
|
||||
return self.helper.get_bottom_bridge_elements(
|
||||
t_ctx, project_id, pod, t_net, is_external, t_subnet, t_port)
|
||||
|
||||
# NOTE(zhiyuan) the origin implementation in l3_db uses port returned from
|
||||
# get_port in core plugin to check, change it to base plugin, since only
|
||||
@ -1068,22 +953,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
t_pod = db_api.get_top_pod(t_ctx)
|
||||
assert t_pod
|
||||
|
||||
router_body = {'router': {'name': router_id,
|
||||
'distributed': False}}
|
||||
_, b_router_id = self._prepare_bottom_element(
|
||||
t_ctx, project_id, b_pod, router, 'router', router_body)
|
||||
|
||||
# bridge network for E-W networking
|
||||
pool_id = self._get_bridge_subnet_pool_id(
|
||||
t_ctx, context, None, t_pod, True)
|
||||
t_bridge_net, t_bridge_subnet = self._get_bridge_network_subnet(
|
||||
self._get_bridge_network_subnet(
|
||||
t_ctx, context, project_id, t_pod, pool_id, True)
|
||||
t_bridge_port = self._get_bridge_interface(
|
||||
t_ctx, context, project_id, t_pod, t_bridge_net['id'],
|
||||
b_router_id, None, True)
|
||||
is_new, b_bridge_port_id, _, _ = self._get_bottom_bridge_elements(
|
||||
context, project_id, b_pod, t_bridge_net, False, t_bridge_subnet,
|
||||
t_bridge_port)
|
||||
|
||||
# bridge network for N-S networking
|
||||
ext_nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
|
||||
@ -1099,107 +973,21 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
if need_ns_bridge:
|
||||
pool_id = self._get_bridge_subnet_pool_id(
|
||||
t_ctx, context, None, t_pod, False)
|
||||
t_bridge_net, t_bridge_subnet = self._get_bridge_network_subnet(
|
||||
self._get_bridge_network_subnet(
|
||||
t_ctx, context, project_id, t_pod, pool_id, False)
|
||||
(_, _, b_bridge_subnet_id,
|
||||
b_bridge_net_id) = self._get_bottom_bridge_elements(
|
||||
context, project_id, b_pod, t_bridge_net, True,
|
||||
t_bridge_subnet, None)
|
||||
|
||||
ns_bridge_port = self._get_bridge_interface(
|
||||
t_ctx, context, project_id, t_pod, t_bridge_net['id'],
|
||||
b_router_id, None, False)
|
||||
|
||||
client = self._get_client(b_pod['pod_name'])
|
||||
# add gateway is update operation, can run multiple times
|
||||
gateway_ip = ns_bridge_port['fixed_ips'][0]['ip_address']
|
||||
client.action_routers(
|
||||
t_ctx, 'add_gateway', b_router_id,
|
||||
{'network_id': b_bridge_net_id,
|
||||
'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
|
||||
'ip_address': gateway_ip}]})
|
||||
|
||||
# NOTE(zhiyuan) subnet pool, network, subnet are reusable resource,
|
||||
# we decide not to remove them when operation fails, so before adding
|
||||
# router interface, no clearing is needed.
|
||||
is_success = False
|
||||
for _ in xrange(2):
|
||||
try:
|
||||
return_info = super(TricirclePlugin,
|
||||
self).add_router_interface(
|
||||
context, router_id, interface_info)
|
||||
is_success = True
|
||||
except exceptions.PortInUse:
|
||||
# NOTE(zhiyuan) so top interface is already bound to top
|
||||
# router, we need to check if bottom interface is bound.
|
||||
|
||||
# safe to get port_id since only adding interface by port will
|
||||
# get PortInUse exception
|
||||
t_port_id = interface_info['port_id']
|
||||
mappings = db_api.get_bottom_mappings_by_top_id(
|
||||
t_ctx, t_port_id, t_constants.RT_PORT)
|
||||
if not mappings:
|
||||
# bottom interface does not exists, ignore this exception
|
||||
# and continue to create bottom interface
|
||||
self._unbound_top_interface(context, router_id, t_port_id)
|
||||
else:
|
||||
pod, b_port_id = mappings[0]
|
||||
b_port = self._get_client(pod['pod_name']).get_ports(
|
||||
t_ctx, b_port_id)
|
||||
if not b_port['device_id']:
|
||||
# bottom interface exists but is not bound, ignore this
|
||||
# exception and continue to bind bottom interface
|
||||
self._unbound_top_interface(context, router_id,
|
||||
t_port_id)
|
||||
else:
|
||||
# bottom interface already bound, re-raise exception
|
||||
raise
|
||||
if is_success:
|
||||
break
|
||||
|
||||
if not is_success:
|
||||
raise Exception()
|
||||
|
||||
t_port_id = return_info['port_id']
|
||||
t_port = self.get_port(context, t_port_id)
|
||||
t_subnet = self.get_subnet(context,
|
||||
t_port['fixed_ips'][0]['subnet_id'])
|
||||
|
||||
return_info = super(TricirclePlugin, self).add_router_interface(
|
||||
context, router_id, interface_info)
|
||||
try:
|
||||
b_port_id = self._get_bottom_elements(
|
||||
t_ctx, project_id, b_pod, t_net, t_subnet, t_port)
|
||||
self.xjob_handler.setup_bottom_router(
|
||||
t_ctx, t_net['id'], router_id, b_pod['pod_id'])
|
||||
except Exception:
|
||||
# NOTE(zhiyuan) remove_router_interface will delete top interface.
|
||||
# if mapping is already built between top and bottom interface,
|
||||
# bottom interface and resource routing entry will also be deleted.
|
||||
#
|
||||
# but remove_router_interface may fail when deleting bottom
|
||||
# interface, in this case, top and bottom interfaces are both left,
|
||||
# user needs to manually delete top interface.
|
||||
# NOTE(zhiyuan) we fail to submit the job, so bottom router
|
||||
# operations are not started, it's safe for us to remove the top
|
||||
# router interface
|
||||
super(TricirclePlugin, self).remove_router_interface(
|
||||
context, router_id, interface_info)
|
||||
raise
|
||||
|
||||
client = self._get_client(b_pod['pod_name'])
|
||||
try:
|
||||
if is_new:
|
||||
# only attach bridge port the first time
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
else:
|
||||
# still need to check if the bridge port is bound
|
||||
port = client.get_ports(t_ctx, b_bridge_port_id)
|
||||
if not port.get('device_id'):
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
client.action_routers(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_port_id})
|
||||
except Exception:
|
||||
super(TricirclePlugin, self).remove_router_interface(
|
||||
context, router_id, interface_info)
|
||||
raise
|
||||
|
||||
self.xjob_handler.configure_extra_routes(t_ctx, router_id)
|
||||
return return_info
|
||||
|
||||
def create_floatingip(self, context, floatingip):
|
||||
@ -1221,41 +1009,25 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
|
||||
context, interface_info, add_by_port)
|
||||
b_pod, b_az = az_ag.get_pod_by_az_tenant(t_ctx, az, project_id)
|
||||
|
||||
b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
|
||||
t_ctx, router_id, b_pod['pod_name'], t_constants.RT_ROUTER)
|
||||
handle_bottom = b_router_id is not None
|
||||
|
||||
if add_by_port:
|
||||
t_port_id = interface_info['port_id']
|
||||
b_port_id = db_api.get_bottom_id_by_top_id_pod_name(
|
||||
t_ctx, t_port_id, b_pod['pod_name'], t_constants.RT_PORT)
|
||||
request_body = {'port_id': b_port_id}
|
||||
handle_bottom = handle_bottom and (b_port_id is not None)
|
||||
else:
|
||||
t_subnet_id = interface_info['subnet_id']
|
||||
b_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
|
||||
t_ctx, t_subnet_id, b_pod['pod_name'], t_constants.RT_SUBNET)
|
||||
request_body = {'subnet_id': b_subnet_id}
|
||||
handle_bottom = handle_bottom and (b_subnet_id is not None)
|
||||
|
||||
if handle_bottom:
|
||||
b_client = self._get_client(b_pod['pod_name'])
|
||||
try:
|
||||
b_client.action_routers(t_ctx, 'remove_interface', b_router_id,
|
||||
request_body)
|
||||
except Exception as e:
|
||||
if hasattr(e, 'status_code') and e.status_code == 404:
|
||||
# Note(zhiyuan) we get a 404 error from client, the
|
||||
# possible reason is that router/subnet/port doesn't exist
|
||||
# in bottom pod, in this case, no need to remove bottom
|
||||
# interface, we just continue to remove top interface.
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
return_info = super(TricirclePlugin, self).remove_router_interface(
|
||||
context, router_id, interface_info)
|
||||
if handle_bottom:
|
||||
self.xjob_handler.configure_extra_routes(t_ctx, router_id)
|
||||
try:
|
||||
self.xjob_handler.setup_bottom_router(
|
||||
t_ctx, t_net['id'], router_id, b_pod['pod_id'])
|
||||
except Exception:
|
||||
# NOTE(zhiyuan) we fail to submit the job, so if bottom router
|
||||
# interface exists, it would not be deleted, then after we add
|
||||
# the top interface again, the relation of top and bottom router
|
||||
# interfaces are not updated in the resource routing entry. this
|
||||
# inconsistency would not cause problem because:
|
||||
# (1) when querying interface port, top port information is
|
||||
# returned, not rely on routing entry
|
||||
# (2) when setting up bottom router, xjob directly queries top
|
||||
# and bottom interfaces, not rely on routing entry neither
|
||||
# we may need some routing entry clean up process
|
||||
super(TricirclePlugin, self).add_router_interface(
|
||||
context, router_id, interface_info)
|
||||
raise
|
||||
return return_info
|
||||
|
||||
@staticmethod
|
||||
|
@ -39,6 +39,7 @@ from oslo_config import cfg
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tricircle.common import client
|
||||
from tricircle.common import constants
|
||||
from tricircle.common import context
|
||||
from tricircle.common import exceptions
|
||||
@ -47,9 +48,11 @@ from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
from tricircle.network.drivers import type_local
|
||||
from tricircle.network.drivers import type_shared_vlan
|
||||
from tricircle.network import helper
|
||||
from tricircle.network import managers
|
||||
from tricircle.network import plugin
|
||||
from tricircle.tests.unit.network import test_security_groups
|
||||
from tricircle.xjob import xmanager
|
||||
|
||||
|
||||
TOP_NETS = []
|
||||
@ -102,6 +105,16 @@ RES_MAP = {'networks': TOP_NETS,
|
||||
'securitygrouprules': TOP_SG_RULES}
|
||||
|
||||
|
||||
def _transform_az(network):
|
||||
az_hints_key = 'availability_zone_hints'
|
||||
if az_hints_key in network:
|
||||
ret = DotDict(network)
|
||||
az_str = network[az_hints_key]
|
||||
ret[az_hints_key] = jsonutils.loads(az_str) if az_str else []
|
||||
return ret
|
||||
return network
|
||||
|
||||
|
||||
class DotDict(dict):
|
||||
def __init__(self, normal_dict=None):
|
||||
if normal_dict:
|
||||
@ -114,7 +127,8 @@ class DotDict(dict):
|
||||
|
||||
class FakeNeutronClient(object):
|
||||
|
||||
_res_map = {'pod_1': {'port': BOTTOM1_PORTS},
|
||||
_res_map = {'top': {'port': TOP_PORTS},
|
||||
'pod_1': {'port': BOTTOM1_PORTS},
|
||||
'pod_2': {'port': BOTTOM2_PORTS}}
|
||||
|
||||
def __init__(self, pod_name):
|
||||
@ -145,11 +159,13 @@ class FakeNeutronClient(object):
|
||||
return {'ports': port_list}
|
||||
|
||||
def get(self, path, params=None):
|
||||
if self.pod_name == 'pod_1' or self.pod_name == 'pod_2':
|
||||
if self.pod_name in ['pod_1', 'pod_2', 'top']:
|
||||
res_list = self._get(params)['ports']
|
||||
return_list = []
|
||||
for res in res_list:
|
||||
return_list.append(copy.copy(res))
|
||||
if self.pod_name != 'top':
|
||||
res = copy.copy(res)
|
||||
return_list.append(res)
|
||||
return {'ports': return_list}
|
||||
else:
|
||||
raise Exception()
|
||||
@ -157,7 +173,8 @@ class FakeNeutronClient(object):
|
||||
|
||||
class FakeClient(object):
|
||||
|
||||
_res_map = {'pod_1': {'network': BOTTOM1_NETS,
|
||||
_res_map = {'top': RES_MAP,
|
||||
'pod_1': {'network': BOTTOM1_NETS,
|
||||
'subnet': BOTTOM1_SUBNETS,
|
||||
'port': BOTTOM1_PORTS,
|
||||
'router': BOTTOM1_ROUTERS,
|
||||
@ -171,7 +188,10 @@ class FakeClient(object):
|
||||
'floatingip': BOTTOM2_FIPS}}
|
||||
|
||||
def __init__(self, pod_name):
|
||||
self.pod_name = pod_name
|
||||
if not pod_name:
|
||||
self.pod_name = 'top'
|
||||
else:
|
||||
self.pod_name = pod_name
|
||||
self.client = FakeNeutronClient(self.pod_name)
|
||||
|
||||
def get_native_client(self, resource, ctx):
|
||||
@ -219,6 +239,44 @@ class FakeClient(object):
|
||||
res_list.append(res)
|
||||
return res
|
||||
|
||||
def list_resources(self, _type, ctx, filters=None):
|
||||
if self.pod_name == 'top':
|
||||
res_list = self._res_map[self.pod_name][_type + 's']
|
||||
else:
|
||||
res_list = self._res_map[self.pod_name][_type]
|
||||
ret_list = []
|
||||
for res in res_list:
|
||||
is_selected = True
|
||||
for _filter in filters:
|
||||
if _filter['key'] not in res:
|
||||
is_selected = False
|
||||
break
|
||||
if _filter['value'] != res[_filter['key']]:
|
||||
is_selected = False
|
||||
break
|
||||
if is_selected:
|
||||
ret_list.append(res)
|
||||
return ret_list
|
||||
|
||||
def list_networks(self, ctx, filters=None):
|
||||
networks = self.list_resources('network', ctx, filters)
|
||||
if self.pod_name != 'top':
|
||||
return networks
|
||||
ret_list = []
|
||||
for network in networks:
|
||||
ret_list.append(_transform_az(network))
|
||||
return ret_list
|
||||
|
||||
def get_networks(self, ctx, net_id):
|
||||
return self.list_networks(ctx, [{'key': 'id',
|
||||
'comparator': 'eq',
|
||||
'value': net_id}])[0]
|
||||
|
||||
def get_subnets(self, ctx, subnet_id):
|
||||
return self.list_resources('subnet', ctx, [{'key': 'id',
|
||||
'comparator': 'eq',
|
||||
'value': subnet_id}])[0]
|
||||
|
||||
def create_ports(self, ctx, body):
|
||||
return self.create_resources('port', ctx, body)
|
||||
|
||||
@ -251,6 +309,11 @@ class FakeClient(object):
|
||||
# only for mock purpose
|
||||
pass
|
||||
|
||||
def get_routers(self, ctx, router_id):
|
||||
return self.list_resources('router', ctx, [{'key': 'id',
|
||||
'comparator': 'eq',
|
||||
'value': router_id}])[0]
|
||||
|
||||
def action_routers(self, ctx, action, *args, **kwargs):
|
||||
# divide into two functions for test purpose
|
||||
if action == 'add_interface':
|
||||
@ -266,21 +329,7 @@ class FakeClient(object):
|
||||
return fip
|
||||
|
||||
def list_floatingips(self, ctx, filters=None):
|
||||
filters = filters or []
|
||||
return_list = []
|
||||
for fip in self._res_map[self.pod_name]['floatingip']:
|
||||
is_skip = False
|
||||
for filter in filters:
|
||||
if filter['key'] not in fip:
|
||||
is_skip = True
|
||||
break
|
||||
if fip[filter['key']] != filter['value']:
|
||||
is_skip = True
|
||||
break
|
||||
if is_skip:
|
||||
continue
|
||||
return_list.append(copy.copy(fip))
|
||||
return return_list
|
||||
return self.list_resources('floatingip', ctx, filters)
|
||||
|
||||
def update_floatingips(self, ctx, _id, body):
|
||||
pass
|
||||
@ -627,16 +676,52 @@ class FakeSession(object):
|
||||
pass
|
||||
|
||||
|
||||
class FakeRPCAPI(object):
|
||||
def configure_extra_routes(self, context, router_id):
|
||||
class FakeXManager(xmanager.XManager):
|
||||
def __init__(self, fake_plugin):
|
||||
self.clients = {constants.TOP: client.Client()}
|
||||
self.job_handles = {
|
||||
constants.JT_ROUTER: self.configure_extra_routes,
|
||||
constants.JT_ROUTER_SETUP: self.setup_bottom_router,
|
||||
constants.JT_PORT_DELETE: self.delete_server_port}
|
||||
self.helper = FakeHelper(fake_plugin)
|
||||
self.xjob_handler = FakeBaseRPCAPI()
|
||||
|
||||
def _get_client(self, pod_name=None):
|
||||
return FakeClient(pod_name)
|
||||
|
||||
|
||||
class FakeBaseRPCAPI(object):
|
||||
def configure_extra_routes(self, ctxt, router_id):
|
||||
pass
|
||||
|
||||
|
||||
class FakeRPCAPI(FakeBaseRPCAPI):
|
||||
def __init__(self, fake_plugin):
|
||||
self.xmanager = FakeXManager(fake_plugin)
|
||||
|
||||
def setup_bottom_router(self, ctxt, net_id, router_id, pod_id):
|
||||
combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
|
||||
self.xmanager.setup_bottom_router(
|
||||
ctxt, payload={constants.JT_ROUTER_SETUP: combine_id})
|
||||
|
||||
|
||||
class FakeExtension(object):
|
||||
def __init__(self, ext_obj):
|
||||
self.obj = ext_obj
|
||||
|
||||
|
||||
class FakeHelper(helper.NetworkHelper):
|
||||
def _get_client(self, pod_name=None):
|
||||
return FakeClient(pod_name)
|
||||
|
||||
def _prepare_top_element_by_call(self, t_ctx, q_ctx,
|
||||
project_id, pod, ele, _type, body):
|
||||
if not q_ctx:
|
||||
q_ctx = FakeNeutronContext()
|
||||
return super(FakeHelper, self)._prepare_top_element_by_call(
|
||||
t_ctx, q_ctx, project_id, pod, ele, _type, body)
|
||||
|
||||
|
||||
class FakeTypeManager(managers.TricircleTypeManager):
|
||||
def _register_types(self):
|
||||
local_driver = type_local.LocalTypeDriver()
|
||||
@ -644,11 +729,28 @@ class FakeTypeManager(managers.TricircleTypeManager):
|
||||
vlan_driver = type_shared_vlan.SharedVLANTypeDriver()
|
||||
self.drivers[constants.NT_SHARED_VLAN] = FakeExtension(vlan_driver)
|
||||
|
||||
def extend_network_dict_provider(self, cxt, net):
|
||||
target_net = None
|
||||
for t_net in TOP_NETS:
|
||||
if t_net['id'] == net['id']:
|
||||
target_net = t_net
|
||||
if not target_net:
|
||||
return
|
||||
for segment in TOP_SEGMENTS:
|
||||
if target_net['id'] == segment['network_id']:
|
||||
target_net['provider:network_type'] = segment['network_type']
|
||||
target_net[
|
||||
'provider:physical_network'] = segment['physical_network']
|
||||
target_net[
|
||||
'provider:segmentation_id'] = segment['segmentation_id']
|
||||
break
|
||||
|
||||
|
||||
class FakePlugin(plugin.TricirclePlugin):
|
||||
def __init__(self):
|
||||
self.set_ipam_backend()
|
||||
self.xjob_handler = FakeRPCAPI()
|
||||
self.helper = FakeHelper(self)
|
||||
self.xjob_handler = FakeRPCAPI(self)
|
||||
self.type_manager = FakeTypeManager()
|
||||
|
||||
def _get_client(self, pod_name):
|
||||
@ -656,13 +758,7 @@ class FakePlugin(plugin.TricirclePlugin):
|
||||
|
||||
def _make_network_dict(self, network, fields=None,
|
||||
process_extensions=True, context=None):
|
||||
az_hints_key = 'availability_zone_hints'
|
||||
if az_hints_key in network:
|
||||
ret = DotDict(network)
|
||||
az_str = network[az_hints_key]
|
||||
ret[az_hints_key] = jsonutils.loads(az_str) if az_str else []
|
||||
return ret
|
||||
return network
|
||||
return _transform_az(network)
|
||||
|
||||
def _make_subnet_dict(self, subnet, fields=None, context=None):
|
||||
return subnet
|
||||
@ -740,6 +836,7 @@ class PluginTest(unittest.TestCase,
|
||||
self.save_method = manager.NeutronManager._get_default_service_plugins
|
||||
manager.NeutronManager._get_default_service_plugins = mock.Mock()
|
||||
manager.NeutronManager._get_default_service_plugins.return_value = []
|
||||
xmanager.IN_TEST = True
|
||||
|
||||
phynet = 'bridge'
|
||||
vlan_min = 2000
|
||||
@ -1074,7 +1171,7 @@ class PluginTest(unittest.TestCase,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_add_interface(self, mock_context, mock_action, mock_rpc):
|
||||
@ -1180,7 +1277,7 @@ class PluginTest(unittest.TestCase,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_add_interface_with_external_network(self, mock_context,
|
||||
@ -1291,13 +1388,13 @@ class PluginTest(unittest.TestCase,
|
||||
# add_router_interface is called, bottom router is already attached
|
||||
# to E-W bridge network, only need to attach internal network to
|
||||
# bottom router
|
||||
calls = [mock.call(t_ctx, 'add_gateway', b_router_id,
|
||||
calls = [mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id}),
|
||||
mock.call(t_ctx, 'add_gateway', b_router_id,
|
||||
{'network_id': b_ns_bridge_net_id,
|
||||
'external_fixed_ips': [
|
||||
{'subnet_id': b_ns_bridge_subnet_id,
|
||||
'ip_address': '100.128.0.2'}]}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_port['id']}),
|
||||
mock.call(t_ctx, 'add_gateway', b_router_id,
|
||||
@ -1348,10 +1445,10 @@ class PluginTest(unittest.TestCase,
|
||||
# to create N-S bridge network when attaching router interface(N-S
|
||||
# bridge network is created when setting router external gateway), so
|
||||
# add_gateway is not called.
|
||||
calls = [mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': another_b_port_id})]
|
||||
calls.extend([mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id}),
|
||||
mock.call(t_ctx, 'add_interface', b_router_id,
|
||||
{'port_id': another_b_port_id})])
|
||||
mock_action.assert_has_calls(calls)
|
||||
# all together 7 times calling
|
||||
self.assertEqual(mock_action.call_count, 7)
|
||||
@ -1420,7 +1517,7 @@ class PluginTest(unittest.TestCase,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeRPCAPI, 'configure_extra_routes', new=mock.Mock)
|
||||
@patch.object(FakeBaseRPCAPI, 'configure_extra_routes', new=mock.Mock)
|
||||
@patch.object(FakeClient, 'delete_ports')
|
||||
@patch.object(FakeClient, 'add_interface_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
@ -1441,15 +1538,14 @@ class PluginTest(unittest.TestCase,
|
||||
self.assertRaises(q_exceptions.ConnectionFailed,
|
||||
fake_plugin.add_router_interface,
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})
|
||||
# fail to delete bottom interface, so top interface is also there
|
||||
self.assertEqual(1, len(TOP_ROUTERS[0]['attached_ports']))
|
||||
# top interface is removed
|
||||
self.assertEqual(0, len(TOP_ROUTERS[0]['attached_ports']))
|
||||
|
||||
mock_action.side_effect = None
|
||||
mock_delete.side_effect = None
|
||||
t_port_id = TOP_ROUTERS[0]['attached_ports'][0]['port_id']
|
||||
# test that we can reuse the left interface to attach
|
||||
# test that we can success when bottom pod comes back
|
||||
fake_plugin.add_router_interface(
|
||||
q_ctx, t_router_id, {'port_id': t_port_id})
|
||||
q_ctx, t_router_id, {'subnet_id': t_subnet_id})
|
||||
# bottom interface and bridge port
|
||||
self.assertEqual(2, len(BOTTOM1_PORTS))
|
||||
|
||||
@ -1459,7 +1555,7 @@ class PluginTest(unittest.TestCase,
|
||||
'_make_subnet_dict', new=fake_make_subnet_dict)
|
||||
@patch.object(subnet_alloc.SubnetAllocator, '_lock_subnetpool',
|
||||
new=mock.Mock)
|
||||
@patch.object(FakeRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
|
||||
@patch.object(FakeClient, 'action_routers')
|
||||
@patch.object(context, 'get_context_from_neutron_context')
|
||||
def test_remove_interface(self, mock_context, mock_action, mock_rpc):
|
||||
@ -2056,4 +2152,4 @@ class PluginTest(unittest.TestCase,
|
||||
for res in RES_LIST:
|
||||
del res[:]
|
||||
cfg.CONF.unregister_opts(q_config.core_opts)
|
||||
manager.NeutronManager._get_default_service_plugins = self.save_method
|
||||
xmanager.IN_TEST = False
|
||||
|
@ -30,17 +30,32 @@ from tricircle.common.i18n import _
|
||||
from tricircle.common.i18n import _LE
|
||||
from tricircle.common.i18n import _LI
|
||||
from tricircle.common.i18n import _LW
|
||||
from tricircle.common import xrpcapi
|
||||
import tricircle.db.api as db_api
|
||||
from tricircle.db import core
|
||||
from tricircle.db import models
|
||||
from tricircle.network import helper
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
IN_TEST = False
|
||||
AZ_HINTS = 'availability_zone_hints'
|
||||
|
||||
|
||||
def _job_handle(job_type):
|
||||
def handle_func(func):
|
||||
@six.wraps(func)
|
||||
def handle_args(*args, **kwargs):
|
||||
if IN_TEST:
|
||||
# NOTE(zhiyuan) job mechanism will cause some unpredictable
|
||||
# result in unit test so we would like to bypass it. However
|
||||
# we have problem mocking a decorator which decorates member
|
||||
# functions, that's why we use this label, not an elegant
|
||||
# way though.
|
||||
func(*args, **kwargs)
|
||||
return
|
||||
ctx = args[1]
|
||||
payload = kwargs['payload']
|
||||
|
||||
@@ -129,7 +144,12 @@ class XManager(PeriodicTasks):
         # self.notifier = rpc.get_notifier(self.service_name, self.host)
         self.additional_endpoints = []
         self.clients = {constants.TOP: client.Client()}
-        self.job_handles = {constants.JT_ROUTER: self.configure_extra_routes}
+        self.job_handles = {
+            constants.JT_ROUTER: self.configure_extra_routes,
+            constants.JT_ROUTER_SETUP: self.setup_bottom_router,
+            constants.JT_PORT_DELETE: self.delete_server_port}
+        self.helper = helper.NetworkHelper()
+        self.xjob_handler = xrpcapi.XJobAPI()
         super(XManager, self).__init__()
 
     def _get_client(self, pod_name=None):
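The job_handles table is what routes an incoming job to its handler; later in this diff, redo_failed_job looks the job type up in this dict and re-invokes the handler with the stored payload. A minimal sketch of that dispatch, with a fake handler standing in for the real XManager methods:

    # Sketch only: a stand-in dispatcher illustrating the job_handles lookup.
    JT_ROUTER_SETUP = 'router_setup'

    def fake_setup_bottom_router(ctx, payload):
        print('setting up router for', payload[JT_ROUTER_SETUP])

    job_handles = {JT_ROUTER_SETUP: fake_setup_bottom_router}

    def dispatch(ctx, job_type, payload):
        # Mirrors redo_failed_job: look the handler up by job type and
        # call it with the original payload.
        job_handles[job_type](ctx, payload=payload)

    dispatch(None, JT_ROUTER_SETUP, {JT_ROUTER_SETUP: 'pod-id#router-id#net-id'})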
@ -207,6 +227,20 @@ class XManager(PeriodicTasks):
|
||||
|
||||
return info_text
|
||||
|
||||
@staticmethod
|
||||
def _get_resource_by_name(cli, cxt, _type, name):
|
||||
return cli.list_resources(_type, cxt, filters=[{'key': 'name',
|
||||
'comparator': 'eq',
|
||||
'value': name}])[0]
|
||||
|
||||
@staticmethod
|
||||
def _get_router_interfaces(cli, cxt, router_id, net_id):
|
||||
return cli.list_ports(
|
||||
cxt, filters=[{'key': 'network_id', 'comparator': 'eq',
|
||||
'value': net_id},
|
||||
{'key': 'device_id', 'comparator': 'eq',
|
||||
'value': router_id}])
|
||||
|
||||
@periodic_task.periodic_task
|
||||
def redo_failed_job(self, ctx):
|
||||
failed_jobs = db_api.get_latest_failed_jobs(ctx)
|
||||
@ -225,6 +259,156 @@ class XManager(PeriodicTasks):
|
||||
'job_type': job_type})
|
||||
self.job_handles[job_type](ctx, payload=payload)
|
||||
|
||||
@_job_handle(constants.JT_ROUTER_SETUP)
|
||||
def setup_bottom_router(self, ctx, payload):
|
||||
(b_pod_id,
|
||||
t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')
|
||||
|
||||
t_client = self._get_client()
|
||||
t_pod = db_api.get_top_pod(ctx)
|
||||
b_pod = db_api.get_pod(ctx, b_pod_id)
|
||||
b_client = self._get_client(b_pod['pod_name'])
|
||||
b_az = b_pod['az_name']
|
||||
|
||||
t_router = t_client.get_routers(ctx, t_router_id)
|
||||
if not t_router:
|
||||
# we just end this job if top router no longer exists
|
||||
return
|
||||
router_body = {'router': {'name': t_router_id,
|
||||
'distributed': False}}
|
||||
project_id = t_router['tenant_id']
|
||||
|
||||
# create bottom router in target bottom pod
|
||||
_, b_router_id = self.helper.prepare_bottom_element(
|
||||
ctx, project_id, b_pod, t_router, 'router', router_body)
|
||||
|
||||
# create top E-W bridge port
|
||||
t_bridge_net_name = constants.ew_bridge_net_name % project_id
|
||||
t_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
|
||||
t_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
|
||||
t_bridge_net_name)
|
||||
t_bridge_subnet = self._get_resource_by_name(t_client, ctx, 'subnet',
|
||||
t_bridge_subnet_name)
|
||||
q_cxt = None # no need to pass neutron context when using client
|
||||
t_bridge_port_id = self.helper.get_bridge_interface(
|
||||
ctx, q_cxt, project_id, t_pod, t_bridge_net['id'],
|
||||
b_router_id, None, True)
|
||||
|
||||
# create bottom E-W bridge port
|
||||
t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id)
|
||||
(is_new, b_bridge_port_id,
|
||||
_, _) = self.helper.get_bottom_bridge_elements(
|
||||
ctx, project_id, b_pod, t_bridge_net, False, t_bridge_subnet,
|
||||
t_bridge_port)
|
||||
|
||||
# attach bottom E-W bridge port to bottom router
|
||||
if is_new:
|
||||
# only attach bridge port the first time
|
||||
b_client.action_routers(ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
else:
|
||||
# still need to check if the bridge port is bound
|
||||
port = b_client.get_ports(ctx, b_bridge_port_id)
|
||||
if not port.get('device_id'):
|
||||
b_client.action_routers(ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_bridge_port_id})
|
||||
|
||||
# handle N-S networking
|
||||
ext_nets = t_client.list_networks(ctx,
|
||||
filters=[{'key': 'router:external',
|
||||
'comparator': 'eq',
|
||||
'value': True}])
|
||||
if not ext_nets:
|
||||
need_ns_bridge = False
|
||||
else:
|
||||
ext_net_pod_names = set(
|
||||
[ext_net[AZ_HINTS][0] for ext_net in ext_nets])
|
||||
if b_pod['pod_name'] in ext_net_pod_names:
|
||||
need_ns_bridge = False
|
||||
else:
|
||||
need_ns_bridge = True
|
||||
if need_ns_bridge:
|
||||
t_bridge_net_name = constants.ns_bridge_net_name % project_id
|
||||
t_bridge_subnet_name = constants.ns_bridge_subnet_name % project_id
|
||||
t_bridge_net = self._get_resource_by_name(
|
||||
t_client, ctx, 'network', t_bridge_net_name)
|
||||
t_bridge_subnet = self._get_resource_by_name(
|
||||
t_client, ctx, 'subnet', t_bridge_subnet_name)
|
||||
# create bottom N-S bridge network and subnet
|
||||
(_, _, b_bridge_subnet_id,
|
||||
b_bridge_net_id) = self.helper.get_bottom_bridge_elements(
|
||||
ctx, project_id, b_pod, t_bridge_net, True,
|
||||
t_bridge_subnet, None)
|
||||
# create top N-S bridge port
|
||||
ns_bridge_port_id = self.helper.get_bridge_interface(
|
||||
ctx, q_cxt, project_id, t_pod, t_bridge_net['id'],
|
||||
b_router_id, None, False)
|
||||
ns_bridge_port = t_client.get_ports(ctx, ns_bridge_port_id)
|
||||
# add external gateway for bottom router
|
||||
# add gateway is update operation, can run multiple times
|
||||
gateway_ip = ns_bridge_port['fixed_ips'][0]['ip_address']
|
||||
b_client.action_routers(
|
||||
ctx, 'add_gateway', b_router_id,
|
||||
{'network_id': b_bridge_net_id,
|
||||
'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
|
||||
'ip_address': gateway_ip}]})
|
||||
|
||||
# attach internal port to bottom router
|
||||
t_net = t_client.get_networks(ctx, t_net_id)
|
||||
if not t_net:
|
||||
# we just end this job if top network no longer exists
|
||||
return
|
||||
net_azs = t_net.get(AZ_HINTS, [])
|
||||
if net_azs and b_az not in net_azs:
|
||||
return
|
||||
t_ports = self._get_router_interfaces(t_client, ctx, t_router_id,
|
||||
t_net_id)
|
||||
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
|
||||
ctx, t_net_id, b_pod['pod_name'], constants.RT_NETWORK)
|
||||
if b_net_id:
|
||||
b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
|
||||
b_net_id)
|
||||
else:
|
||||
b_ports = []
|
||||
if not t_ports and b_ports:
|
||||
# remove redundant bottom interface
|
||||
b_port = b_ports[0]
|
||||
request_body = {'port_id': b_port['id']}
|
||||
b_client.action_routers(ctx, 'remove_interface', b_router_id,
|
||||
request_body)
|
||||
with ctx.session.begin():
|
||||
core.delete_resources(ctx, models.ResourceRouting,
|
||||
filters=[{'key': 'bottom_id',
|
||||
'comparator': 'eq',
|
||||
'value': b_port['id']}])
|
||||
elif t_ports and not b_ports:
|
||||
# create new bottom interface
|
||||
t_port = t_ports[0]
|
||||
# only consider ipv4 address currently
|
||||
t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
|
||||
t_subnet = t_client.get_subnets(ctx, t_subnet_id)
|
||||
b_port_id = self.helper.get_bottom_elements(
|
||||
ctx, project_id, b_pod, t_net, t_subnet, t_port)
|
||||
b_client.action_routers(ctx, 'add_interface', b_router_id,
|
||||
{'port_id': b_port_id})
|
||||
elif t_ports and b_ports:
|
||||
# when users remove the interface again, it's possible that top
|
||||
# interface is removed but deletion of bottom interface fails.
|
||||
# if users add the interface again during the retry of the job,
|
||||
# we have top and bottom interfaces exist but the id mapping
|
||||
# in the routing entry is incorrect, so we update it here
|
||||
t_port = t_ports[0]
|
||||
b_port = b_ports[0]
|
||||
with ctx.session.begin():
|
||||
core.update_resources(ctx, models.ResourceRouting,
|
||||
[{'key': 'bottom_id', 'comparator': 'eq',
|
||||
'value': b_port['id']},
|
||||
{'key': 'pod_id', 'comparator': 'eq',
|
||||
'value': b_pod_id}
|
||||
], {'top_id': t_port['id']})
|
||||
|
||||
self.xjob_handler.configure_extra_routes(ctx, t_router_id)
|
||||
|
||||
@_job_handle(constants.JT_ROUTER)
|
||||
def configure_extra_routes(self, ctx, payload):
|
||||
t_router_id = payload[constants.JT_ROUTER]
|
||||
|
@@ -44,8 +44,6 @@ _TIMER_INTERVAL = 30
_TIMER_INTERVAL_MAX = 60

common_opts = [
    cfg.StrOpt('host', default='tricircle.xhost',
               help=_("The host name for RPC server")),
    cfg.IntOpt('workers', default=1,
               help=_("Number of workers")),
    cfg.IntOpt('worker_handle_timeout', default=1800,