# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import netaddr
from oslo.config import cfg

from neutron.common import exceptions as q_exc
from neutron.db.firewall import firewall_db
from neutron.db import l3_db
from neutron.db.loadbalancer import loadbalancer_db
from neutron.db import routedserviceinsertion_db as rsi_db
from neutron.extensions import firewall as fw_ext
from neutron.extensions import l3
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as service_constants
from neutron.plugins.nicira.common import config  # noqa
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.dbexts import servicerouter as sr_db
from neutron.plugins.nicira.dbexts import vcns_db
from neutron.plugins.nicira.dbexts import vcns_models
from neutron.plugins.nicira.extensions import servicerouter as sr
from neutron.plugins.nicira import NeutronPlugin
from neutron.plugins.nicira import NvpApiClient
from neutron.plugins.nicira import nvplib
from neutron.plugins.nicira.vshield.common import (
    constants as vcns_const)
from neutron.plugins.nicira.vshield.common.constants import RouterStatus
from neutron.plugins.nicira.vshield.common import exceptions
from neutron.plugins.nicira.vshield.tasks.constants import TaskState
from neutron.plugins.nicira.vshield.tasks.constants import TaskStatus
from neutron.plugins.nicira.vshield import vcns_driver
from sqlalchemy.orm import exc as sa_exc

LOG = logging.getLogger(__name__)

ROUTER_TYPE_BASIC = 1
ROUTER_TYPE_ADVANCED = 2

ROUTER_STATUS = [
    service_constants.ACTIVE,
    service_constants.DOWN,
    service_constants.PENDING_CREATE,
    service_constants.PENDING_DELETE,
    service_constants.ERROR
]

ROUTER_STATUS_LEVEL = {
    service_constants.ACTIVE: RouterStatus.ROUTER_STATUS_ACTIVE,
    service_constants.DOWN: RouterStatus.ROUTER_STATUS_DOWN,
    service_constants.PENDING_CREATE: (
        RouterStatus.ROUTER_STATUS_PENDING_CREATE
    ),
    service_constants.PENDING_DELETE: (
        RouterStatus.ROUTER_STATUS_PENDING_DELETE
    ),
    service_constants.ERROR: RouterStatus.ROUTER_STATUS_ERROR
}
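
# NOTE: the RouterStatus levels above are ordered integers, so two statuses
# can be compared numerically and the more severe one kept (see
# _get_vse_status and get_routers below). ROUTER_STATUS is indexed by such a
# level to map back to a Neutron status string.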


class NvpAdvancedPlugin(sr_db.ServiceRouter_mixin,
                        NeutronPlugin.NvpPluginV2,
                        rsi_db.RoutedServiceInsertionDbMixin,
                        firewall_db.Firewall_db_mixin,
                        loadbalancer_db.LoadBalancerPluginDb):

    supported_extension_aliases = (
        NeutronPlugin.NvpPluginV2.supported_extension_aliases + [
            "service-router",
            "routed-service-insertion",
            "fwaas",
            "lbaas"
        ])

    def __init__(self):
        super(NvpAdvancedPlugin, self).__init__()

        self._super_create_ext_gw_port = (
            self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW])
        self._super_delete_ext_gw_port = (
            self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW])

        self._port_drivers['create'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_create_ext_gw_port)
        self._port_drivers['delete'][l3_db.DEVICE_OWNER_ROUTER_GW] = (
            self._vcns_delete_ext_gw_port)

        # cache router type based on router id
        self._router_type = {}
        self.callbacks = VcnsCallbacks(self)

        # load the vCNS driver
        self._load_vcns_drivers()

        # nvplib's create_lswitch needs to be replaced in order to proxy
        # logical switch create requests to vcns
        self._set_create_lswitch_proxy()

    def _set_create_lswitch_proxy(self):
        NeutronPlugin.nvplib.create_lswitch = self._proxy_create_lswitch
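
    # NOTE: the assignment above rebinds create_lswitch on the shared nvplib
    # module, so every lswitch creation in this process is proxied through
    # the vShield driver, not only calls made by this class.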

    def _proxy_create_lswitch(self, *args, **kwargs):
        name, tz_config, tags = (
            _process_base_create_lswitch_args(*args, **kwargs)
        )
        return self.vcns_driver.create_lswitch(
            name, tz_config, tags=tags,
            port_isolation=None, replication_mode=None)
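
    # For reference: tz_config is a list of transport zone dicts of the form
    # {"zone_uuid": ..., "transport_type": ...} (see
    # _create_integration_lswitch), and tags is a list of
    # {"tag": ..., "scope": ...} dicts built by
    # _process_base_create_lswitch_args at the bottom of this module.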

    def _load_vcns_drivers(self):
        self.vcns_driver = vcns_driver.VcnsDriver(self.callbacks)

    def _set_router_type(self, router_id, router_type):
        self._router_type[router_id] = router_type

    def _get_router_type(self, context=None, router_id=None, router=None):
        if not router:
            if router_id in self._router_type:
                return self._router_type[router_id]
            router = self._get_router(context, router_id)

        LOG.debug(_("EDGE: router = %s"), router)
        if router['nsx_attributes']['service_router']:
            router_type = ROUTER_TYPE_ADVANCED
        else:
            router_type = ROUTER_TYPE_BASIC
        self._set_router_type(router['id'], router_type)
        return router_type

    def _find_router_type(self, router):
        is_service_router = router.get(sr.SERVICE_ROUTER, False)
        if is_service_router:
            return ROUTER_TYPE_ADVANCED
        else:
            return ROUTER_TYPE_BASIC

    def _is_advanced_service_router(self, context=None, router_id=None,
                                    router=None):
        if router:
            router_type = self._get_router_type(router=router)
        else:
            router_type = self._get_router_type(context, router_id)
        return (router_type == ROUTER_TYPE_ADVANCED)

    def _vcns_create_ext_gw_port(self, context, port_data):
        router_id = port_data['device_id']
        if not self._is_advanced_service_router(context, router_id):
            self._super_create_ext_gw_port(context, port_data)
            return

        # NOP for Edge because currently the port is created internally
        # by VSM
        LOG.debug(_("EDGE: _vcns_create_ext_gw_port"))

    def _vcns_delete_ext_gw_port(self, context, port_data):
        router_id = port_data['device_id']
        if not self._is_advanced_service_router(context, router_id):
            self._super_delete_ext_gw_port(context, port_data)
            return

        # NOP for Edge
        LOG.debug(_("EDGE: _vcns_delete_ext_gw_port"))

    def _get_external_attachment_info(self, context, router):
        gw_port = router.gw_port
        ipaddress = None
        netmask = None
        nexthop = None

        if gw_port:
            # gw_port may have multiple IPs, only configure the first one
            if gw_port.get('fixed_ips'):
                ipaddress = gw_port['fixed_ips'][0]['ip_address']

            network_id = gw_port.get('network_id')
            if network_id:
                ext_net = self._get_network(context, network_id)
                if not ext_net.external:
                    msg = (_("Network '%s' is not a valid external "
                             "network") % network_id)
                    raise q_exc.BadRequest(resource='router', msg=msg)
                if ext_net.subnets:
                    ext_subnet = ext_net.subnets[0]
                    netmask = str(netaddr.IPNetwork(ext_subnet.cidr).netmask)
                    nexthop = ext_subnet.gateway_ip

        return (ipaddress, netmask, nexthop)

    def _get_external_gateway_address(self, context, router):
        ipaddress, netmask, nexthop = self._get_external_attachment_info(
            context, router)
        return nexthop

    def _vcns_update_static_routes(self, context, **kwargs):
        router = kwargs.get('router')
        if router is None:
            router = self._get_router(context, kwargs['router_id'])

        edge_id = kwargs.get('edge_id')
        if edge_id is None:
            binding = vcns_db.get_vcns_router_binding(context.session,
                                                      router['id'])
            edge_id = binding['edge_id']

        skippable = True
        if 'nexthop' in kwargs:
            nexthop = kwargs['nexthop']
            # The default gateway and vnic config have dependencies; if we
            # explicitly specify a nexthop change, tell the driver not to
            # skip this route update
            skippable = False
        else:
            nexthop = self._get_external_gateway_address(context,
                                                         router)

        if 'subnets' in kwargs:
            subnets = kwargs['subnets']
        else:
            subnets = self._find_router_subnets_cidrs(context.elevated(),
                                                      router['id'])

        routes = []
        for subnet in subnets:
            routes.append({
                'cidr': subnet,
                'nexthop': vcns_const.INTEGRATION_LR_IPADDRESS.split('/')[0]
            })
        self.vcns_driver.update_routes(router['id'], edge_id, nexthop, routes,
                                       skippable)
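
    # _get_nat_rules returns (snat, dnat): each dnat entry maps a floating
    # IP ('dst') to its fixed IP ('translated'), and each snat entry maps a
    # router subnet CIDR ('src') to the gateway IP ('translated').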

    def _get_nat_rules(self, context, router):
        fip_qry = context.session.query(l3_db.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()

        dnat = []
        snat = []
        for fip in fip_db:
            if fip.fixed_port_id:
                dnat.append({
                    'dst': fip.floating_ip_address,
                    'translated': fip.fixed_ip_address
                })

        gw_port = router.gw_port
        if gw_port and router.enable_snat:
            if gw_port.get('fixed_ips'):
                snat_ip = gw_port['fixed_ips'][0]['ip_address']
                subnets = self._find_router_subnets_cidrs(context.elevated(),
                                                          router['id'])
                for subnet in subnets:
                    snat.append({
                        'src': subnet,
                        'translated': snat_ip
                    })

        return (snat, dnat)

    def _update_nat_rules(self, context, router):
        snat, dnat = self._get_nat_rules(context, router)
        binding = vcns_db.get_vcns_router_binding(context.session,
                                                  router['id'])
        self.vcns_driver.update_nat_rules(router['id'],
                                          binding['edge_id'],
                                          snat, dnat)

    def _update_interface(self, context, router, sync=False):
        addr, mask, nexthop = self._get_external_attachment_info(
            context, router)

        secondary = []
        fip_qry = context.session.query(l3_db.FloatingIP)
        fip_db = fip_qry.filter_by(router_id=router['id']).all()
        for fip in fip_db:
            if fip.fixed_port_id:
                secondary.append(fip.floating_ip_address)
        # Add all vip addresses bound on the router
        vip_addrs = self._get_all_vip_addrs_by_router_id(context,
                                                         router['id'])
        secondary.extend(vip_addrs)

        binding = vcns_db.get_vcns_router_binding(context.session,
                                                  router['id'])
        task = self.vcns_driver.update_interface(
            router['id'], binding['edge_id'],
            vcns_const.EXTERNAL_VNIC_INDEX,
            self.vcns_driver.external_network,
            addr, mask, secondary=secondary)
        if sync:
            task.wait(TaskState.RESULT)

    def _update_router_gw_info(self, context, router_id, info):
        if not self._is_advanced_service_router(context, router_id):
            super(NvpAdvancedPlugin, self)._update_router_gw_info(
                context, router_id, info)
            return

        # get original gw_port config
        router = self._get_router(context, router_id)
        org_ext_net_id = router.gw_port_id and router.gw_port.network_id
        org_enable_snat = router.enable_snat
        orgaddr, orgmask, orgnexthop = self._get_external_attachment_info(
            context, router)

        super(NeutronPlugin.NvpPluginV2, self)._update_router_gw_info(
            context, router_id, info, router=router)

        new_ext_net_id = router.gw_port_id and router.gw_port.network_id
        new_enable_snat = router.enable_snat
        newaddr, newmask, newnexthop = self._get_external_attachment_info(
            context, router)

        binding = vcns_db.get_vcns_router_binding(context.session, router_id)

        if new_ext_net_id != org_ext_net_id and orgnexthop:
            # network changed, need to remove default gateway before vnic
            # can be configured
            LOG.debug(_("VCNS: delete default gateway %s"), orgnexthop)
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=None)

        if orgaddr != newaddr or orgmask != newmask:
            self.vcns_driver.update_interface(
                router_id, binding['edge_id'],
                vcns_const.EXTERNAL_VNIC_INDEX,
                self.vcns_driver.external_network,
                newaddr, newmask)

        if orgnexthop != newnexthop:
            self._vcns_update_static_routes(context,
                                            router=router,
                                            edge_id=binding['edge_id'],
                                            nexthop=newnexthop)

        if (new_ext_net_id == org_ext_net_id and
                org_enable_snat == new_enable_snat):
            return

        self._update_nat_rules(context, router)

    def _add_subnet_snat_rule(self, router, subnet):
        # NOP for service router
        if not self._is_advanced_service_router(router=router):
            super(NvpAdvancedPlugin, self)._add_subnet_snat_rule(
                router, subnet)

    def _delete_subnet_snat_rule(self, router, subnet):
        # NOP for service router
        if not self._is_advanced_service_router(router=router):
            super(NvpAdvancedPlugin, self)._delete_subnet_snat_rule(
                router, subnet)

    def _remove_floatingip_address(self, context, fip_db):
        # NOP for service router
        router_id = fip_db.router_id
        if not self._is_advanced_service_router(context, router_id):
            super(NvpAdvancedPlugin, self)._remove_floatingip_address(
                context, fip_db)

    def _create_advanced_service_router(self, context, name,
                                        lrouter, lswitch):
        # store binding
        binding = vcns_db.add_vcns_router_binding(
            context.session, lrouter['uuid'], None, lswitch['uuid'],
            service_constants.PENDING_CREATE)

        # deploy edge
        jobdata = {
            'lrouter': lrouter,
            'lswitch': lswitch,
            'context': context
        }

        # deploy and wait until the deploy request has been submitted,
        # so we will have edge_id ready. The wait here should be fine
        # as we're not in a database transaction now
        self.vcns_driver.deploy_edge(
            lrouter['uuid'], name, lswitch['uuid'], jobdata=jobdata,
            wait_for_exec=True)

        return binding

    def _create_integration_lswitch(self, tenant_id, name):
        # use the default transport zone
        transport_zone_config = [{
            "zone_uuid": self.cluster.default_tz_uuid,
            "transport_type": cfg.CONF.NVP.default_transport_type
        }]
        return self.vcns_driver.create_lswitch(name, transport_zone_config)

    def _add_router_integration_interface(self, tenant_id, name,
                                          lrouter, lswitch):
        # create logical switch port
        try:
            ls_port = nvplib.create_lport(
                self.cluster, lswitch['uuid'], tenant_id,
                '', '', lrouter['uuid'], True)
        except NvpApiClient.NvpApiException:
            msg = (_("An exception occurred while creating a port "
                     "on lswitch %s") % lswitch['uuid'])
            LOG.exception(msg)
            raise q_exc.NeutronException(message=msg)

        # create logical router port
        try:
            neutron_port_id = ''
            pname = name[:36] + '-lp'
            admin_status_enabled = True
            lr_port = nvplib.create_router_lport(
                self.cluster, lrouter['uuid'], tenant_id,
                neutron_port_id, pname, admin_status_enabled,
                [vcns_const.INTEGRATION_LR_IPADDRESS])
        except NvpApiClient.NvpApiException:
            msg = (_("Unable to create port on NVP logical router %s") % name)
            LOG.exception(msg)
            nvplib.delete_port(self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise q_exc.NeutronException(message=msg)

        # attach the logical router port to the switch port
        try:
            self._update_router_port_attachment(
                self.cluster, None, lrouter['uuid'], {}, lr_port['uuid'],
                'PatchAttachment', ls_port['uuid'], None)
        except NvpApiClient.NvpApiException as e:
            # lr_port should have been deleted
            nvplib.delete_port(self.cluster, lswitch['uuid'], ls_port['uuid'])
            raise e
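
    # Advanced router creation sequence: create the NVP lrouter, then the
    # integration lswitch, wire the lrouter to it, and finally deploy a
    # vShield Edge; each failed step rolls back the resources created by
    # the earlier ones.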

    def _create_lrouter(self, context, router, nexthop):
        lrouter = super(NvpAdvancedPlugin, self)._create_lrouter(
            context, router, vcns_const.INTEGRATION_EDGE_IPADDRESS)

        router_type = self._find_router_type(router)
        self._set_router_type(lrouter['uuid'], router_type)
        if router_type == ROUTER_TYPE_BASIC:
            return lrouter

        tenant_id = self._get_tenant_id_for_create(context, router)
        name = router['name']
        try:
            lsname = name[:36] + '-ls'
            lswitch = self._create_integration_lswitch(
                tenant_id, lsname)
        except Exception:
            msg = _("Unable to create integration logical switch "
                    "for router %s") % name
            LOG.exception(msg)
            nvplib.delete_lrouter(self.cluster, lrouter['uuid'])
            raise q_exc.NeutronException(message=msg)

        try:
            self._add_router_integration_interface(tenant_id, name,
                                                   lrouter, lswitch)
        except Exception:
            msg = _("Unable to add router interface to integration lswitch "
                    "for router %s") % name
            LOG.exception(msg)
            nvplib.delete_lrouter(self.cluster, lrouter['uuid'])
            raise q_exc.NeutronException(message=msg)

        try:
            self._create_advanced_service_router(
                context, name, lrouter, lswitch)
        except Exception:
            msg = (_("Unable to create advanced service router "
                     "for %s") % name)
            LOG.exception(msg)
            self.vcns_driver.delete_lswitch(lswitch['uuid'])
            nvplib.delete_lrouter(self.cluster, lrouter['uuid'])
            raise q_exc.NeutronException(message=msg)

        lrouter['status'] = service_constants.PENDING_CREATE
        return lrouter

    def _delete_lrouter(self, context, id):
        binding = vcns_db.get_vcns_router_binding(context.session, id)
        if not binding:
            super(NvpAdvancedPlugin, self)._delete_lrouter(context, id)
        else:
            vcns_db.update_vcns_router_binding(
                context.session, id, status=service_constants.PENDING_DELETE)

            lswitch_id = binding['lswitch_id']
            edge_id = binding['edge_id']

            # delete lswitch
            try:
                self.vcns_driver.delete_lswitch(lswitch_id)
            except exceptions.ResourceNotFound:
                LOG.warning(_("Did not find lswitch %s in NVP"), lswitch_id)

            # delete edge
            jobdata = {
                'context': context
            }
            self.vcns_driver.delete_edge(id, edge_id, jobdata=jobdata)

            # delete the NVP logical router
            nvplib.delete_lrouter(self.cluster, id)

        if id in self._router_type:
            del self._router_type[id]

    def _update_lrouter(self, context, router_id, name, nexthop, routes=None):
        if not self._is_advanced_service_router(context, router_id):
            return super(NvpAdvancedPlugin, self)._update_lrouter(
                context, router_id, name, nexthop, routes=routes)

        previous_routes = super(NvpAdvancedPlugin, self)._update_lrouter(
            context, router_id, name,
            vcns_const.INTEGRATION_EDGE_IPADDRESS, routes=routes)

        # TODO(fank): Theoretically users can specify extra routes for the
        # physical network, and such routes need to be configured on the
        # Edge. This can be done by checking whether the nexthop is in the
        # external network. But for now we only handle routes for the
        # logical space and leave the rest for future enhancement.

        # Let _update_router_gw_info handle nexthop change
        #self._vcns_update_static_routes(context, router_id=router_id)

        return previous_routes

    def _retrieve_and_delete_nat_rules(self, context, floating_ip_address,
                                       internal_ip, router_id,
                                       min_num_rules_expected=0):
        # NOP for advanced service router
        if not self._is_advanced_service_router(context, router_id):
            super(NvpAdvancedPlugin, self)._retrieve_and_delete_nat_rules(
                context, floating_ip_address, internal_ip, router_id,
                min_num_rules_expected=min_num_rules_expected)

    def _update_fip_assoc(self, context, fip, floatingip_db, external_port):
        # Update the DB model only for the advanced service router
        router_id = self._get_fip_assoc_data(context, fip, floatingip_db)[2]
        if (router_id and
                not self._is_advanced_service_router(context, router_id)):
            super(NvpAdvancedPlugin, self)._update_fip_assoc(
                context, fip, floatingip_db, external_port)
        else:
            super(NeutronPlugin.NvpPluginV2, self)._update_fip_assoc(
                context, fip, floatingip_db, external_port)

    def _get_nvp_lrouter_status(self, id):
        try:
            lrouter = nvplib.get_lrouter(self.cluster, id)
            lr_status = lrouter["_relations"]["LogicalRouterStatus"]
            if lr_status["fabric_status"]:
                nvp_status = RouterStatus.ROUTER_STATUS_ACTIVE
            else:
                nvp_status = RouterStatus.ROUTER_STATUS_DOWN
        except q_exc.NotFound:
            nvp_status = RouterStatus.ROUTER_STATUS_ERROR

        return nvp_status

    def _get_vse_status(self, context, id):
        binding = vcns_db.get_vcns_router_binding(context.session, id)

        edge_status_level = self.vcns_driver.get_edge_status(
            binding['edge_id'])
        edge_db_status_level = ROUTER_STATUS_LEVEL[binding.status]

        if edge_status_level > edge_db_status_level:
            return edge_status_level
        else:
            return edge_db_status_level

    def _get_all_nvp_lrouters_statuses(self, tenant_id, fields):
        # get nvp lrouters status
        nvp_lrouters = nvplib.get_lrouters(self.cluster,
                                           tenant_id,
                                           fields)

        nvp_status = {}
        for nvp_lrouter in nvp_lrouters:
            if (nvp_lrouter["_relations"]["LogicalRouterStatus"]
                    ["fabric_status"]):
                nvp_status[nvp_lrouter['uuid']] = (
                    RouterStatus.ROUTER_STATUS_ACTIVE
                )
            else:
                nvp_status[nvp_lrouter['uuid']] = (
                    RouterStatus.ROUTER_STATUS_DOWN
                )

        return nvp_status

    def _get_all_vse_statuses(self, context):
        bindings = self._model_query(
            context, vcns_models.VcnsRouterBinding)

        vse_db_status_level = {}
        edge_id_to_router_id = {}
        router_ids = []
        for binding in bindings:
            if not binding['edge_id']:
                continue
            router_id = binding['router_id']
            router_ids.append(router_id)
            edge_id_to_router_id[binding['edge_id']] = router_id
            vse_db_status_level[router_id] = (
                ROUTER_STATUS_LEVEL[binding['status']])

        if not vse_db_status_level:
            # no advanced service router, no need to query
            return {}

        vse_status_level = {}
        edges_status_level = self.vcns_driver.get_edges_statuses()
        for edge_id, status_level in edges_status_level.iteritems():
            if edge_id in edge_id_to_router_id:
                router_id = edge_id_to_router_id[edge_id]
                db_status_level = vse_db_status_level[router_id]
                if status_level > db_status_level:
                    vse_status_level[router_id] = status_level
                else:
                    vse_status_level[router_id] = db_status_level

        return vse_status_level
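
    # get_router/get_routers merge the status stored in the DB with the
    # live Edge status and report whichever level is more severe.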

    def get_router(self, context, id, fields=None):
        if fields and 'status' not in fields:
            return super(NvpAdvancedPlugin, self).get_router(
                context, id, fields=fields)

        router = super(NvpAdvancedPlugin, self).get_router(context, id)

        router_type = self._find_router_type(router)
        if router_type == ROUTER_TYPE_ADVANCED:
            vse_status_level = self._get_vse_status(context, id)
            if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
                router['status'] = ROUTER_STATUS[vse_status_level]

        return self._fields(router, fields)

    def get_routers(self, context, filters=None, fields=None, **kwargs):
        routers = super(NvpAdvancedPlugin, self).get_routers(
            context, filters=filters, **kwargs)

        if fields and 'status' not in fields:
            # no status checking, just return regular get_routers
            return [self._fields(router, fields) for router in routers]

        for router in routers:
            router_type = self._find_router_type(router)
            if router_type == ROUTER_TYPE_ADVANCED:
                break
        else:
            # no advanced service router, return here
            return [self._fields(router, fields) for router in routers]

        vse_status_all = self._get_all_vse_statuses(context)
        for router in routers:
            router_type = self._find_router_type(router)
            if router_type == ROUTER_TYPE_ADVANCED:
                vse_status_level = vse_status_all.get(router['id'])
                if vse_status_level is None:
                    vse_status_level = RouterStatus.ROUTER_STATUS_ERROR
                if vse_status_level > ROUTER_STATUS_LEVEL[router['status']]:
                    router['status'] = ROUTER_STATUS[vse_status_level]

        return [self._fields(router, fields) for router in routers]

    def add_router_interface(self, context, router_id, interface_info):
        info = super(NvpAdvancedPlugin, self).add_router_interface(
            context, router_id, interface_info)
        if self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            if router.enable_snat:
                self._update_nat_rules(context, router)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._vcns_update_static_routes(context, router=router)
        return info

    def remove_router_interface(self, context, router_id, interface_info):
        info = super(NvpAdvancedPlugin, self).remove_router_interface(
            context, router_id, interface_info)
        if self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            if router.enable_snat:
                self._update_nat_rules(context, router)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._vcns_update_static_routes(context, router=router)
        return info

    def create_floatingip(self, context, floatingip):
        fip = super(NvpAdvancedPlugin, self).create_floatingip(
            context, floatingip)
        router_id = fip.get('router_id')
        if router_id and self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._update_nat_rules(context, router)
            self._update_interface(context, router)
        return fip

    def update_floatingip(self, context, id, floatingip):
        fip = super(NvpAdvancedPlugin, self).update_floatingip(
            context, id, floatingip)
        router_id = fip.get('router_id')
        if router_id and self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._update_nat_rules(context, router)
            self._update_interface(context, router)
        return fip

    def delete_floatingip(self, context, id):
        fip_db = self._get_floatingip(context, id)
        router_id = None
        if fip_db.fixed_port_id:
            router_id = fip_db.router_id
        super(NvpAdvancedPlugin, self).delete_floatingip(context, id)
        if router_id and self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._update_interface(context, router)
            self._update_nat_rules(context, router)

    def disassociate_floatingips(self, context, port_id):
        try:
            fip_qry = context.session.query(l3_db.FloatingIP)
            fip_db = fip_qry.filter_by(fixed_port_id=port_id).one()
            router_id = fip_db.router_id
        except sa_exc.NoResultFound:
            router_id = None
        super(NvpAdvancedPlugin, self).disassociate_floatingips(context,
                                                                port_id)
        if router_id and self._is_advanced_service_router(context, router_id):
            router = self._get_router(context, router_id)
            # TODO(fank): do rollback on error, or have a dedicated thread
            # do sync work (rollback, re-configure, or make router down)
            self._update_interface(context, router)
            self._update_nat_rules(context, router)

    #
    # FWaaS plugin implementation
    #
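
    # Each firewall is bound 1:1 to an advanced service router (enforced in
    # create_firewall); rule changes are pushed to the bound Edge as a bulk
    # firewall configuration via _vcns_update_firewall.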

    def _firewall_set_status(
        self, context, firewall_id, status, firewall=None):
        with context.session.begin(subtransactions=True):
            fw_db = self._get_firewall(context, firewall_id)
            if status == service_constants.PENDING_UPDATE and (
                fw_db.status == service_constants.PENDING_DELETE):
                raise fw_ext.FirewallInPendingState(
                    firewall_id=firewall_id, pending_state=status)
            else:
                fw_db.status = status
                if firewall:
                    firewall['status'] = status

    def _ensure_firewall_update_allowed(self, context, firewall_id):
        fwall = self.get_firewall(context, firewall_id)
        if fwall['status'] in [service_constants.PENDING_CREATE,
                               service_constants.PENDING_UPDATE,
                               service_constants.PENDING_DELETE]:
            raise fw_ext.FirewallInPendingState(
                firewall_id=firewall_id, pending_state=fwall['status'])

    def _ensure_firewall_policy_update_allowed(
        self, context, firewall_policy_id):
        firewall_policy = self.get_firewall_policy(context, firewall_policy_id)
        for firewall_id in firewall_policy.get('firewall_list', []):
            self._ensure_firewall_update_allowed(context, firewall_id)

    def _ensure_update_or_delete_firewall_rule(
        self, context, firewall_rule_id):
        fw_rule = self.get_firewall_rule(context, firewall_rule_id)
        if fw_rule.get('firewall_policy_id'):
            self._ensure_firewall_policy_update_allowed(
                context, fw_rule['firewall_policy_id'])

    def _make_firewall_rule_list_by_policy_id(self, context, fw_policy_id):
        if not fw_policy_id:
            return None
        firewall_policy_db = self._get_firewall_policy(context, fw_policy_id)
        return [
            self._make_firewall_rule_dict(fw_rule_db)
            for fw_rule_db in firewall_policy_db['firewall_rules']
        ]

    def _get_edge_id_by_vcns_edge_binding(self, context,
                                          router_id):
        # Get the vcns_router_binding mapping between router and edge
        router_binding = vcns_db.get_vcns_router_binding(
            context.session, router_id)
        return router_binding.edge_id

    def _get_firewall_list_from_firewall_policy(self, context, policy_id):
        firewall_policy_db = self._get_firewall_policy(context, policy_id)
        return [
            self._make_firewall_dict(fw_db)
            for fw_db in firewall_policy_db['firewalls']
        ]

    def _get_firewall_list_from_firewall_rule(self, context, rule_id):
        rule = self._get_firewall_rule(context, rule_id)
        if not rule.firewall_policy_id:
            # The firewall rule is not associated with a firewall policy yet
            return None

        return self._get_firewall_list_from_firewall_policy(
            context, rule.firewall_policy_id)

    def _vcns_update_firewall(self, context, fw, router_id=None, **kwargs):
        edge_id = kwargs.get('edge_id')
        if not edge_id:
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, router_id)
        firewall_rule_list = kwargs.get('firewall_rule_list')
        if not firewall_rule_list:
            firewall_rule_list = self._make_firewall_rule_list_by_policy_id(
                context, fw['firewall_policy_id'])
        fw_with_rules = fw
        fw_with_rules['firewall_rule_list'] = firewall_rule_list
        try:
            self.vcns_driver.update_firewall(context, edge_id, fw_with_rules)
        except exceptions.VcnsApiException as e:
            self._firewall_set_status(
                context, fw['id'], service_constants.ERROR)
            msg = (_("Failed to create firewall on vShield Edge "
                     "bound to router %s") % router_id)
            LOG.exception(msg)
            raise e

        except exceptions.BadRequest as e:
            self._firewall_set_status(
                context, fw['id'], service_constants.ERROR)
            LOG.exception(_("Bad firewall request input"))
            raise e

    def _vcns_delete_firewall(self, context, router_id=None, **kwargs):
        edge_id = kwargs.get('edge_id')
        if not edge_id:
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, router_id)
        # TODO(linb): do rollback on error
        self.vcns_driver.delete_firewall(context, edge_id)

    def create_firewall(self, context, firewall):
        LOG.debug(_("create_firewall() called"))
        router_id = firewall['firewall'].get(vcns_const.ROUTER_ID)
        if not router_id:
            msg = _("router_id is not provided!")
            LOG.error(msg)
            raise q_exc.BadRequest(resource='router', msg=msg)
        if not self._is_advanced_service_router(context, router_id):
            msg = _("router_id: %s is not an advanced router!") % router_id
            LOG.error(msg)
            raise q_exc.BadRequest(resource='router', msg=msg)
        if self._get_resource_router_id_binding(
                context, firewall_db.Firewall, router_id=router_id):
            msg = _("A firewall is already associated with the router")
            LOG.error(msg)
            raise nvp_exc.NvpServiceOverQuota(
                overs='firewall', err_msg=msg)

        fw = super(NvpAdvancedPlugin, self).create_firewall(context, firewall)
        # Add router service insertion binding with the firewall object
        res = {
            'id': fw['id'],
            'router_id': router_id
        }
        self._process_create_resource_router_id(
            context, res, firewall_db.Firewall)
        # Since there is only one firewall per edge, this is a bulk
        # configuration operation on the firewall
        self._vcns_update_firewall(context, fw, router_id)
        self._firewall_set_status(
            context, fw['id'], service_constants.ACTIVE, fw)
        return fw

    def update_firewall(self, context, id, firewall):
        LOG.debug(_("update_firewall() called"))
        self._ensure_firewall_update_allowed(context, id)
        rule_list_pre = self._make_firewall_rule_list_by_policy_id(
            context,
            self.get_firewall(context, id)['firewall_policy_id'])
        firewall['firewall']['status'] = service_constants.PENDING_UPDATE
        fw = super(NvpAdvancedPlugin, self).update_firewall(
            context, id, firewall)
        rule_list_new = self._make_firewall_rule_list_by_policy_id(
            context, fw['firewall_policy_id'])
        if rule_list_pre == rule_list_new:
            self._firewall_set_status(
                context, fw['id'], service_constants.ACTIVE, fw)
            return fw
        else:
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=id)
            self._vcns_update_firewall(
                context, fw, service_router_binding.router_id)
            self._firewall_set_status(
                context, fw['id'], service_constants.ACTIVE, fw)
            return fw

    def delete_firewall(self, context, id):
        LOG.debug(_("delete_firewall() called"))
        self._firewall_set_status(
            context, id, service_constants.PENDING_DELETE)
        service_router_binding = self._get_resource_router_id_binding(
            context, firewall_db.Firewall, resource_id=id)
        self._vcns_delete_firewall(context, service_router_binding.router_id)
        super(NvpAdvancedPlugin, self).delete_firewall(context, id)
        self._delete_resource_router_id_binding(
            context, id, firewall_db.Firewall)

    def update_firewall_rule(self, context, id, firewall_rule):
        LOG.debug(_("update_firewall_rule() called"))
        self._ensure_update_or_delete_firewall_rule(context, id)
        fwr_pre = self.get_firewall_rule(context, id)
        fwr = super(NvpAdvancedPlugin, self).update_firewall_rule(
            context, id, firewall_rule)
        if fwr_pre == fwr:
            return fwr

        # check if this rule is associated with a firewall
        fw_list = self._get_firewall_list_from_firewall_rule(context, id)
        if not fw_list:
            return fwr

        for fw in fw_list:
            # get the router service insertion binding with the firewall id
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)

            # TODO(linb): do rollback on error
            self.vcns_driver.update_firewall_rule(context, id, edge_id, fwr)

        return fwr

    def update_firewall_policy(self, context, id, firewall_policy):
        LOG.debug(_("update_firewall_policy() called"))
        self._ensure_firewall_policy_update_allowed(context, id)
        firewall_rules_pre = self._make_firewall_rule_list_by_policy_id(
            context, id)
        fwp = super(NvpAdvancedPlugin, self).update_firewall_policy(
            context, id, firewall_policy)
        firewall_rules = self._make_firewall_rule_list_by_policy_id(
            context, id)
        if firewall_rules_pre == firewall_rules:
            return fwp

        # check if this policy is associated with a firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp

        for fw in fw_list:
            # Get the router service insertion binding with the firewall id
            # TODO(fank): optimize by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            self._vcns_update_firewall(
                context, fw, service_router_binding.router_id)
        return fwp

    def insert_rule(self, context, id, rule_info):
        LOG.debug(_("insert_rule() called"))
        self._ensure_firewall_policy_update_allowed(context, id)
        fwp = super(NvpAdvancedPlugin, self).insert_rule(
            context, id, rule_info)
        fwr = super(NvpAdvancedPlugin, self).get_firewall_rule(
            context, rule_info['firewall_rule_id'])

        # check if this policy is associated with a firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # TODO(fank): optimize by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)

            if rule_info.get('insert_before') or rule_info.get('insert_after'):
                # if insert_before or insert_after is set, call the
                # VCNS insert_rule API
                # TODO(linb): do rollback on error
                self.vcns_driver.insert_rule(context, rule_info, edge_id, fwr)
            else:
                # otherwise do a bulk configuration on the firewall
                self._vcns_update_firewall(context, fw, edge_id=edge_id)
        return fwp

    def remove_rule(self, context, id, rule_info):
        LOG.debug(_("remove_rule() called"))
        self._ensure_firewall_policy_update_allowed(context, id)
        fwp = super(NvpAdvancedPlugin, self).remove_rule(
            context, id, rule_info)
        fwr = super(NvpAdvancedPlugin, self).get_firewall_rule(
            context, rule_info['firewall_rule_id'])

        # check if this policy is associated with a firewall
        fw_list = self._get_firewall_list_from_firewall_policy(context, id)
        if not fw_list:
            return fwp
        for fw in fw_list:
            # TODO(fank): optimize by using _get_resource_router_id_bindings
            service_router_binding = self._get_resource_router_id_binding(
                context, firewall_db.Firewall, resource_id=fw['id'])
            edge_id = self._get_edge_id_by_vcns_edge_binding(
                context, service_router_binding.router_id)
            # TODO(linb): do rollback on error
            self.vcns_driver.delete_firewall_rule(
                context, fwr['id'], edge_id)
        return fwp

    #
    # LBAAS service plugin implementation
    #
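
    # LBaaS resources reach the Edge through the vip -> router -> edge
    # binding chain (_get_edge_id_by_vip_id); pool and monitor changes are
    # replayed on the Edge whenever the pool is bound to a vip.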

    def _get_edge_id_by_vip_id(self, context, vip_id):
        try:
            router_binding = self._get_resource_router_id_bindings(
                context, loadbalancer_db.Vip, resource_ids=[vip_id])[0]
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to find the edge with "
                                "vip_id: %s"), vip_id)
        service_binding = vcns_db.get_vcns_router_binding(
            context.session, router_binding.router_id)
        return service_binding.edge_id

    def _get_all_vip_addrs_by_router_id(
        self, context, router_id):
        vip_bindings = self._get_resource_router_id_bindings(
            context, loadbalancer_db.Vip, router_ids=[router_id])
        vip_addrs = []
        for vip_binding in vip_bindings:
            vip = self.get_vip(context, vip_binding.resource_id)
            vip_addrs.append(vip.get('address'))
        return vip_addrs

    def _add_router_service_insertion_binding(self, context, resource_id,
                                              router_id,
                                              model):
        res = {
            'id': resource_id,
            'router_id': router_id
        }
        self._process_create_resource_router_id(context, res,
                                                model)
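
    # PoolMonitorAssociation rows are keyed by (monitor_id, pool_id), which
    # is why _resource_set_status takes an extra pool_id argument for that
    # model.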

    def _resource_set_status(self, context, model, id, status, obj=None,
                             pool_id=None):
        with context.session.begin(subtransactions=True):
            try:
                qry = context.session.query(model)
                if issubclass(model, loadbalancer_db.PoolMonitorAssociation):
                    res = qry.filter_by(monitor_id=id,
                                        pool_id=pool_id).one()
                else:
                    res = qry.filter_by(id=id).one()
                if status == service_constants.PENDING_UPDATE and (
                    res.get('status') == service_constants.PENDING_DELETE):
                    msg = (_("Operation cannot be performed since resource "
                             "%(model)s : %(id)s is being deleted!") %
                           {'model': model,
                            'id': id})
                    LOG.error(msg)
                    raise nvp_exc.NvpServicePluginException(err_msg=msg)
                else:
                    res.status = status
            except sa_exc.NoResultFound:
                msg = (_("Resource %(model)s : %(id)s not found!") %
                       {'model': model,
                        'id': id})
                LOG.exception(msg)
                raise nvp_exc.NvpServicePluginException(err_msg=msg)
            if obj:
                obj['status'] = status

    def _vcns_create_pool_and_monitors(self, context, pool_id, **kwargs):
        pool = self.get_pool(context, pool_id)
        edge_id = kwargs.get('edge_id')
        if not edge_id:
            edge_id = self._get_edge_id_by_vip_id(
                context, pool['vip_id'])
        # Check whether the pool is already created on the router
        # in case of a future M:N relation between Pool and Vip

        # Check associated HealthMonitors and then create them
        for monitor_id in pool.get('health_monitors'):
            hm = self.get_health_monitor(context, monitor_id)
            try:
                self.vcns_driver.create_health_monitor(
                    context, edge_id, hm)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_("Failed to create healthmonitor "
                                    "associated with pool id: %s!") % pool_id)
                    for created_id in pool.get('health_monitors'):
                        if created_id == monitor_id:
                            break
                        self.vcns_driver.delete_health_monitor(
                            context, created_id, edge_id)
        # Create the pool on the edge
        members = [
            super(NvpAdvancedPlugin, self).get_member(
                context, member_id)
            for member_id in pool.get('members')
        ]
        try:
            self.vcns_driver.create_pool(context, edge_id, pool, members)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create pool on vshield edge"))
                self.vcns_driver.delete_pool(
                    context, pool_id, edge_id)
                for monitor_id in pool.get('health_monitors'):
                    self.vcns_driver.delete_health_monitor(
                        context, monitor_id, edge_id)

    def _vcns_update_pool(self, context, pool, **kwargs):
        edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
        members = kwargs.get('members')
        if not members:
            members = [
                super(NvpAdvancedPlugin, self).get_member(
                    context, member_id)
                for member_id in pool.get('members')
            ]
        self.vcns_driver.update_pool(context, edge_id, pool, members)

    def create_vip(self, context, vip):
        LOG.debug(_("create_vip() called"))
        router_id = vip['vip'].get(vcns_const.ROUTER_ID)
        if not router_id:
            msg = _("router_id is not provided!")
            LOG.error(msg)
            raise q_exc.BadRequest(resource='router', msg=msg)

        if not self._is_advanced_service_router(context, router_id):
            msg = _("router_id: %s is not an advanced router!") % router_id
            LOG.error(msg)
            raise nvp_exc.NvpServicePluginException(err_msg=msg)

        # Check whether the vip port is an external port
        subnet_id = vip['vip']['subnet_id']
        network_id = self.get_subnet(context, subnet_id)['network_id']
        ext_net = self._get_network(context, network_id)
        if not ext_net.external:
            msg = (_("Network '%s' is not a valid external "
                     "network") % network_id)
            raise nvp_exc.NvpServicePluginException(err_msg=msg)

        v = super(NvpAdvancedPlugin, self).create_vip(context, vip)
        # Get the edge_id for the resource
        router_binding = vcns_db.get_vcns_router_binding(
            context.session,
            router_id)
        edge_id = router_binding.edge_id
        # Add the vip_router binding
        self._add_router_service_insertion_binding(context, v['id'],
                                                   router_id,
                                                   loadbalancer_db.Vip)
        # Create the vip port on vShield Edge
        router = self._get_router(context, router_id)
        self._update_interface(context, router, sync=True)
        # Create the vip and associated pool/monitor on the corresponding edge
        try:
            self._vcns_create_pool_and_monitors(
                context, v['pool_id'], edge_id=edge_id)
            self.vcns_driver.create_vip(context, edge_id, v)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to create vip!"))
                self._delete_resource_router_id_binding(
                    context, v['id'], loadbalancer_db.Vip)
                super(NvpAdvancedPlugin, self).delete_vip(context, v['id'])
        self._resource_set_status(context, loadbalancer_db.Vip,
                                  v['id'], service_constants.ACTIVE, v)

        return v

    def update_vip(self, context, id, vip):
        edge_id = self._get_edge_id_by_vip_id(context, id)
        old_vip = self.get_vip(context, id)
        vip['vip']['status'] = service_constants.PENDING_UPDATE
        v = super(NvpAdvancedPlugin, self).update_vip(context, id, vip)
        if old_vip['pool_id'] != v['pool_id']:
            self.vcns_driver.delete_vip(context, id)
            # Delete the old pool/monitor on the edge
            # TODO(linb): factor out the pool/monitor removal into a
            # separate method
            old_pool = self.get_pool(context, old_vip['pool_id'])
            self.vcns_driver.delete_pool(
                context, old_vip['pool_id'], edge_id)
            for monitor_id in old_pool.get('health_monitors'):
                self.vcns_driver.delete_health_monitor(
                    context, monitor_id, edge_id)
            # Create the new pool/monitor objects on the edge
            # TODO(linb): add exception handling on error
            self._vcns_create_pool_and_monitors(
                context, v['pool_id'], edge_id=edge_id)
            self.vcns_driver.create_vip(context, edge_id, v)
            return v
        try:
            self.vcns_driver.update_vip(context, v)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update vip with id: %s!"), id)
                self._resource_set_status(context, loadbalancer_db.Vip,
                                          id, service_constants.ERROR, v)

        self._resource_set_status(context, loadbalancer_db.Vip,
                                  v['id'], service_constants.ACTIVE, v)
        return v

    def delete_vip(self, context, id):
        v = self.get_vip(context, id)
        self._resource_set_status(
            context, loadbalancer_db.Vip,
            id, service_constants.PENDING_DELETE)
        try:
            self.vcns_driver.delete_vip(context, id)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to delete vip with id: %s!"), id)
                self._resource_set_status(context, loadbalancer_db.Vip,
                                          id, service_constants.ERROR)
        edge_id = self._get_edge_id_by_vip_id(context, id)
        # Check associated HealthMonitors and then delete them
        pool = self.get_pool(context, v['pool_id'])
        self.vcns_driver.delete_pool(context, v['pool_id'], edge_id)
        for monitor_id in pool.get('health_monitors'):
            # TODO(linb): add exception handling on error
            self.vcns_driver.delete_health_monitor(
                context, monitor_id, edge_id)

        router_binding = self._get_resource_router_id_binding(
            context, loadbalancer_db.Vip, resource_id=id)
        router = self._get_router(context, router_binding.router_id)
        self._delete_resource_router_id_binding(
            context, id, loadbalancer_db.Vip)
        super(NvpAdvancedPlugin, self).delete_vip(context, id)
        self._update_interface(context, router, sync=True)

    def update_pool(self, context, id, pool):
        pool['pool']['status'] = service_constants.PENDING_UPDATE
        p = super(NvpAdvancedPlugin, self).update_pool(context, id, pool)
        # Check whether the pool is already associated with the vip
        if not p.get('vip_id'):
            self._resource_set_status(context, loadbalancer_db.Pool,
                                      p['id'], service_constants.ACTIVE, p)
            return p
        try:
            self._vcns_update_pool(context, p)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool with id: %s!"), id)
                self._resource_set_status(context, loadbalancer_db.Pool,
                                          p['id'], service_constants.ERROR, p)
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  p['id'], service_constants.ACTIVE, p)
        return p

    def create_member(self, context, member):
        m = super(NvpAdvancedPlugin, self).create_member(context, member)
        pool_id = m.get('pool_id')
        pool = self.get_pool(context, pool_id)
        if not pool.get('vip_id'):
            self._resource_set_status(context, loadbalancer_db.Member,
                                      m['id'], service_constants.ACTIVE, m)
            return m
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id,
                                  service_constants.PENDING_UPDATE)
        try:
            self._vcns_update_pool(context, pool)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool with the member"))
                super(NvpAdvancedPlugin, self).delete_member(context, m['id'])

        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id, service_constants.ACTIVE)
        self._resource_set_status(context, loadbalancer_db.Member,
                                  m['id'], service_constants.ACTIVE, m)
        return m

    def update_member(self, context, id, member):
        member['member']['status'] = service_constants.PENDING_UPDATE
        old_member = self.get_member(context, id)
        m = super(NvpAdvancedPlugin, self).update_member(
            context, id, member)

        if m['pool_id'] != old_member['pool_id']:
            old_pool_id = old_member['pool_id']
            old_pool = self.get_pool(context, old_pool_id)
            if old_pool.get('vip_id'):
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    old_pool_id, service_constants.PENDING_UPDATE)
                try:
                    self._vcns_update_pool(context, old_pool)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to update old pool "
                                        "with the member"))
                        super(NvpAdvancedPlugin, self).delete_member(
                            context, m['id'])
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    old_pool_id, service_constants.ACTIVE)

        pool_id = m['pool_id']
        pool = self.get_pool(context, pool_id)
        if not pool.get('vip_id'):
            self._resource_set_status(context, loadbalancer_db.Member,
                                      m['id'], service_constants.ACTIVE, m)
            return m
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id,
                                  service_constants.PENDING_UPDATE)
        try:
            self._vcns_update_pool(context, pool)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool with the member"))
                super(NvpAdvancedPlugin, self).delete_member(
                    context, m['id'])

        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id, service_constants.ACTIVE)
        self._resource_set_status(context, loadbalancer_db.Member,
                                  m['id'], service_constants.ACTIVE, m)
        return m

    def delete_member(self, context, id):
        m = self.get_member(context, id)
        super(NvpAdvancedPlugin, self).delete_member(context, id)
        pool_id = m['pool_id']
        pool = self.get_pool(context, pool_id)
        if not pool.get('vip_id'):
            return
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id, service_constants.PENDING_UPDATE)
        try:
            self._vcns_update_pool(context, pool)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to update pool with the member"))
        self._resource_set_status(context, loadbalancer_db.Pool,
                                  pool_id, service_constants.ACTIVE)

    def update_health_monitor(self, context, id, health_monitor):
        old_hm = super(NvpAdvancedPlugin, self).get_health_monitor(
            context, id)
        hm = super(NvpAdvancedPlugin, self).update_health_monitor(
            context, id, health_monitor)
        for hm_pool in hm.get('pools'):
            pool_id = hm_pool['pool_id']
            pool = self.get_pool(context, pool_id)
            if pool.get('vip_id'):
                edge_id = self._get_edge_id_by_vip_id(
                    context, pool['vip_id'])
                try:
                    self.vcns_driver.update_health_monitor(
                        context, edge_id, old_hm, hm)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to update monitor "
                                        "with id: %s!"), id)
        return hm

    def delete_health_monitor(self, context, id):
        with context.session.begin(subtransactions=True):
            qry = context.session.query(
                loadbalancer_db.PoolMonitorAssociation
            ).filter_by(monitor_id=id)
            for assoc in qry:
                pool_id = assoc['pool_id']
                super(NvpAdvancedPlugin,
                      self).delete_pool_health_monitor(context,
                                                       id,
                                                       pool_id)
                pool = self.get_pool(context, pool_id)
                if not pool.get('vip_id'):
                    continue
                edge_id = self._get_edge_id_by_vip_id(
                    context, pool['vip_id'])
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    pool_id, service_constants.PENDING_UPDATE)
                try:
                    self._vcns_update_pool(context, pool)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to update pool with monitor!"))
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    pool_id, service_constants.ACTIVE)
                try:
                    self.vcns_driver.delete_health_monitor(
                        context, id, edge_id)
                except Exception:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_("Failed to delete monitor "
                                        "with id: %s!"), id)
                        super(NvpAdvancedPlugin,
                              self).delete_health_monitor(context, id)
                        self._delete_resource_router_id_binding(
                            context, id, loadbalancer_db.HealthMonitor)

        super(NvpAdvancedPlugin, self).delete_health_monitor(context, id)
        self._delete_resource_router_id_binding(
            context, id, loadbalancer_db.HealthMonitor)

    def create_pool_health_monitor(self, context,
                                   health_monitor, pool_id):
        monitor_id = health_monitor['health_monitor']['id']
        pool = self.get_pool(context, pool_id)
        monitors = pool.get('health_monitors')
        if len(monitors) > 0:
            msg = _("VCNS currently supports only one monitor per pool")
            LOG.error(msg)
            raise nvp_exc.NvpServicePluginException(err_msg=msg)
        # Check whether the pool is already associated with the vip
        if not pool.get('vip_id'):
            res = super(NvpAdvancedPlugin,
                        self).create_pool_health_monitor(context,
                                                         health_monitor,
                                                         pool_id)
            return res
        # Get the edge_id
        edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
        res = super(NvpAdvancedPlugin,
                    self).create_pool_health_monitor(context,
                                                     health_monitor,
                                                     pool_id)
        monitor = self.get_health_monitor(context, monitor_id)
        # TODO(linb): add exception handling on error
        self.vcns_driver.create_health_monitor(context, edge_id, monitor)
        # Get the updated pool
        pool['health_monitors'].append(monitor['id'])
        self._resource_set_status(
            context, loadbalancer_db.Pool,
            pool_id, service_constants.PENDING_UPDATE)
        try:
            self._vcns_update_pool(context, pool)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_("Failed to associate monitor with pool!"))
                self._resource_set_status(
                    context, loadbalancer_db.Pool,
                    pool_id, service_constants.ERROR)
                super(NvpAdvancedPlugin, self).delete_pool_health_monitor(
                    context, monitor_id, pool_id)
        self._resource_set_status(
            context, loadbalancer_db.Pool,
            pool_id, service_constants.ACTIVE)
        self._resource_set_status(
            context, loadbalancer_db.PoolMonitorAssociation,
            monitor_id, service_constants.ACTIVE, res,
            pool_id=pool_id)
        return res

    def delete_pool_health_monitor(self, context, id, pool_id):
        super(NvpAdvancedPlugin, self).delete_pool_health_monitor(
            context, id, pool_id)
        pool = self.get_pool(context, pool_id)
        # Check whether the pool is still associated with a vip
        if pool.get('vip_id'):
            # Delete the monitor on the vshield edge
            edge_id = self._get_edge_id_by_vip_id(context, pool['vip_id'])
            self._resource_set_status(
                context, loadbalancer_db.Pool,
                pool_id, service_constants.PENDING_UPDATE)
            try:
                self._vcns_update_pool(context, pool)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception(
                        _("Failed to update pool with pool_monitor!"))
                    self._resource_set_status(
                        context, loadbalancer_db.Pool,
                        pool_id, service_constants.ERROR)
            # TODO(linb): add exception handling on error
            self.vcns_driver.delete_health_monitor(context, id, edge_id)
            self._resource_set_status(
                context, loadbalancer_db.Pool,
                pool_id, service_constants.ACTIVE)


class VcnsCallbacks(object):
    """Edge callback implementation.

    Callback functions for asynchronous tasks.
    """
    def __init__(self, plugin):
        self.plugin = plugin

    def edge_deploy_started(self, task):
        """Callback when the deployment task has started."""
        jobdata = task.userdata['jobdata']
        lrouter = jobdata['lrouter']
        context = jobdata['context']
        edge_id = task.userdata.get('edge_id')
        name = task.userdata['router_name']
        if edge_id:
            LOG.debug(_("Start deploying %(edge_id)s for router %(name)s"), {
                'edge_id': edge_id,
                'name': name})
            vcns_db.update_vcns_router_binding(
                context.session, lrouter['uuid'], edge_id=edge_id)
        else:
            LOG.debug(_("Failed to deploy Edge for router %s"), name)
            vcns_db.update_vcns_router_binding(
                context.session, lrouter['uuid'],
                status=service_constants.ERROR)

    def edge_deploy_result(self, task):
        """Callback when the deployment task has finished."""
        jobdata = task.userdata['jobdata']
        lrouter = jobdata['lrouter']
        context = jobdata['context']
        name = task.userdata['router_name']
        router_db = None
        try:
            router_db = self.plugin._get_router(context, lrouter['uuid'])
        except l3.RouterNotFound:
            # Router might have been deleted before deploy finished
            LOG.exception(_("Router %s not found"), lrouter['uuid'])

        if task.status == TaskStatus.COMPLETED:
            LOG.debug(_("Successfully deployed %(edge_id)s for "
                        "router %(name)s"), {
                            'edge_id': task.userdata['edge_id'],
                            'name': name})
            if (router_db and
                    router_db['status'] == service_constants.PENDING_CREATE):
                router_db['status'] = service_constants.ACTIVE

            binding = vcns_db.get_vcns_router_binding(
                context.session, lrouter['uuid'])
            # only update status to active if its status is pending create
            if binding['status'] == service_constants.PENDING_CREATE:
                vcns_db.update_vcns_router_binding(
                    context.session, lrouter['uuid'],
                    status=service_constants.ACTIVE)
        else:
            LOG.debug(_("Failed to deploy Edge for router %s"), name)
            if router_db:
                router_db['status'] = service_constants.ERROR
            vcns_db.update_vcns_router_binding(
                context.session, lrouter['uuid'],
                status=service_constants.ERROR)

    def edge_delete_result(self, task):
        jobdata = task.userdata['jobdata']
        router_id = task.userdata['router_id']
        context = jobdata['context']
        if task.status == TaskStatus.COMPLETED:
            vcns_db.delete_vcns_router_binding(context.session,
                                               router_id)

    def interface_update_result(self, task):
        LOG.debug(_("interface_update_result %d"), task.status)

    def snat_create_result(self, task):
        LOG.debug(_("snat_create_result %d"), task.status)

    def snat_delete_result(self, task):
        LOG.debug(_("snat_delete_result %d"), task.status)

    def dnat_create_result(self, task):
        LOG.debug(_("dnat_create_result %d"), task.status)

    def dnat_delete_result(self, task):
        LOG.debug(_("dnat_delete_result %d"), task.status)

    def routes_update_result(self, task):
        LOG.debug(_("routes_update_result %d"), task.status)

    def nat_update_result(self, task):
        LOG.debug(_("nat_update_result %d"), task.status)
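

# As used by _proxy_create_lswitch, the positional arguments below appear to
# mirror nvplib.create_lswitch's signature: args[1] is the tenant id,
# args[2] the switch name, args[3] the transport zone config, and the
# optional args[4]/args[5] the neutron network id and shared flag.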
def _process_base_create_lswitch_args(*args, **kwargs):
    tags = [{"tag": nvplib.NEUTRON_VERSION, "scope": "quantum"}]
    if args[1]:
        tags.append({"tag": args[1], "scope": "os_tid"})
    switch_name = args[2]
    tz_config = args[3]
    if "neutron_net_id" in kwargs or len(args) >= 5:
        neutron_net_id = kwargs.get('neutron_net_id')
        if neutron_net_id is None:
            neutron_net_id = args[4]
        tags.append({"tag": neutron_net_id,
                     "scope": "quantum_net_id"})
    if kwargs.get("shared", False) or len(args) >= 6:
        tags.append({"tag": "true", "scope": "shared"})
    if kwargs.get("tags"):
        tags.extend(kwargs["tags"])
    return switch_name, tz_config, tags