L3 API support for Nicira plugin

This patch adds support for the Quantum L3 API extension to the Nicira NVP Quantum plugin. It includes code for managing Quantum routers, router interfaces, external gateways, and floating IPs, and for applying the corresponding configuration on the NVP controller through the NVP API. It also adds NVP-specific support for reaching the Nova metadata server via injection of a host route into VM instances.

blueprint nvp-l3-api
Change-Id: I20b45190f1eaca0229d30871b25484b90298a18b

This commit is contained in:
parent 63007f6f87
commit 268d39c230
@@ -1,5 +1,10 @@
 [DEFAULT]
+
+# The following flag will cause a host route to the metadata server
+# to be injected into instances. The metadata server will be reached
+# via the dhcp server.
+metadata_dhcp_host_route = False
 
 [DATABASE]
 # This line MUST be changed to actually run the plugin.
 # Example:
@@ -28,9 +33,6 @@ reconnect_interval = 2
 # max_lp_per_bridged_ls = 64
 # Maximum number of ports for each overlay (stt, gre) logical switch
 # max_lp_per_overlay_ls = 256
-# Time from when a connection pool is switched to another controller
-# during failure.
-# failover_time = 5
 # Number of connects to each controller node.
 # concurrent_connections = 3
 # Name of the default cluster where requests should be sent if a nova zone id
@@ -53,6 +55,11 @@ reconnect_interval = 2
 # console "admin" section.
 # nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7 # (Optional)
 
+# UUID of the default layer 3 gateway service to use for this cluster
+# This is optional, but should be filled if planning to use logical routers
+# with external gateways
+# default_l3_gw_service_uuid =
+
 # This parameter describes a connection to a single NVP controller. Format:
 # <ip>:<port>:<user>:<pw>:<req_timeout>:<http_timeout>:<retries>:<redirects>
 # <ip> is the ip address of the controller
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2013 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+"""nvp_portmap
+
+Revision ID: 38335592a0dc
+Revises: 49332180ca96
+Create Date: 2013-01-15 06:04:56.328991
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '38335592a0dc'
+down_revision = '49332180ca96'
+
+# Change to ['*'] if this migration applies to all plugins
+
+migration_for_plugins = [
+    'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
+]
+
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import mysql
+
+from quantum.db import migration
+
+
+def upgrade(active_plugin=None, options=None):
+    if not migration.should_run(active_plugin, migration_for_plugins):
+        return
+
+    op.create_table(
+        'quantum_nvp_port_mapping',
+        sa.Column('quantum_id', sa.String(length=36), nullable=False),
+        sa.Column('nvp_id', sa.String(length=36), nullable=True),
+        sa.ForeignKeyConstraint(['quantum_id'], ['ports.id'],
+                                ondelete='CASCADE'),
+        sa.PrimaryKeyConstraint('quantum_id'))
+
+
+def downgrade(active_plugin=None, options=None):
+    if not migration.should_run(active_plugin, migration_for_plugins):
+        return
+
+    op.drop_table('quantum_nvp_port_mapping')
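Note: the migration runs only when the active plugin appears in migration_for_plugins. A minimal sketch of how such a gate can work; this is a hypothetical implementation, the authoritative helper is quantum.db.migration.should_run and may differ:

    # Hypothetical sketch of a plugin gate like migration.should_run
    def should_run(active_plugin, migrate_plugins):
        # '*' means the migration applies to every plugin
        return '*' in migrate_plugins or active_plugin in migrate_plugins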
File diff suppressed because it is too large
@@ -17,6 +17,10 @@
 from quantum.openstack.common import cfg
 
+core_opts = [
+    cfg.BoolOpt('metadata_dhcp_host_route', default=False),
+]
+
 nvp_opts = [
     cfg.IntOpt('max_lp_per_bridged_ls', default=64,
                help=_("Maximum number of ports of a logical switch on a "
@@ -51,8 +55,15 @@ cluster_opts = [
                "controller. A different connection for each "
                "controller in the cluster can be specified; "
                "there must be at least one connection per "
-               "cluster."))
+               "cluster.")),
+    cfg.StrOpt('default_l3_gw_service_uuid',
+               help=_("Unique identifier of the NVP L3 Gateway service "
+                      "which will be used for implementing routers and "
+                      "floating IPs"))
 ]
 
+# Register the configuration options
+cfg.CONF.register_opts(core_opts)
 cfg.CONF.register_opts(nvp_opts, "NVP")
 
 
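Note: after registration the new options are read from nvp.ini through cfg.CONF: metadata_dhcp_host_route from [DEFAULT] (core_opts) and default_l3_gw_service_uuid from the [NVP] group. A usage sketch; the surrounding plugin code is illustrative:

    from quantum.openstack.common import cfg

    if cfg.CONF.metadata_dhcp_host_route:
        pass  # plugin injects a metadata host route via the dhcp agent
    l3_gw_uuid = cfg.CONF.NVP.default_l3_gw_service_uuid  # None if unset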
@@ -40,3 +40,9 @@ class NvpNoMorePortsException(NvpPluginException):
 
 class NvpOutOfSyncException(NvpPluginException):
     message = _("Quantum state has diverged from the networking backend!")
+
+
+class NvpNatRuleMismatch(NvpPluginException):
+    message = _("While retrieving NAT rules, %(actual_rules)s were found "
+                "whereas rules in the (%(min_rules)s,%(max_rules)s) interval "
+                "were expected")
@@ -54,3 +54,20 @@ def add_network_binding(session, network_id, binding_type, tz_uuid, vlan_id):
                                            tz_uuid, vlan_id)
         session.add(binding)
     return binding
+
+
+def add_quantum_nvp_port_mapping(session, quantum_id, nvp_id):
+    with session.begin(subtransactions=True):
+        mapping = nicira_models.QuantumNvpPortMapping(quantum_id, nvp_id)
+        session.add(mapping)
+        return mapping
+
+
+def get_nvp_port_id(session, quantum_id):
+    try:
+        mapping = (session.query(nicira_models.QuantumNvpPortMapping).
+                   filter_by(quantum_id=quantum_id).
+                   one())
+        return mapping['nvp_id']
+    except exc.NoResultFound:
+        return
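Note: the two new helpers give the plugin a persistent Quantum-to-NVP port uuid mapping, so NVP ports can be located without tag queries. A usage sketch; the session and the uuid values are illustrative:

    # store the mapping when the NVP port is created
    add_quantum_nvp_port_mapping(session, quantum_port_id, nvp_port_id)
    # later lookups return the NVP uuid, or None when no mapping exists
    nvp_uuid = get_nvp_port_id(session, quantum_port_id)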
@@ -50,3 +50,17 @@ class NvpNetworkBinding(model_base.BASEV2):
                                                self.binding_type,
                                                self.tz_uuid,
                                                self.vlan_id)
+
+
+class QuantumNvpPortMapping(model_base.BASEV2):
+    """Represents the mapping between quantum and nvp port uuids."""
+
+    __tablename__ = 'quantum_nvp_port_mapping'
+    quantum_id = Column(String(36),
+                        ForeignKey('ports.id', ondelete="CASCADE"),
+                        primary_key=True)
+    nvp_id = Column(String(36))
+
+    def __init__(self, quantum_id, nvp_id):
+        self.quantum_id = quantum_id
+        self.nvp_id = nvp_id
@@ -14,6 +14,16 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+#
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+
+import re
+
+from quantum.api.v2 import attributes
+from quantum.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
 
 
 class NVPCluster(object):
@@ -45,8 +55,9 @@ class NVPCluster(object):
         return ''.join(ss)
 
     def add_controller(self, ip, port, user, password, request_timeout,
-                       http_timeout, retries, redirects,
-                       default_tz_uuid, uuid=None, zone=None):
+                       http_timeout, retries, redirects, default_tz_uuid,
+                       uuid=None, zone=None,
+                       default_l3_gw_service_uuid=None):
         """Add a new set of controller parameters.
 
         :param ip: IP address of controller.
@@ -59,14 +70,33 @@ class NVPCluster(object):
         :param redirects: maximum number of server redirect responses to
             follow.
         :param default_tz_uuid: default transport zone uuid.
+        :param default_next_hop: default next hop for routers in this cluster.
         :param uuid: UUID of this cluster (used in MDI configs).
         :param zone: Zone of this cluster (used in MDI configs).
         """
-        keys = [
-            'ip', 'user', 'password', 'default_tz_uuid', 'uuid', 'zone']
+
+        keys = ['ip', 'user', 'password', 'default_tz_uuid',
+                'default_l3_gw_service_uuid', 'uuid', 'zone']
         controller_dict = dict([(k, locals()[k]) for k in keys])
+        default_tz_uuid = controller_dict.get('default_tz_uuid')
+        if not re.match(attributes.UUID_PATTERN, default_tz_uuid):
+            LOG.warning(_("default_tz_uuid:%(default_tz_uuid)s is not a "
+                          "valid UUID in the cluster %(cluster_name)s. "
+                          "Network operations might not work "
+                          "properly in this cluster"),
+                        {'default_tz_uuid': default_tz_uuid,
+                         'cluster_name': self.name})
+        # default_l3_gw_service_uuid is an optional parameter
+        # validate only if specified
+        l3_gw_service_uuid = controller_dict.get('default_l3_gw_service_uuid')
+        if (l3_gw_service_uuid and
+            not re.match(attributes.UUID_PATTERN, l3_gw_service_uuid)):
+            LOG.warning(_("default_l3_gw_service_uuid:%(l3_gw_service_uuid)s "
+                          "is not a valid UUID in the cluster "
+                          "%(cluster_name)s. Logical router operations "
+                          "might not work properly in this cluster"),
+                        {'l3_gw_service_uuid': l3_gw_service_uuid,
+                         'cluster_name': self.name})
         int_keys = [
             'port', 'request_timeout', 'http_timeout', 'retries', 'redirects']
         for k in int_keys:
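Note: both uuids are only checked against attributes.UUID_PATTERN and logged as warnings, so a malformed value does not abort cluster setup. A runnable sketch of the check; the pattern below is illustrative, the authoritative one lives in quantum.api.v2.attributes:

    import re

    HEX = '[0-9a-fA-F]'
    UUID_PATTERN = '%(h)s{8}-%(h)s{4}-%(h)s{4}-%(h)s{4}-%(h)s{12}' % {'h': HEX}

    assert re.match(UUID_PATTERN, '615be8e4-82e9-4fd2-b4b3-fd141e51a5a7')
    assert not re.match(UUID_PATTERN, 'not-a-valid-uuid')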
@@ -121,6 +151,10 @@ class NVPCluster(object):
     def default_tz_uuid(self):
         return self.controllers[0]['default_tz_uuid']
 
+    @property
+    def default_l3_gw_service_uuid(self):
+        return self.controllers[0]['default_l3_gw_service_uuid']
+
     @property
     def zone(self):
         return self.controllers[0]['zone']
@@ -22,7 +22,6 @@
 
 from copy import copy
 import hashlib
-import itertools
 import json
 import logging
 
@@ -30,6 +29,8 @@ import logging
 # no quantum-specific logic in it
 from quantum.common import constants
 from quantum.common import exceptions as exception
+from quantum.plugins.nicira.nicira_nvp_plugin.common import (
+    exceptions as nvp_exc)
 from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
 
 
@ -42,7 +43,21 @@ DEF_TRANSPORT_TYPE = "stt"
|
|||||||
URI_PREFIX = "/ws.v1"
|
URI_PREFIX = "/ws.v1"
|
||||||
# Resources exposed by NVP API
|
# Resources exposed by NVP API
|
||||||
LSWITCH_RESOURCE = "lswitch"
|
LSWITCH_RESOURCE = "lswitch"
|
||||||
LPORT_RESOURCE = "lport"
|
LSWITCHPORT_RESOURCE = "lport-%s" % LSWITCH_RESOURCE
|
||||||
|
LROUTER_RESOURCE = "lrouter"
|
||||||
|
LROUTERPORT_RESOURCE = "lport-%s" % LROUTER_RESOURCE
|
||||||
|
LROUTERNAT_RESOURCE = "nat-lrouter"
|
||||||
|
|
||||||
|
# Constants for NAT rules
|
||||||
|
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
|
||||||
|
"destination_port_min", "source_ip_addresses",
|
||||||
|
"source_port_max", "source_port_min", "protocol"]
|
||||||
|
|
||||||
|
SNAT_KEYS = ["to_src_port_min", "to_src_port_max", "to_src_ip_min",
|
||||||
|
"to_src_ip_max"]
|
||||||
|
|
||||||
|
DNAT_KEYS = ["to_dst_port", "to_dst_ip_min", "to_dst_ip_max"]
|
||||||
|
|
||||||
|
|
||||||
LOCAL_LOGGING = False
|
LOCAL_LOGGING = False
|
||||||
if LOCAL_LOGGING:
|
if LOCAL_LOGGING:
|
||||||
@@ -71,27 +86,25 @@ def _build_uri_path(resource,
                     resource_id=None,
                     parent_resource_id=None,
                     fields=None,
-                    relations=None, filters=None):
-    # TODO(salvatore-orlando): This is ugly. do something more clever
-    # and aovid the if statement
-    if resource == LPORT_RESOURCE:
-        res_path = ("%s/%s/%s" % (LSWITCH_RESOURCE,
-                                  parent_resource_id,
-                                  resource) +
-                    (resource_id and "/%s" % resource_id or ''))
-    else:
-        res_path = resource + (resource_id and
-                               "/%s" % resource_id or '')
-
+                    relations=None, filters=None, is_attachment=False):
+    resources = resource.split('-')
+    res_path = resources[0] + (resource_id and "/%s" % resource_id or '')
+    if len(resources) > 1:
+        # There is also a parent resource to account for in the uri
+        res_path = "%s/%s/%s" % (resources[1],
+                                 parent_resource_id,
+                                 res_path)
+    if is_attachment:
+        res_path = "%s/attachment" % res_path
     params = []
     params.append(fields and "fields=%s" % fields)
     params.append(relations and "relations=%s" % relations)
     if filters:
         params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()])
     uri_path = "%s/%s" % (URI_PREFIX, res_path)
-    query_string = reduce(lambda x, y: "%s&%s" % (x, y),
-                          itertools.ifilter(lambda x: x is not None, params),
-                          "")
-    if query_string:
-        uri_path += "?%s" % query_string
+    non_empty_params = [x for x in params if x is not None]
+    if len(non_empty_params):
+        query_string = '&'.join(non_empty_params)
+        if query_string:
+            uri_path += "?%s" % query_string
     return uri_path
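Note: the rewrite replaces the lport special case with a generic child-parent resource encoding: "lport-lswitch" splits into child "lport" and parent "lswitch". A hand-derived sketch of the URIs it yields, assuming the module-level constants from this change are in scope (uuids illustrative):

    _build_uri_path(LROUTER_RESOURCE)
    # -> '/ws.v1/lrouter'
    _build_uri_path(LSWITCHPORT_RESOURCE, 'lp-id', parent_resource_id='ls-id')
    # -> '/ws.v1/lswitch/ls-id/lport/lp-id'
    _build_uri_path(LROUTERPORT_RESOURCE, 'lp-id', 'lr-id', is_attachment=True)
    # -> '/ws.v1/lrouter/lr-id/lport/lp-id/attachment'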
@@ -279,6 +292,110 @@ def update_lswitch(cluster, lswitch_id, display_name,
     return obj
 
 
+def create_lrouter(cluster, tenant_id, display_name, nexthop):
+    """Create a NVP logical router on the specified cluster.
+
+    :param cluster: The target NVP cluster
+    :param tenant_id: Identifier of the Openstack tenant for which
+        the logical router is being created
+    :param display_name: Descriptive name of this logical router
+    :param nexthop: External gateway IP address for the logical router
+    :raise NvpApiException: if there is a problem while communicating
+        with the NVP controller
+    """
+    tags = [{"tag": tenant_id, "scope": "os_tid"}]
+    lrouter_obj = {
+        "display_name": display_name,
+        "tags": tags,
+        "routing_config": {
+            "default_route_next_hop": {
+                "gateway_ip_address": nexthop,
+                "type": "RouterNextHop"
+            },
+            "type": "SingleDefaultRouteImplicitRoutingConfig"
+        },
+        "type": "LogicalRouterConfig"
+    }
+    try:
+        return json.loads(do_single_request("POST",
+                                            _build_uri_path(LROUTER_RESOURCE),
+                                            json.dumps(lrouter_obj),
+                                            cluster=cluster))
+    except NvpApiClient.NvpApiException:
+        # just log and re-raise - let the caller handle it
+        LOG.exception(_("An exception occurred while communicating with "
+                        "the NVP controller for cluster:%s"), cluster.name)
+        raise
+
+
+def delete_lrouter(cluster, lrouter_id):
+    try:
+        do_single_request("DELETE",
+                          _build_uri_path(LROUTER_RESOURCE,
+                                          resource_id=lrouter_id),
+                          cluster=cluster)
+    except NvpApiClient.NvpApiException:
+        # just log and re-raise - let the caller handle it
+        LOG.exception(_("An exception occurred while communicating with "
+                        "the NVP controller for cluster:%s"), cluster.name)
+        raise
+
+
+def get_lrouter(cluster, lrouter_id):
+    try:
+        return json.loads(do_single_request(
+            "GET",
+            _build_uri_path(LROUTER_RESOURCE,
+                            resource_id=lrouter_id,
+                            relations='LogicalRouterStatus'),
+            cluster=cluster))
+    except NvpApiClient.NvpApiException:
+        # just log and re-raise - let the caller handle it
+        LOG.exception(_("An exception occurred while communicating with "
+                        "the NVP controller for cluster:%s"), cluster.name)
+        raise
+
+
+def get_lrouters(cluster, tenant_id, fields=None, filters=None):
+    actual_filters = {}
+    if filters:
+        actual_filters.update(filters)
+    if tenant_id:
+        actual_filters['tag'] = tenant_id
+        actual_filters['tag_scope'] = 'os_tid'
+    lrouter_fields = "uuid,display_name,fabric_status,tags"
+    return get_all_query_pages(
+        _build_uri_path(LROUTER_RESOURCE,
+                        fields=lrouter_fields,
+                        relations='LogicalRouterStatus',
+                        filters=actual_filters),
+        cluster)
+
+
+def update_lrouter(cluster, lrouter_id, display_name, nexthop):
+    lrouter_obj = get_lrouter(cluster, lrouter_id)
+    if not display_name and not nexthop:
+        # Nothing to update
+        return lrouter_obj
+    # It seems that this is faster than doing an if on display_name
+    lrouter_obj["display_name"] = display_name or lrouter_obj["display_name"]
+    if nexthop:
+        nh_element = lrouter_obj["routing_config"].get(
+            "default_route_next_hop")
+        if nh_element:
+            nh_element["gateway_ip_address"] = nexthop
+    try:
+        return json.loads(do_single_request(
+            "PUT",
+            _build_uri_path(LROUTER_RESOURCE,
+                            resource_id=lrouter_id),
+            json.dumps(lrouter_obj),
+            cluster=cluster))
+    except NvpApiClient.NvpApiException:
+        # just log and re-raise - let the caller handle it
+        LOG.exception(_("An exception occurred while communicating with "
+                        "the NVP controller for cluster:%s"), cluster.name)
+        raise
+
+
 def get_all_networks(cluster, tenant_id, networks):
     """Append the quantum network uuids we can find in the given cluster to
     "networks"
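Note: create_lrouter always builds a router with implicit routing and a single default route. A sketch of a call and the POST body it sends to /ws.v1/lrouter, derived from the code above (values illustrative):

    router = create_lrouter(cluster, 'tenant-1', 'router-1', '192.168.1.254')
    # body: {"display_name": "router-1",
    #        "tags": [{"tag": "tenant-1", "scope": "os_tid"}],
    #        "routing_config": {
    #            "default_route_next_hop": {
    #                "gateway_ip_address": "192.168.1.254",
    #                "type": "RouterNextHop"},
    #            "type": "SingleDefaultRouteImplicitRoutingConfig"},
    #        "type": "LogicalRouterConfig"}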
@@ -330,26 +447,46 @@ def delete_networks(cluster, net_id, lswitch_ids):
         raise exception.QuantumException()
 
 
-def query_ports(cluster, network, relations=None, fields="*", filters=None):
-    uri = "/ws.v1/lswitch/" + network + "/lport?"
-    if relations:
-        uri += "relations=%s" % relations
-    uri += "&fields=%s" % fields
+def query_lswitch_lports(cluster, ls_uuid, fields="*",
+                         filters=None, relations=None):
+    # Fix filter for attachments
     if filters and "attachment" in filters:
-        uri += "&attachment_vif_uuid=%s" % filters["attachment"]
+        filters['attachment_vif_uuid'] = filters["attachment"]
+        del filters['attachment']
+    uri = _build_uri_path(LSWITCHPORT_RESOURCE, parent_resource_id=ls_uuid,
+                          fields=fields, filters=filters, relations=relations)
     try:
         resp_obj = do_single_request("GET", uri, cluster=cluster)
-    except NvpApiClient.ResourceNotFound as e:
-        LOG.error(_("Network not found, Error: %s"), str(e))
-        raise exception.NetworkNotFound(net_id=network)
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
+    except NvpApiClient.ResourceNotFound:
+        LOG.exception(_("Logical switch: %s not found"), ls_uuid)
+        raise
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while querying logical ports on "
+                        "the NVP platform"))
+        raise
     return json.loads(resp_obj)["results"]
 
 
-def delete_port(cluster, port):
+def query_lrouter_lports(cluster, lr_uuid, fields="*",
+                         filters=None, relations=None):
+    uri = _build_uri_path(LROUTERPORT_RESOURCE, parent_resource_id=lr_uuid,
+                          fields=fields, filters=filters, relations=relations)
+    try:
+        resp_obj = do_single_request("GET", uri, cluster=cluster)
+    except NvpApiClient.ResourceNotFound:
+        LOG.exception(_("Logical router: %s not found"), lr_uuid)
+        raise
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while querying logical router "
+                        "ports on the NVP platform"))
+        raise
+    return json.loads(resp_obj)["results"]
+
+
+def delete_port(cluster, switch, port):
+    uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
     try:
-        do_single_request("DELETE", port['_href'], cluster=cluster)
+        do_single_request("DELETE", uri, cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error(_("Port or Network not found, Error: %s"), str(e))
         raise exception.PortNotFound(port_id=port['uuid'])
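Note: query_lswitch_lports still accepts the generic 'attachment' filter used by callers of the old query_ports; it is rewritten in place into the NVP query parameter attachment_vif_uuid before the URI is built. A sketch, assuming the functions above are in scope (uuids illustrative):

    ports = query_lswitch_lports(cluster, 'ls-1',
                                 filters={'attachment': 'vif-1'})
    # GET /ws.v1/lswitch/ls-1/lport?fields=*&attachment_vif_uuid=vif-1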
@@ -357,27 +494,21 @@ def delete_port(cluster, port):
         raise exception.QuantumException()
 
 
-def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
-    """Return (url, cluster_id) of port or raises ResourceNotFound
-    """
-    query = ("/ws.v1/lswitch/%s/lport?fields=admin_status_enabled,"
-             "fabric_status_up,uuid&tag=%s&tag_scope=q_port_id"
-             "&relations=LogicalPortStatus" % (lswitch, quantum_tag))
-    LOG.debug(_("Looking for port with q_tag '%(quantum_tag)s' "
-                "on: %(lswitch)s"),
-              locals())
-    for c in clusters:
-        try:
-            res_obj = do_single_request('GET', query, cluster=c)
-        except Exception:
-            continue
-        res = json.loads(res_obj)
-        if len(res["results"]) == 1:
-            return (res["results"][0], c)
-    LOG.error(_("Port or Network not found"))
-    raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)
+def get_logical_port_status(cluster, switch, port):
+    query = ("/ws.v1/lswitch/" + switch + "/lport/"
+             + port + "?relations=LogicalPortStatus")
+    try:
+        res_obj = do_single_request('GET', query, cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        LOG.error(_("Port or Network not found, Error: %s"), str(e))
+        raise exception.PortNotFound(port_id=port, net_id=switch)
+    except NvpApiClient.NvpApiException as e:
+        raise exception.QuantumException()
+    res = json.loads(res_obj)
+    # copy over admin_status_enabled
+    res["_relations"]["LogicalPortStatus"]["admin_status_enabled"] = (
+        res["admin_status_enabled"])
+    return res["_relations"]["LogicalPortStatus"]
 
 
 def get_port_by_display_name(clusters, lswitch, display_name):
@@ -483,7 +614,8 @@ def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
     _configure_extensions(lport_obj, mac_address, fixed_ips,
                           port_security_enabled, security_profiles)
 
-    path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid)
+    path = _build_uri_path(LSWITCHPORT_RESOURCE,
+                           parent_resource_id=lswitch_uuid)
     try:
         resp_obj = do_single_request("POST", path,
                                      json.dumps(lport_obj),
@@ -498,6 +630,150 @@ def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
     return result
 
 
+def create_router_lport(cluster, lrouter_uuid, tenant_id, quantum_port_id,
+                        display_name, admin_status_enabled, ip_addresses):
+    """Creates a logical port on the assigned logical router."""
+    tags = [dict(scope='os_tid', tag=tenant_id),
+            dict(scope='q_port_id', tag=quantum_port_id)]
+    lport_obj = dict(
+        admin_status_enabled=admin_status_enabled,
+        display_name=display_name,
+        tags=tags,
+        ip_addresses=ip_addresses,
+        type="LogicalRouterPortConfig"
+    )
+    path = _build_uri_path(LROUTERPORT_RESOURCE,
+                           parent_resource_id=lrouter_uuid)
+    try:
+        resp_obj = do_single_request("POST", path,
+                                     json.dumps(lport_obj),
+                                     cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        LOG.error(_("Logical router not found, Error: %s"), str(e))
+        raise
+
+    result = json.loads(resp_obj)
+    LOG.debug(_("Created logical port %(lport_uuid)s on "
+                "logical router %(lrouter_uuid)s"),
+              {'lport_uuid': result['uuid'],
+               'lrouter_uuid': lrouter_uuid})
+    return result
+
+
+def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
+                        tenant_id, quantum_port_id, display_name,
+                        admin_status_enabled, ip_addresses):
+    """Updates a logical port on the assigned logical router."""
+    lport_obj = dict(
+        admin_status_enabled=admin_status_enabled,
+        display_name=display_name,
+        tags=[dict(scope='os_tid', tag=tenant_id),
+              dict(scope='q_port_id', tag=quantum_port_id)],
+        ip_addresses=ip_addresses,
+        type="LogicalRouterPortConfig"
+    )
+    # Do not pass null items to NVP
+    for key in lport_obj.keys():
+        if lport_obj[key] is None:
+            del lport_obj[key]
+    path = _build_uri_path(LROUTERPORT_RESOURCE,
+                           lrouter_port_uuid,
+                           parent_resource_id=lrouter_uuid)
+    try:
+        resp_obj = do_single_request("PUT", path,
+                                     json.dumps(lport_obj),
+                                     cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        LOG.error(_("Logical router or router port not found, "
+                    "Error: %s"), str(e))
+        raise
+
+    result = json.loads(resp_obj)
+    LOG.debug(_("Updated logical port %(lport_uuid)s on "
+                "logical router %(lrouter_uuid)s"),
+              {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
+    return result
+
+
+def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
+    """Deletes a logical port on the assigned logical router."""
+    path = _build_uri_path(LROUTERPORT_RESOURCE, lport_uuid, lrouter_uuid)
+    try:
+        do_single_request("DELETE", path, cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        LOG.error(_("Logical router not found, Error: %s"), str(e))
+        raise
+    LOG.debug(_("Delete logical router port %(lport_uuid)s on "
+                "logical router %(lrouter_uuid)s"),
+              {'lport_uuid': lport_uuid,
+               'lrouter_uuid': lrouter_uuid})
+
+
+def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
+    nvp_port = get_port(cluster, ls_uuid, lp_uuid,
+                        relations="LogicalPortAttachment")
+    try:
+        relations = nvp_port.get('_relations')
+        if relations:
+            att_data = relations.get('LogicalPortAttachment')
+            if att_data:
+                lrp_uuid = att_data.get('peer_port_uuid')
+                if lrp_uuid:
+                    delete_router_lport(cluster, lr_uuid, lrp_uuid)
+    except (NvpApiClient.NvpApiException, NvpApiClient.ResourceNotFound):
+        LOG.exception(_("Unable to fetch and delete peer logical "
+                        "router port for logical switch port:%s"),
+                      lp_uuid)
+        raise
+
+
+def find_router_gw_port(context, cluster, router_id):
+    """Retrieves the external gateway port for a NVP logical router."""
+
+    # Find the uuid of nvp ext gw logical router port
+    # TODO(salvatore-orlando): Consider storing it in Quantum DB
+    results = query_lrouter_lports(
+        cluster, router_id,
+        filters={'attachment_gwsvc_uuid': cluster.default_l3_gw_service_uuid})
+    if len(results):
+        # Return logical router port
+        return results[0]
+
+
+def plug_router_port_attachment(cluster, router_id, port_id,
+                                attachment_uuid, nvp_attachment_type):
+    """Attach a router port to the given attachment.
+
+    Current attachment types:
+       - PatchAttachment [-> logical switch port uuid]
+       - L3GatewayAttachment [-> L3GatewayService uuid]
+    """
+    uri = _build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
+                          is_attachment=True)
+    attach_obj = {}
+    attach_obj["type"] = nvp_attachment_type
+    if nvp_attachment_type == "PatchAttachment":
+        attach_obj["peer_port_uuid"] = attachment_uuid
+    elif nvp_attachment_type == "L3GatewayAttachment":
+        attach_obj["l3_gateway_service_uuid"] = attachment_uuid
+    else:
+        raise Exception(_("Invalid NVP attachment type '%s'"),
+                        nvp_attachment_type)
+    try:
+        resp_obj = do_single_request(
+            "PUT", uri, json.dumps(attach_obj), cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        LOG.exception(_("Router Port not found, Error: %s"), str(e))
+        raise
+    except NvpApiClient.Conflict as e:
+        LOG.exception(_("Conflict while setting router port attachment"))
+        raise
+    except NvpApiClient.NvpApiException as e:
+        LOG.exception(_("Unable to plug attachment into logical router port"))
+        raise
+    result = json.loads(resp_obj)
+    return result
+
+
 def get_port_status(cluster, lswitch_id, port_id):
     """Retrieve the operational status of the port"""
     try:
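Note: both router interfaces and external gateways reduce to the same two steps against NVP: create a logical router port, then plug an attachment into it. A sketch of the two flows, assuming the functions above are in scope (uuids illustrative):

    lport = create_router_lport(cluster, 'lr-1', 'tenant-1', 'q-port-1',
                                'qr-port', True, ['10.0.0.1/24'])
    # router interface: patch to a logical switch port
    plug_router_port_attachment(cluster, 'lr-1', lport['uuid'],
                                'peer-lswitch-port-uuid', 'PatchAttachment')
    # external gateway: bind to the configured L3 gateway service
    plug_router_port_attachment(cluster, 'lr-1', lport['uuid'],
                                cluster.default_l3_gw_service_uuid,
                                'L3GatewayAttachment')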
@@ -653,7 +929,7 @@ def create_security_profile(cluster, tenant_id, security_profile):
                  'logical_port_ingress_rules': []}
 
         update_security_group_rules(cluster, rsp['uuid'], rules)
-    LOG.debug("Created Security Profile: %s" % rsp)
+    LOG.debug(_("Created Security Profile: %s"), rsp)
     return rsp
 
 
@@ -674,7 +950,7 @@ def update_security_group_rules(cluster, spid, rules):
     except NvpApiClient.NvpApiException as e:
         LOG.error(format_exception("Unknown", e, locals()))
         raise exception.QuantumException()
-    LOG.debug("Updated Security Profile: %s" % rsp)
+    LOG.debug(_("Updated Security Profile: %s"), rsp)
     return rsp
 
 
@@ -686,3 +962,154 @@ def delete_security_profile(cluster, spid):
     except NvpApiClient.NvpApiException as e:
         LOG.error(format_exception("Unknown", e, locals()))
         raise exception.QuantumException()
+
+
+def _create_nat_match_obj(**kwargs):
+    nat_match_obj = {'ethertype': 'IPv4'}
+    delta = set(kwargs.keys()) - set(MATCH_KEYS)
+    if delta:
+        raise Exception(_("Invalid keys for NAT match: %s"), delta)
+    nat_match_obj.update(kwargs)
+    return nat_match_obj
+
+
+def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
+    LOG.debug(_("Creating NAT rule: %s"), nat_rule_obj)
+    uri = _build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id)
+    try:
+        resp = do_single_request("POST", uri, json.dumps(nat_rule_obj),
+                                 cluster=cluster)
+    except NvpApiClient.ResourceNotFound:
+        LOG.exception(_("NVP Logical Router %s not found"), router_id)
+        raise
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while creating the NAT rule "
+                        "on the NVP platform"))
+        raise
+    rule = json.loads(resp)
+    return rule
+
+
+def create_lrouter_snat_rule(cluster, router_id,
+                             min_src_ip, max_src_ip, **kwargs):
+
+    nat_match_obj = _create_nat_match_obj(**kwargs)
+    nat_rule_obj = {
+        "to_source_ip_address_min": min_src_ip,
+        "to_source_ip_address_max": max_src_ip,
+        "type": "SourceNatRule",
+        "match": nat_match_obj
+    }
+    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
+
+
+def create_lrouter_dnat_rule(cluster, router_id, to_min_dst_ip,
+                             to_max_dst_ip, to_dst_port=None, **kwargs):
+
+    nat_match_obj = _create_nat_match_obj(**kwargs)
+    nat_rule_obj = {
+        "to_destination_ip_address_min": to_min_dst_ip,
+        "to_destination_ip_address_max": to_max_dst_ip,
+        "type": "DestinationNatRule",
+        "match": nat_match_obj
+    }
+    if to_dst_port:
+        nat_rule_obj['to_destination_port'] = to_dst_port
+    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)
+
+
+def delete_nat_rules_by_match(cluster, router_id, rule_type,
+                              max_num_expected,
+                              min_num_expected=0,
+                              **kwargs):
+    # remove nat rules
+    nat_rules = query_nat_rules(cluster, router_id)
+    to_delete_ids = []
+    for r in nat_rules:
+        if r['type'] != rule_type:
+            continue
+
+        for key, value in kwargs.iteritems():
+            if not (key in r['match'] and r['match'][key] == value):
+                break
+        else:
+            to_delete_ids.append(r['uuid'])
+    if not (len(to_delete_ids) in
+            range(min_num_expected, max_num_expected + 1)):
+        raise nvp_exc.NvpNatRuleMismatch(actual_rules=len(to_delete_ids),
+                                         min_rules=min_num_expected,
+                                         max_rules=max_num_expected)
+
+    for rule_id in to_delete_ids:
+        delete_router_nat_rule(cluster, router_id, rule_id)
+
+
+def delete_router_nat_rule(cluster, router_id, rule_id):
+    uri = _build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
+    try:
+        do_single_request("DELETE", uri, cluster=cluster)
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while removing NAT rule "
+                        "'%(nat_rule_uuid)s' for logical "
+                        "router '%(lrouter_uuid)s'"),
+                      {'nat_rule_uuid': rule_id, 'lrouter_uuid': router_id})
+        raise
+
+
+def get_router_nat_rule(cluster, tenant_id, router_id, rule_id):
+    uri = _build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
+    try:
+        resp = do_single_request("GET", uri, cluster=cluster)
+    except NvpApiClient.ResourceNotFound:
+        LOG.exception(_("NAT rule %s not found"), rule_id)
+        raise
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while retrieving NAT rule '%s' "
+                        "from the NVP platform"), rule_id)
+        raise
+    res = json.loads(resp)
+    return res
+
+
+def query_nat_rules(cluster, router_id, fields="*", filters=None):
+    uri = _build_uri_path(LROUTERNAT_RESOURCE, parent_resource_id=router_id,
+                          fields=fields, filters=filters)
+    try:
+        resp = do_single_request("GET", uri, cluster=cluster)
+    except NvpApiClient.ResourceNotFound:
+        LOG.exception(_("NVP Logical Router '%s' not found"), router_id)
+        raise
+    except NvpApiClient.NvpApiException:
+        LOG.exception(_("An error occurred while retrieving NAT rules for "
+                        "NVP logical router '%s'"), router_id)
+        raise
+    res = json.loads(resp)
+    return res["results"]
+
+
+# NOTE(salvatore-orlando): The following FIXME applies in general to
+# each operation on list attributes.
+# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
+def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
+                            ips_to_add, ips_to_remove):
+    uri = _build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
+    try:
+        port = json.loads(do_single_request("GET", uri, cluster=cluster))
+        # TODO(salvatore-orlando): Enforce ips_to_add intersection with
+        # ips_to_remove is empty
+        ip_address_set = set(port['ip_addresses'])
+        ip_address_set = ip_address_set - set(ips_to_remove)
+        ip_address_set = ip_address_set | set(ips_to_add)
+        # Set is not JSON serializable - convert to list
+        port['ip_addresses'] = list(ip_address_set)
+        do_single_request("PUT", uri, json.dumps(port), cluster=cluster)
+    except NvpApiClient.ResourceNotFound as e:
+        msg = (_("Router Port %(lport_id)s not found on router "
+                 "%(lrouter_id)s") % locals())
+        LOG.exception(msg)
+        raise nvp_exc.NvpPluginException(err_desc=msg)
+    except NvpApiClient.NvpApiException as e:
+        msg = _("An exception occurred while updating IP addresses on a "
+                "router logical port:%s") % str(e)
+        LOG.exception(msg)
+        raise nvp_exc.NvpPluginException(err_desc=msg)
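Note: delete_nat_rules_by_match selects rules with Python's for/else: a rule is kept only if the inner loop over the expected match keys completes without a break, and the number of selected rules must fall in [min_num_expected, max_num_expected] or NvpNatRuleMismatch is raised. A standalone, runnable sketch of that selection logic (data illustrative):

    rules = [{'uuid': 'r1', 'type': 'SourceNatRule',
              'match': {'source_ip_addresses': '10.0.0.0/24'}},
             {'uuid': 'r2', 'type': 'SourceNatRule', 'match': {}}]
    wanted = {'source_ip_addresses': '10.0.0.0/24'}
    to_delete = []
    for r in rules:
        for key, value in wanted.items():
            if not (key in r['match'] and r['match'][key] == value):
                break
        else:  # no break: every expected key matched
            to_delete.append(r['uuid'])
    assert to_delete == ['r1']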
quantum/tests/unit/nicira/etc/fake_get_lrouter.json (new file, 28 lines)
@@ -0,0 +1,28 @@
+{
+    "display_name": "%(display_name)s",
+    "uuid": "%(uuid)s",
+    "tags": %(tags_json)s,
+    "routing_config": {
+        "type": "SingleDefaultRouteImplicitRoutingConfig",
+        "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
+        "default_route_next_hop": {
+            "type": "RouterNextHop",
+            "_schema": "/ws.v1/schema/RouterNextHop",
+            "gateway_ip_address": "%(default_next_hop)s"
+        }
+    },
+    "_schema": "/ws.v1/schema/LogicalRouterConfig",
+    "_relations": {
+        "LogicalRouterStatus": {
+            "_href": "/ws.v1/lrouter/%(uuid)s/status",
+            "lport_admin_up_count": %(lport_count)d,
+            "_schema": "/ws.v1/schema/LogicalRouterStatus",
+            "lport_count": %(lport_count)d,
+            "fabric_status": true,
+            "type": "LogicalRouterStatus",
+            "lport_link_up_count": %(lport_count)d
+        }
+    },
+    "type": "LogicalRouterConfig",
+    "_href": "/ws.v1/lrouter/%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_get_lrouter_lport.json (new file, 11 lines)
@@ -0,0 +1,11 @@
+{
+    "display_name": "%(display_name)s",
+    "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
+    "tags":
+        [{"scope": "q_port_id", "tag": "%(quantum_port_id)s"},
+         {"scope": "os_tid", "tag": "%(tenant_id)s"}],
+    "ip_addresses": %(ip_addresses_json)s,
+    "_schema": "/ws.v1/schema/LogicalRouterPortConfig",
+    "type": "LogicalRouterPortConfig",
+    "uuid": "%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_get_lrouter_lport_att.json (new file, 9 lines)
@@ -0,0 +1,9 @@
+{
+    "LogicalPortAttachment":
+        {
+            %(peer_port_href_field)s
+            %(peer_port_uuid_field)s
+            "type": "%(type)s",
+            "schema": "/ws.v1/schema/%(type)s"
+        }
+}
quantum/tests/unit/nicira/etc/fake_get_lrouter_nat.json (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+    "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
+    "type": "%(type)s",
+    "match": %(match_json)s,
+    "uuid": "%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_get_lswitch_lport_att.json (new file, 10 lines)
@@ -0,0 +1,10 @@
+{
+    "LogicalPortAttachment":
+        {
+            %(peer_port_href_field)s
+            %(peer_port_uuid_field)s
+            %(vif_uuid_field)s
+            "type": "%(type)s",
+            "schema": "/ws.v1/schema/%(type)s"
+        }
+}
quantum/tests/unit/nicira/etc/fake_post_lrouter.json (new file, 22 lines)
@@ -0,0 +1,22 @@
+{
+    "display_name": "%(display_name)s",
+    "uuid": "%(uuid)s",
+    "tags": [
+        {
+            "scope": "os_tid",
+            "tag": "%(tenant_id)s"
+        }
+    ],
+    "routing_config": {
+        "type": "SingleDefaultRouteImplicitRoutingConfig",
+        "_schema": "/ws.v1/schema/SingleDefaultRouteImplicitRoutingConfig",
+        "default_route_next_hop": {
+            "type": "RouterNextHop",
+            "_schema": "/ws.v1/schema/RouterNextHop",
+            "gateway_ip_address": "%(default_next_hop)s"
+        }
+    },
+    "_schema": "/ws.v1/schema/LogicalRouterConfig",
+    "type": "LogicalRouterConfig",
+    "_href": "/ws.v1/lrouter/%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_post_lrouter_lport.json (new file, 10 lines)
@@ -0,0 +1,10 @@
+{
+    "display_name": "%(display_name)s",
+    "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(uuid)s",
+    "_schema": "/ws.v1/schema/LogicalRouterPortConfig",
+    "mac_address": "00:00:00:00:00:00",
+    "admin_status_enabled": true,
+    "ip_addresses": %(ip_addresses_json)s,
+    "type": "LogicalRouterPortConfig",
+    "uuid": "%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_post_lrouter_nat.json (new file, 6 lines)
@@ -0,0 +1,6 @@
+{
+    "_href": "/ws.v1/lrouter/%(lr_uuid)s/nat/%(uuid)s",
+    "type": "%(type)s",
+    "match": %(match_json)s,
+    "uuid": "%(uuid)s"
+}
quantum/tests/unit/nicira/etc/fake_put_lrouter_lport_att.json (new file, 10 lines)
@@ -0,0 +1,10 @@
+{
+    "LogicalPortAttachment":
+        {
+            %(peer_port_href_field)s
+            %(peer_port_uuid_field)s
+            "_href": "/ws.v1/lrouter/%(lr_uuid)s/lport/%(lp_uuid)s/attachment",
+            "type": "%(type)s",
+            "schema": "/ws.v1/schema/%(type)s"
+        }
+}
quantum/tests/unit/nicira/etc/fake_put_lswitch_lport_att.json (new file, 11 lines)
@@ -0,0 +1,11 @@
+{
+    "LogicalPortAttachment":
+        {
+            %(peer_port_href_field)s
+            %(peer_port_uuid_field)s
+            %(vif_uuid_field)s
+            "_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(lp_uuid)s/attachment",
+            "type": "%(type)s",
+            "schema": "/ws.v1/schema/%(type)s"
+        }
+}
@@ -5,3 +5,4 @@ default_tz_uuid = fake_tz_uuid
 nova_zone_id = whatever
 nvp_cluster_uuid = fake_cluster_uuid
 nvp_controller_connection=fake:443:admin:admin:30:10:2:2
+default_l3_gw_uuid = whatever
@@ -15,39 +15,82 @@
 # under the License.
 
 import json
-import logging
 import urlparse
 
+from quantum.openstack.common import log as logging
 from quantum.openstack.common import uuidutils
 
 
-LOG = logging.getLogger("fake_nvpapiclient")
-LOG.setLevel(logging.DEBUG)
+LOG = logging.getLogger(__name__)
 
 
 class FakeClient:
 
+    LSWITCH_RESOURCE = 'lswitch'
+    LPORT_RESOURCE = 'lport'
+    LROUTER_RESOURCE = 'lrouter'
+    NAT_RESOURCE = 'nat'
+    SECPROF_RESOURCE = 'securityprofile'
+    LSWITCH_STATUS = 'lswitchstatus'
+    LROUTER_STATUS = 'lrouterstatus'
+    LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
+    LROUTER_LPORT_RESOURCE = 'lrouter_lport'
+    LROUTER_NAT_RESOURCE = 'lrouter_nat'
+    LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
+    LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
+    LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
+    LROUTER_LPORT_ATT = 'lrouter_lportattachment'
+
+    RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE,
+                 LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE]
+
     FAKE_GET_RESPONSES = {
-        "lswitch": "fake_get_lswitch.json",
-        "lport": "fake_get_lport.json",
-        "lportstatus": "fake_get_lport_status.json"
+        LSWITCH_RESOURCE: "fake_get_lswitch.json",
+        LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
+        LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
+        LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
+        LROUTER_RESOURCE: "fake_get_lrouter.json",
+        LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
+        LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
+        LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
+        LROUTER_STATUS: "fake_get_lrouter_status.json",
+        LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json"
     }
 
     FAKE_POST_RESPONSES = {
-        "lswitch": "fake_post_lswitch.json",
-        "lport": "fake_post_lport.json",
-        "securityprofile": "fake_post_security_profile.json"
+        LSWITCH_RESOURCE: "fake_post_lswitch.json",
+        LROUTER_RESOURCE: "fake_post_lrouter.json",
+        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
+        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
+        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
+        SECPROF_RESOURCE: "fake_post_security_profile.json"
     }
 
     FAKE_PUT_RESPONSES = {
-        "lswitch": "fake_post_lswitch.json",
-        "lport": "fake_post_lport.json",
-        "securityprofile": "fake_post_security_profile.json"
+        LSWITCH_RESOURCE: "fake_post_lswitch.json",
+        LROUTER_RESOURCE: "fake_post_lrouter.json",
+        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
+        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
+        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
+        LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
+        LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
+        SECPROF_RESOURCE: "fake_post_security_profile.json"
+    }
+
+    MANAGED_RELATIONS = {
+        LSWITCH_RESOURCE: [],
+        LROUTER_RESOURCE: [],
+        LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
+        LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
     }
 
     _fake_lswitch_dict = {}
-    _fake_lport_dict = {}
-    _fake_lportstatus_dict = {}
+    _fake_lrouter_dict = {}
+    _fake_lswitch_lport_dict = {}
+    _fake_lrouter_lport_dict = {}
+    _fake_lrouter_nat_dict = {}
+    _fake_lswitch_lportstatus_dict = {}
+    _fake_lrouter_lportstatus_dict = {}
    _fake_securityprofile_dict = {}
 
     def __init__(self, fake_files_path):
@ -83,9 +126,22 @@ class FakeClient:
|
|||||||
fake_lswitch['lport_count'] = 0
|
fake_lswitch['lport_count'] = 0
|
||||||
return fake_lswitch
|
return fake_lswitch
|
||||||
|
|
||||||
def _add_lport(self, body, ls_uuid):
|
def _add_lrouter(self, body):
|
||||||
|
fake_lrouter = json.loads(body)
|
||||||
|
fake_lrouter['uuid'] = uuidutils.generate_uuid()
|
||||||
|
self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
|
||||||
|
fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
|
||||||
|
fake_lrouter['lport_count'] = 0
|
||||||
|
default_nexthop = fake_lrouter['routing_config'].get(
|
||||||
|
'default_route_next_hop')
|
||||||
|
fake_lrouter['default_next_hop'] = default_nexthop.get(
|
||||||
|
'gateway_ip_address', '0.0.0.0')
|
||||||
|
return fake_lrouter
|
||||||
|
|
||||||
|
def _add_lswitch_lport(self, body, ls_uuid):
|
||||||
fake_lport = json.loads(body)
|
fake_lport = json.loads(body)
|
||||||
fake_lport['uuid'] = uuidutils.generate_uuid()
|
new_uuid = uuidutils.generate_uuid()
|
||||||
|
fake_lport['uuid'] = new_uuid
|
||||||
# put the tenant_id and the ls_uuid in the main dict
|
# put the tenant_id and the ls_uuid in the main dict
|
||||||
# for simplyfying templating
|
# for simplyfying templating
|
||||||
fake_lport['ls_uuid'] = ls_uuid
|
fake_lport['ls_uuid'] = ls_uuid
|
||||||
@ -93,7 +149,7 @@ class FakeClient:
|
|||||||
fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
|
fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
|
||||||
'q_port_id')
|
'q_port_id')
|
||||||
fake_lport['quantum_device_id'] = self._get_tag(fake_lport, 'vm_id')
|
fake_lport['quantum_device_id'] = self._get_tag(fake_lport, 'vm_id')
|
||||||
self._fake_lport_dict[fake_lport['uuid']] = fake_lport
|
self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
|
||||||
|
|
||||||
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
|
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
|
||||||
fake_lswitch['lport_count'] += 1
|
fake_lswitch['lport_count'] += 1
|
||||||
@ -102,7 +158,31 @@ class FakeClient:
|
|||||||
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
|
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
|
||||||
fake_lport_status['ls_name'] = fake_lswitch['display_name']
|
fake_lport_status['ls_name'] = fake_lswitch['display_name']
|
||||||
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
|
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
|
||||||
self._fake_lportstatus_dict[fake_lport['uuid']] = fake_lport_status
|
self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
|
||||||
|
return fake_lport
|
||||||
|
|
||||||
|
def _add_lrouter_lport(self, body, lr_uuid):
|
||||||
|
fake_lport = json.loads(body)
|
||||||
|
new_uuid = uuidutils.generate_uuid()
|
||||||
|
fake_lport['uuid'] = new_uuid
|
||||||
|
# put the tenant_id and the ls_uuid in the main dict
|
||||||
|
# for simplyfying templating
|
||||||
|
fake_lport['lr_uuid'] = lr_uuid
|
||||||
|
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
|
||||||
|
fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
|
||||||
|
'q_port_id')
|
||||||
|
# replace ip_address with its json dump
|
||||||
|
if 'ip_addresses' in fake_lport:
|
||||||
|
ip_addresses_json = json.dumps(fake_lport['ip_addresses'])
|
||||||
|
fake_lport['ip_addresses_json'] = ip_addresses_json
|
||||||
|
self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
|
||||||
|
fake_lrouter = self._fake_lrouter_dict[lr_uuid]
|
||||||
|
fake_lrouter['lport_count'] += 1
|
||||||
|
fake_lport_status = fake_lport.copy()
|
||||||
|
fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
|
||||||
|
fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
|
||||||
|
fake_lport_status['lr_name'] = fake_lrouter['display_name']
|
||||||
|
self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
|
||||||
return fake_lport
|
return fake_lport
|
||||||
|
|
||||||
def _add_securityprofile(self, body):
|
def _add_securityprofile(self, body):
|
||||||
@@ -117,29 +197,91 @@ class FakeClient:
                                               fake_securityprofile)
         return fake_securityprofile

+    def _add_lrouter_nat(self, body, lr_uuid):
+        fake_nat = json.loads(body)
+        new_uuid = uuidutils.generate_uuid()
+        fake_nat['uuid'] = new_uuid
+        fake_nat['lr_uuid'] = lr_uuid
+        self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
+        if 'match' in fake_nat:
+            match_json = json.dumps(fake_nat['match'])
+            fake_nat['match_json'] = match_json
+        return fake_nat
+
+    def _build_relation(self, src, dst, resource_type, relation):
+        if relation not in self.MANAGED_RELATIONS[resource_type]:
+            return  # Relation is not desired in output
+        if '_relations' not in src or not src['_relations'].get(relation):
+            return  # Item does not have relation
+        relation_data = src['_relations'].get(relation)
+        dst_relations = dst.get('_relations')
+        if not dst_relations:
+            dst_relations = {}
+        dst_relations[relation] = relation_data
+
+    def _fill_attachment(self, att_data, ls_uuid=None,
+                         lr_uuid=None, lp_uuid=None):
+        new_data = att_data.copy()
+        for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
+            if locals().get(k):
+                new_data[k] = locals()[k]
+
+        def populate_field(field_name):
+            if field_name in att_data:
+                new_data['%s_field' % field_name] = ('"%s" : "%s",'
+                                                     % (field_name,
+                                                        att_data[field_name]))
+                del new_data[field_name]
+            else:
+                new_data['%s_field' % field_name] = ""
+
+        for field in ['vif_uuid', 'peer_port_href', 'peer_port_uuid']:
+            populate_field(field)
+        return new_data
+
     def _get_resource_type(self, path):
-        uri_split = path.split('/')
-        resource_type = ('status' in uri_split and
-                         'lport' in uri_split and 'lportstatus'
-                         or 'lport' in uri_split and 'lport'
-                         or 'lswitch' in uri_split and 'lswitch' or
-                         'security-profile' in uri_split and 'securityprofile')
-        switch_uuid = ('lswitch' in uri_split and
-                       len(uri_split) > 3 and uri_split[3])
-        port_uuid = ('lport' in uri_split and
-                     len(uri_split) > 5 and uri_split[5])
-        securityprofile_uuid = ('security-profile' in uri_split and
-                                len(uri_split) > 3 and uri_split[3])
-        return (resource_type, switch_uuid, port_uuid, securityprofile_uuid)
+        """Identify the resource type and the relevant uuids in the uri.
+
+        Handles uris of the following forms:
+        /ws.v1/lswitch/xxx
+        /ws.v1/lswitch/xxx/status
+        /ws.v1/lswitch/xxx/lport/yyy
+        /ws.v1/lswitch/xxx/lport/yyy/status
+        /ws.v1/lrouter/zzz
+        /ws.v1/lrouter/zzz/status
+        /ws.v1/lrouter/zzz/lport/www
+        /ws.v1/lrouter/zzz/lport/www/status
+        """
+        # The first element will always be 'ws.v1' - so we just discard it
+        uri_split = path.split('/')[1:]
+        # parse uri_split backwards
+        suffix = ""
+        idx = len(uri_split) - 1
+        if 'status' in uri_split[idx]:
+            suffix = "status"
+            idx = idx - 1
+        elif 'attachment' in uri_split[idx]:
+            suffix = "attachment"
+            idx = idx - 1
+        # then check if we have a uuid
+        uuids = []
+        if uri_split[idx].replace('-', '') not in self.RESOURCES:
+            uuids.append(uri_split[idx])
+            idx = idx - 1
+        resource_type = "%s%s" % (uri_split[idx], suffix)
+        if idx > 1:
+            uuids.insert(0, uri_split[idx - 1])
+            resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
+        return (resource_type.replace('-', ''), uuids)

     def _list(self, resource_type, response_file,
-              switch_uuid=None, query=None):
+              parent_uuid=None, query=None, relations=None):
         (tag_filter, attr_filter) = self._get_filters(query)
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
         res_dict = getattr(self, '_fake_%s_dict' % resource_type)
-        if switch_uuid == "*":
-            switch_uuid = None
+        if parent_uuid == '*':
+            parent_uuid = None

         def _attr_match(res_uuid):
             if not attr_filter:
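
Note: the rewritten _get_resource_type walks the path backwards: it strips an
optional status/attachment suffix, collects up to two uuids, and joins parent
and child resource names with an underscore. A standalone sketch of the same
scheme, with an illustrative dash-free RESOURCES list (the real class defines
its own constants):

    RESOURCES = ['lswitch', 'lrouter', 'lport', 'nat', 'securityprofile']

    def parse_path(path):
        uri_split = path.split('/')[1:]   # element 0 is now 'ws.v1'
        suffix = ""
        idx = len(uri_split) - 1
        if uri_split[idx] in ('status', 'attachment'):
            suffix = uri_split[idx]
            idx -= 1
        uuids = []
        if uri_split[idx].replace('-', '') not in RESOURCES:
            uuids.append(uri_split[idx])
            idx -= 1
        resource_type = "%s%s" % (uri_split[idx], suffix)
        if idx > 1:
            uuids.insert(0, uri_split[idx - 1])
            resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
        return resource_type.replace('-', ''), uuids

    print(parse_path('/ws.v1/lswitch/xxx'))
    # ('lswitch', ['xxx'])
    print(parse_path('/ws.v1/lswitch/xxx/lport/yyy/status'))
    # ('lswitch_lportstatus', ['xxx', 'yyy'])
    print(parse_path('/ws.v1/lrouter/zzz/nat/nnn'))
    # ('lrouter_nat', ['zzz', 'nnn'])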
@@ -158,16 +300,49 @@ class FakeClient:
                             for x in res_dict[res_uuid]['tags']])

         def _lswitch_match(res_uuid):
-            if (not switch_uuid or
-                    res_dict[res_uuid].get('ls_uuid') == switch_uuid):
+            # verify that the switch exists
+            if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
+                raise Exception(_("lswitch:%s not found") % parent_uuid)
+            if (not parent_uuid
+                    or res_dict[res_uuid].get('ls_uuid') == parent_uuid):
                 return True
             return False
+
+        def _lrouter_match(res_uuid):
+            # verify that the router exists
+            if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
+                raise Exception(_("lrouter:%s not found") % parent_uuid)
+            if (not parent_uuid or
+                    res_dict[res_uuid].get('lr_uuid') == parent_uuid):
+                return True
+            return False
+
+        def _build_item(resource):
+            item = json.loads(response_template % resource)
+            if relations:
+                for relation in relations:
+                    self._build_relation(resource, item,
+                                         resource_type, relation)
+            return item
+
         for item in res_dict.itervalues():
             if 'tags' in item:
                 item['tags_json'] = json.dumps(item['tags'])
-        items = [json.loads(response_template % res_dict[res_uuid])
+        if resource_type in (self.LSWITCH_LPORT_RESOURCE,
+                             self.LSWITCH_LPORT_ATT,
+                             self.LSWITCH_LPORT_STATUS):
+            parent_func = _lswitch_match
+        elif resource_type in (self.LROUTER_LPORT_RESOURCE,
+                               self.LROUTER_LPORT_ATT,
+                               self.LROUTER_NAT_RESOURCE,
+                               self.LROUTER_LPORT_STATUS):
+            parent_func = _lrouter_match
+        else:
+            parent_func = lambda x: True
+
+        items = [_build_item(res_dict[res_uuid])
                  for res_uuid in res_dict
-                 if (_lswitch_match(res_uuid) and
+                 if (parent_func(res_uuid) and
                      _tag_match(res_uuid) and
                      _attr_match(res_uuid))]
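
Note: the parent-match predicate selected above is what scopes a _list call to
a single switch or router. An illustrative standalone reduction of that
dispatch (dict contents are made up):

    fake_lports = {
        'p1': {'ls_uuid': 'switch-a'},
        'p2': {'ls_uuid': 'switch-b'},
        'p3': {'lr_uuid': 'router-a'},
    }

    def make_parent_match(key, parent_uuid):
        # no parent uuid means "match everything", as in FakeClient._list
        return lambda res: not parent_uuid or res.get(key) == parent_uuid

    parent_func = make_parent_match('ls_uuid', 'switch-a')
    print(sorted(u for u, r in fake_lports.items() if parent_func(r)))
    # ['p1']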
@@ -175,8 +350,8 @@ class FakeClient:
                               'result_count': len(items)})

     def _show(self, resource_type, response_file,
-              switch_uuid, port_uuid=None):
-        target_uuid = port_uuid or switch_uuid
+              uuid1, uuid2=None, relations=None):
+        target_uuid = uuid2 or uuid1
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
         res_dict = getattr(self, '_fake_%s_dict' % resource_type)
@@ -194,32 +369,33 @@ class FakeClient:
     def handle_get(self, url):
         #TODO(salvatore-orlando): handle field selection
         parsedurl = urlparse.urlparse(url)
-        (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
-            parsedurl.path)
+        (res_type, uuids) = self._get_resource_type(parsedurl.path)
+        relations = urlparse.parse_qs(parsedurl.query).get('relations')
         response_file = self.FAKE_GET_RESPONSES.get(res_type)
         if not response_file:
             raise Exception("resource not found")
-        if res_type == 'lport':
-            if p_uuid:
-                return self._show(res_type, response_file, s_uuid, p_uuid)
+        if 'lport' in res_type or 'nat' in res_type:
+            if len(uuids) > 1:
+                return self._show(res_type, response_file, uuids[0],
+                                  uuids[1], relations=relations)
             else:
-                return self._list(res_type, response_file, s_uuid,
-                                  query=parsedurl.query)
-        elif res_type == 'lportstatus':
-            return self._show(res_type, response_file, s_uuid, p_uuid)
-        elif res_type == 'lswitch':
-            if s_uuid:
-                return self._show(res_type, response_file, s_uuid)
+                return self._list(res_type, response_file, uuids[0],
+                                  query=parsedurl.query, relations=relations)
+        elif ('lswitch' in res_type or 'lrouter' in res_type
+              or self.SECPROF_RESOURCE in res_type):
+            if len(uuids) > 0:
+                return self._show(res_type, response_file, uuids[0],
+                                  relations=relations)
             else:
                 return self._list(res_type, response_file,
-                                  query=parsedurl.query)
+                                  query=parsedurl.query,
+                                  relations=relations)
         else:
             raise Exception("unknown resource:%s" % res_type)

     def handle_post(self, url, body):
         parsedurl = urlparse.urlparse(url)
-        (res_type, s_uuid, _p, sec_uuid) = self._get_resource_type(
-            parsedurl.path)
+        (res_type, uuids) = self._get_resource_type(parsedurl.path)
         response_file = self.FAKE_POST_RESPONSES.get(res_type)
         if not response_file:
             raise Exception("resource not found")
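
Note: handle_get now reads the relations to include from the query string.
urlparse.parse_qs returns a list per key (or None when the key is absent),
which is what _list and _show then iterate over. A quick sketch, using the
Python 2 urlparse module this tree targets:

    import urlparse

    url = ('/ws.v1/lswitch/xxx/lport/yyy'
           '?relations=LogicalPortStatus&relations=LogicalPortAttachment')
    parsed = urlparse.urlparse(url)
    print(urlparse.parse_qs(parsed.query).get('relations'))
    # ['LogicalPortStatus', 'LogicalPortAttachment']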
@@ -227,37 +403,76 @@ class FakeClient:
             response_template = f.read()
         add_resource = getattr(self, '_add_%s' % res_type)
         args = [body]
-        if s_uuid:
-            args.append(s_uuid)
+        if len(uuids):
+            args.append(uuids[0])
         response = response_template % add_resource(*args)
         return response

     def handle_put(self, url, body):
         parsedurl = urlparse.urlparse(url)
-        (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
-            parsedurl.path)
-        target_uuid = p_uuid or s_uuid or sec_uuid
+        (res_type, uuids) = self._get_resource_type(parsedurl.path)
         response_file = self.FAKE_PUT_RESPONSES.get(res_type)
         if not response_file:
             raise Exception("resource not found")
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
+        # Manage attachment operations
+        is_attachment = False
+        if res_type.endswith('attachment'):
+            is_attachment = True
+            res_type = res_type[:res_type.index('attachment')]
         res_dict = getattr(self, '_fake_%s_dict' % res_type)
-        resource = res_dict[target_uuid]
-        resource.update(json.loads(body))
+        resource = res_dict[uuids[-1]]
+        if not is_attachment:
+            resource.update(json.loads(body))
+        else:
+            relations = resource.get("_relations")
+            if not relations:
+                relations = {}
+            relations['LogicalPortAttachment'] = json.loads(body)
+            resource['_relations'] = relations
+            body_2 = json.loads(body)
+            if body_2['type'] == "PatchAttachment":
+                # trick: mirror the attachment on the peer port as well
+                if self.LROUTER_RESOURCE in res_type:
+                    res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
+                                                  self.LSWITCH_RESOURCE)
+                elif self.LSWITCH_RESOURCE in res_type:
+                    res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
+                                                  self.LROUTER_RESOURCE)
+                res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
+                body_2['peer_port_uuid'] = uuids[-1]
+                resource_2 = res_dict_2[json.loads(body)['peer_port_uuid']]
+                relations_2 = resource_2.get("_relations")
+                if not relations_2:
+                    relations_2 = {}
+                relations_2['LogicalPortAttachment'] = body_2
+                resource_2['_relations'] = relations_2
+            elif body_2['type'] == "L3GatewayAttachment":
+                resource['attachment_gwsvc_uuid'] = (
+                    body_2['l3_gateway_service_uuid'])
-        response = response_template % resource
+        if not is_attachment:
+            response = response_template % resource
+        else:
+            if res_type == self.LROUTER_LPORT_RESOURCE:
+                lr_uuid = uuids[0]
+                ls_uuid = None
+            elif res_type == self.LSWITCH_LPORT_RESOURCE:
+                ls_uuid = uuids[0]
+                lr_uuid = None
+            lp_uuid = uuids[1]
+            response = response_template % self._fill_attachment(
+                json.loads(body), ls_uuid, lr_uuid, lp_uuid)
         return response

     def handle_delete(self, url):
         parsedurl = urlparse.urlparse(url)
-        (res_type, s_uuid, p_uuid, sec_uuid) = self._get_resource_type(
-            parsedurl.path)
-        target_uuid = p_uuid or s_uuid or sec_uuid
+        (res_type, uuids) = self._get_resource_type(parsedurl.path)
         response_file = self.FAKE_PUT_RESPONSES.get(res_type)
         if not response_file:
             raise Exception("resource not found")
         res_dict = getattr(self, '_fake_%s_dict' % res_type)
-        del res_dict[target_uuid]
+        del res_dict[uuids[-1]]
         return ""

     def fake_request(self, *args, **kwargs):
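
Note: the PatchAttachment branch in handle_put mirrors the attachment onto the
peer port, so a later GET with relations=LogicalPortAttachment works from
either side of a router-to-switch link. A standalone sketch of that
bookkeeping (dict names and uuids are illustrative):

    # fake router ports and fake switch ports
    lrouter_lports = {'rp-1': {}}
    lswitch_lports = {'sp-1': {}}

    # PUT on the router port's attachment, pointing at the switch port
    body = {'type': 'PatchAttachment', 'peer_port_uuid': 'sp-1'}
    lrouter_lports['rp-1'].setdefault(
        '_relations', {})['LogicalPortAttachment'] = body
    # mirror on the peer, with peer_port_uuid flipped back to the router port
    mirrored = dict(body, peer_port_uuid='rp-1')
    lswitch_lports['sp-1'].setdefault(
        '_relations', {})['LogicalPortAttachment'] = mirrored

    attachment = lswitch_lports['sp-1']['_relations']['LogicalPortAttachment']
    print(attachment['peer_port_uuid'])
    # rp-1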
@@ -267,5 +482,8 @@ class FakeClient:

     def reset_all(self):
         self._fake_lswitch_dict.clear()
-        self._fake_lport_dict.clear()
-        self._fake_lportstatus_dict.clear()
+        self._fake_lrouter_dict.clear()
+        self._fake_lswitch_lport_dict.clear()
+        self._fake_lrouter_lport_dict.clear()
+        self._fake_lswitch_lportstatus_dict.clear()
+        self._fake_lrouter_lportstatus_dict.clear()
quantum/tests/unit/nicira/test_defaults.py (new file, 30 lines)
@@ -0,0 +1,30 @@
+# Copyright 2013 Nicira Networks, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+import unittest2 as unittest
+
+from quantum.openstack.common import cfg
+from quantum.plugins.nicira.nicira_nvp_plugin.common import config
+
+
+class ConfigurationTest(unittest.TestCase):
+
+    def test_defaults(self):
+        self.assertEqual('sqlite://', cfg.CONF.DATABASE.sql_connection)
+        self.assertEqual(-1, cfg.CONF.DATABASE.sql_max_retries)
+        self.assertEqual(2, cfg.CONF.DATABASE.reconnect_interval)
+        self.assertEqual(64, cfg.CONF.NVP.max_lp_per_bridged_ls)
+        self.assertEqual(256, cfg.CONF.NVP.max_lp_per_overlay_ls)
+        self.assertEqual(5, cfg.CONF.NVP.concurrent_connections)
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import contextlib
 import logging
 import os

@@ -30,6 +31,7 @@ from quantum.tests.unit.nicira import fake_nvpapiclient
 import quantum.tests.unit.test_db_plugin as test_plugin
 import quantum.tests.unit.test_extension_portsecurity as psec
 import quantum.tests.unit.test_extension_security_group as ext_sg
+import quantum.tests.unit.test_l3_plugin as test_l3_plugin

 LOG = logging.getLogger(__name__)
 NICIRA_PKG_PATH = 'quantum.plugins.nicira.nicira_nvp_plugin'
@@ -174,6 +176,18 @@ class TestNiciraNetworksV2(test_plugin.TestNetworksV2,
             self._test_create_bridge_network(vlan_id=5000)
         self.assertEquals(ctx_manager.exception.code, 400)

+    def test_list_networks_filter_by_id(self):
+        # We add this unit test to cover some logic specific to the
+        # nvp plugin
+        with contextlib.nested(self.network(name='net1'),
+                               self.network(name='net2')) as (net1, net2):
+            query_params = 'id=%s' % net1['network']['id']
+            self._test_list_resources('network', [net1],
+                                      query_params=query_params)
+            query_params += '&id=%s' % net2['network']['id']
+            self._test_list_resources('network', [net1, net2],
+                                      query_params=query_params)
+
+
 class NiciraPortSecurityTestCase(psec.PortSecurityDBTestCase):
@@ -235,3 +249,12 @@ class NiciraSecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
 class TestNiciraSecurityGroup(ext_sg.TestSecurityGroups,
                               NiciraSecurityGroupsTestCase):
     pass
+
+
+class TestNiciraL3NatTestCase(test_l3_plugin.L3NatDBTestCase,
+                              NiciraPluginV2TestCase):
+
+    def test_floatingip_with_assoc_fails(self):
+        self._test_floatingip_with_assoc_fails(
+            'quantum.plugins.nicira.nicira_nvp_plugin.'
+            'QuantumPlugin.NvpPluginV2')
@@ -324,12 +324,17 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
         super(L3NatDBTestCase, self).tearDown()

     def _create_router(self, fmt, tenant_id, name=None,
-                       admin_state_up=None, set_context=False):
+                       admin_state_up=None, set_context=False,
+                       arg_list=None, **kwargs):
         data = {'router': {'tenant_id': tenant_id}}
         if name:
             data['router']['name'] = name
         if admin_state_up:
             data['router']['admin_state_up'] = admin_state_up
+        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
+            # Arg must be present and not empty
+            if arg in kwargs and kwargs[arg]:
+                data['router'][arg] = kwargs[arg]
         router_req = self.new_create_request('routers', data, fmt)
         if set_context and tenant_id:
             # create a specific auth context for this request
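
Note: with arg_list and **kwargs, subclasses can push extra router attributes
(for instance external_gateway_info) through the generic helper; an attribute
is copied into the request body only when present and non-empty. A standalone
reduction of that logic with illustrative values:

    def build_router_body(tenant_id, arg_list=None, **kwargs):
        data = {'router': {'tenant_id': tenant_id}}
        for arg in (('admin_state_up', 'tenant_id') + (arg_list or ())):
            # arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['router'][arg] = kwargs[arg]
        return data

    body = build_router_body('t1', arg_list=('external_gateway_info',),
                             admin_state_up=True,
                             external_gateway_info={'network_id': 'ext-net'})
    print(body['router']['external_gateway_info'])
    # {'network_id': 'ext-net'}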
@@ -1080,7 +1085,7 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
         self._show('floatingips', fip['floatingip']['id'],
                    expected_code=exc.HTTPNotFound.code)

-    def test_floatingip_with_assoc_fails(self):
+    def _test_floatingip_with_assoc_fails(self, plugin_class):
         with self.subnet(cidr='200.0.0.1/24') as public_sub:
             self._set_net_external(public_sub['subnet']['network_id'])
             with self.port() as private_port:
@@ -1093,9 +1098,8 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
                     self._router_interface_action('add', r['router']['id'],
                                                   private_sub['subnet']['id'],
                                                   None)
-                    PLUGIN_CLASS = 'quantum.db.l3_db.L3_NAT_db_mixin'
-                    METHOD = PLUGIN_CLASS + '._update_fip_assoc'
-                    with mock.patch(METHOD) as pl:
+                    method = plugin_class + '._update_fip_assoc'
+                    with mock.patch(method) as pl:
                         pl.side_effect = q_exc.BadRequest(
                             resource='floatingip',
                             msg='fake_error')
@@ -1117,6 +1121,10 @@ class L3NatDBTestCase(test_db_plugin.QuantumDbPluginV2TestCase):
                                               private_sub['subnet']['id'],
                                               None)

+    def test_floatingip_with_assoc_fails(self):
+        self._test_floatingip_with_assoc_fails(
+            'quantum.db.l3_db.L3_NAT_db_mixin')
+
     def test_floatingip_update(self):
         with self.port() as p:
             private_sub = {'subnet': {'id':