retire the NSX MH plugin

This patch retires the NSX MH plugin by:

- Deleting the nsx_mh plugin and unit test code.
- Using the NSX-V and V3 plugin test base classes where needed.
- Removing any extensions that are MH specific.

Change-Id: Idf65e44c301e790ca4ea69a6a8735aa0309a0dcc
parent b5f59ece91
commit 26135f34ac
@@ -11,6 +11,7 @@
 # under the License.
 
 from neutron.common import eventlet_utils
+from neutron.db.models import securitygroup  # noqa
 
 eventlet_utils.monkey_patch()
 
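The single added line above is a side-effect import: loading neutron's securitygroup module registers its SQLAlchemy models against the shared declarative base, which is why the otherwise-unused name carries a # noqa marker. A minimal standalone sketch of the pattern, using hypothetical module and class names rather than anything from this repository:

    import sqlalchemy as sa
    from sqlalchemy.orm import declarative_base

    Base = declarative_base()


    class SecurityGroup(Base):
        # Defining the class is what registers the 'securitygroups' table
        # on Base.metadata; merely importing this module is enough.
        __tablename__ = 'securitygroups'
        id = sa.Column(sa.String(36), primary_key=True)


    # A consumer keeps the import purely for its registration side effect:
    #     from myapp import models  # noqa
    print(list(Base.metadata.tables))  # ['securitygroups']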
@@ -1,162 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from __future__ import print_function
-
-import sys
-
-from oslo_config import cfg
-
-from neutron.common import config
-
-from vmware_nsx._i18n import _
-from vmware_nsx.common import config as nsx_config  # noqa
-from vmware_nsx.common import nsx_utils
-from vmware_nsx.nsxlib import mh as nsxlib
-
-config.setup_logging()
-
-
-def help(name):
-    print("Usage: %s path/to/neutron/plugin/ini/config/file" % name)
-    sys.exit(1)
-
-
-def get_nsx_controllers(cluster):
-    return cluster.nsx_controllers
-
-
-def config_helper(config_entity, cluster):
-    try:
-        return nsxlib.do_request('GET',
-                                 "/ws.v1/%s?fields=uuid" % config_entity,
-                                 cluster=cluster).get('results', [])
-    except Exception as e:
-        msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
-               % {'err': str(e),
-                  'ctl': ', '.join(get_nsx_controllers(cluster))})
-        raise Exception(msg)
-
-
-def get_control_cluster_nodes(cluster):
-    return config_helper("control-cluster/node", cluster)
-
-
-def get_gateway_services(cluster):
-    ret_gw_services = {"L2GatewayServiceConfig": [],
-                       "L3GatewayServiceConfig": []}
-    gw_services = config_helper("gateway-service", cluster)
-    for gw_service in gw_services:
-        ret_gw_services[gw_service['type']].append(gw_service['uuid'])
-    return ret_gw_services
-
-
-def get_transport_zones(cluster):
-    transport_zones = config_helper("transport-zone", cluster)
-    return [transport_zone['uuid'] for transport_zone in transport_zones]
-
-
-def get_transport_nodes(cluster):
-    transport_nodes = config_helper("transport-node", cluster)
-    return [transport_node['uuid'] for transport_node in transport_nodes]
-
-
-def is_transport_node_connected(cluster, node_uuid):
-    try:
-        return nsxlib.do_request('GET',
-                                 "/ws.v1/transport-node/%s/status" % node_uuid,
-                                 cluster=cluster)['connection']['connected']
-    except Exception as e:
-        msg = (_("Error '%(err)s' when connecting to controller(s): %(ctl)s.")
-               % {'err': str(e),
-                  'ctl': ', '.join(get_nsx_controllers(cluster))})
-        raise Exception(msg)
-
-
-def main():
-    if len(sys.argv) != 2:
-        help(sys.argv[0])
-    args = ['--config-file']
-    args.append(sys.argv[1])
-    config.init(args)
-    print("----------------------- Database Options -----------------------")
-    print("\tconnection: %s" % cfg.CONF.database.connection)
-    print("\tretry_interval: %d" % cfg.CONF.database.retry_interval)
-    print("\tmax_retries: %d" % cfg.CONF.database.max_retries)
-    print("----------------------- NSX Options -----------------------")
-    print("\tNSX Generation Timeout %d" % cfg.CONF.NSX.nsx_gen_timeout)
-    print("\tNumber of concurrent connections to each controller %d" %
-          cfg.CONF.NSX.concurrent_connections)
-    print("\tmax_lp_per_bridged_ls: %s" % cfg.CONF.NSX.max_lp_per_bridged_ls)
-    print("\tmax_lp_per_overlay_ls: %s" % cfg.CONF.NSX.max_lp_per_overlay_ls)
-    print("----------------------- Cluster Options -----------------------")
-    print("\tretries: %s" % cfg.CONF.retries)
-    print("\tredirects: %s" % cfg.CONF.redirects)
-    print("\thttp_timeout: %s" % cfg.CONF.http_timeout)
-    cluster = nsx_utils.create_nsx_cluster(
-        cfg.CONF,
-        cfg.CONF.NSX.concurrent_connections,
-        cfg.CONF.NSX.nsx_gen_timeout)
-    nsx_controllers = get_nsx_controllers(cluster)
-    num_controllers = len(nsx_controllers)
-    print("Number of controllers found: %s" % num_controllers)
-    if num_controllers == 0:
-        print("You must specify at least one controller!")
-        sys.exit(1)
-
-    get_control_cluster_nodes(cluster)
-    for controller in nsx_controllers:
-        print("\tController endpoint: %s" % controller)
-    gateway_services = get_gateway_services(cluster)
-    default_gateways = {
-        "L2GatewayServiceConfig": cfg.CONF.default_l2_gw_service_uuid,
-        "L3GatewayServiceConfig": cfg.CONF.default_l3_gw_service_uuid}
-    errors = 0
-    for svc_type in default_gateways.keys():
-        for uuid in gateway_services[svc_type]:
-            print("\t\tGateway(%s) uuid: %s" % (svc_type, uuid))
-        if (default_gateways[svc_type] and
-                default_gateways[svc_type] not in gateway_services[svc_type]):
-            print("\t\t\tError: specified default %s gateway (%s) is "
-                  "missing from NSX Gateway Services!" % (
-                      svc_type,
-                      default_gateways[svc_type]))
-            errors += 1
-    transport_zones = get_transport_zones(cluster)
-    print("\tTransport zones: %s" % transport_zones)
-    if cfg.CONF.default_tz_uuid not in transport_zones:
-        print("\t\tError: specified default transport zone "
-              "(%s) is missing from NSX transport zones!"
-              % cfg.CONF.default_tz_uuid)
-        errors += 1
-    transport_nodes = get_transport_nodes(cluster)
-    print("\tTransport nodes: %s" % transport_nodes)
-    node_errors = []
-    for node in transport_nodes:
-        if not is_transport_node_connected(cluster, node):
-            node_errors.append(node)
-
-    # Use different exit codes, so that we can distinguish
-    # between config and runtime errors
-    if len(node_errors):
-        print("\nThere are one or more transport nodes that are "
-              "not connected: %s. Please, revise!" % node_errors)
-        sys.exit(10)
-    elif errors:
-        print("\nThere are %d errors with your configuration. "
-              "Please, revise!" % errors)
-        sys.exit(12)
-    else:
-        print("Done.")
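The deleted configuration-check script above closes with a deliberate exit-code convention: 10 for a runtime problem (disconnected transport nodes) and 12 for a configuration problem, so a wrapper can branch on $? without parsing output. A minimal sketch of the same convention, with no NSX dependencies:

    import sys


    def report(disconnected_nodes, config_errors):
        # Distinct exit codes distinguish runtime failures (10) from
        # configuration failures (12); success falls through to exit 0.
        if disconnected_nodes:
            print("Transport nodes not connected: %s" % disconnected_nodes)
            sys.exit(10)
        elif config_errors:
            print("%d configuration errors found" % config_errors)
            sys.exit(12)
        print("Done.")


    if __name__ == '__main__':
        report([], 0)

A caller can then, for example, retry on exit code 10 but page an operator on 12.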
@@ -17,42 +17,17 @@ from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
 from neutron_lib.api.definitions import provider_net as pnet
 from neutron_lib.api import validators
 from neutron_lib import constants
-from neutron_lib import exceptions as n_exc
 from oslo_log import log
-import six
 
 from vmware_nsx.api_client import client
-from vmware_nsx.api_client import exception as api_exc
 from vmware_nsx.common import utils as vmw_utils
 from vmware_nsx.db import db as nsx_db
-from vmware_nsx.db import networkgw_db
 from vmware_nsx import nsx_cluster
-from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib
-from vmware_nsx.nsxlib.mh import router as routerlib
-from vmware_nsx.nsxlib.mh import secgroup as secgrouplib
 from vmware_nsx.nsxlib.mh import switch as switchlib
 
 LOG = log.getLogger(__name__)
 
 
-def fetch_nsx_switches(session, cluster, neutron_net_id):
-    """Retrieve logical switches for a neutron network.
-
-    This function is optimized for fetching all the lswitches always
-    with a single NSX query.
-    If there is more than 1 logical switch (chained switches use case)
-    NSX lswitches are queried by 'quantum_net_id' tag. Otherwise the NSX
-    lswitch is directly retrieved by id (more efficient).
-    """
-    nsx_switch_ids = get_nsx_switch_ids(session, cluster, neutron_net_id)
-    if len(nsx_switch_ids) > 1:
-        lswitches = switchlib.get_lswitches(cluster, neutron_net_id)
-    else:
-        lswitches = [switchlib.get_lswitch_by_id(
-            cluster, nsx_switch_ids[0])]
-    return lswitches
-
-
 def get_nsx_switch_ids(session, cluster, neutron_network_id):
     """Return the NSX switch id for a given neutron network.
 
@@ -134,77 +109,6 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
     return nsx_switch_id, nsx_port_id
 
 
-def get_nsx_security_group_id(session, cluster, neutron_id):
-    """Return the NSX sec profile uuid for a given neutron sec group.
-
-    First, look up the Neutron database. If not found, execute
-    a query on NSX platform as the mapping might be missing.
-    NOTE: Security groups are called 'security profiles' on the NSX backend.
-    """
-    nsx_id = nsx_db.get_nsx_security_group_id(session, neutron_id)
-    if not nsx_id:
-        # Find security profile on backend.
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each security group in Neutron's lifetime
-        nsx_sec_profiles = secgrouplib.query_security_profiles(
-            cluster, '*',
-            filters={'tag': neutron_id,
-                     'tag_scope': 'q_sec_group_id'})
-        # Only one result expected
-        # NOTE(salv-orlando): Not handling the case where more than one
-        # security profile is found with the same neutron port tag
-        if not nsx_sec_profiles:
-            LOG.warning("Unable to find NSX security profile for Neutron "
-                        "security group %s", neutron_id)
-            return
-        elif len(nsx_sec_profiles) > 1:
-            LOG.warning("Multiple NSX security profiles found for Neutron "
-                        "security group %s", neutron_id)
-        nsx_sec_profile = nsx_sec_profiles[0]
-        nsx_id = nsx_sec_profile['uuid']
-        with session.begin(subtransactions=True):
-            # Create DB mapping
-            nsx_db.add_neutron_nsx_security_group_mapping(
-                session, neutron_id, nsx_id)
-    return nsx_id
-
-
-def get_nsx_router_id(session, cluster, neutron_router_id):
-    """Return the NSX router uuid for a given neutron router.
-
-    First, look up the Neutron database. If not found, execute
-    a query on NSX platform as the mapping might be missing.
-    """
-    if not neutron_router_id:
-        return
-    nsx_router_id = nsx_db.get_nsx_router_id(
-        session, neutron_router_id)
-    if not nsx_router_id:
-        # Find logical router from backend.
-        # This is a rather expensive query, but it won't be executed
-        # more than once for each router in Neutron's lifetime
-        nsx_routers = routerlib.query_lrouters(
-            cluster, '*',
-            filters={'tag': neutron_router_id,
-                     'tag_scope': 'q_router_id'})
-        # Only one result expected
-        # NOTE(salv-orlando): Not handling the case where more than one
-        # port is found with the same neutron port tag
-        if not nsx_routers:
-            LOG.warning("Unable to find NSX router for Neutron router %s",
-                        neutron_router_id)
-            return
-        nsx_router = nsx_routers[0]
-        nsx_router_id = nsx_router['uuid']
-        with session.begin(subtransactions=True):
-            # Create DB mapping
-            nsx_db.add_neutron_nsx_router_mapping(
-                session,
-                neutron_router_id,
-                nsx_router_id)
-    return nsx_router_id
-
-
 def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
     cluster = nsx_cluster.NSXCluster(**cluster_opts)
 
@@ -223,39 +127,6 @@ def create_nsx_cluster(cluster_opts, concurrent_connections, gen_timeout):
     return cluster
 
 
-def get_nsx_device_status(cluster, nsx_uuid):
-    try:
-        status_up = l2gwlib.get_gateway_device_status(
-            cluster, nsx_uuid)
-        if status_up:
-            return networkgw_db.STATUS_ACTIVE
-        else:
-            return networkgw_db.STATUS_DOWN
-    except api_exc.NsxApiException:
-        return networkgw_db.STATUS_UNKNOWN
-    except n_exc.NotFound:
-        return networkgw_db.ERROR
-
-
-def get_nsx_device_statuses(cluster, tenant_id):
-    try:
-        status_dict = l2gwlib.get_gateway_devices_status(
-            cluster, tenant_id)
-        return dict((nsx_device_id,
-                     networkgw_db.STATUS_ACTIVE if connected
-                     else networkgw_db.STATUS_DOWN) for
-                    (nsx_device_id, connected) in six.iteritems(status_dict))
-    except api_exc.NsxApiException:
-        # Do not make a NSX API exception fatal
-        if tenant_id:
-            LOG.warning("Unable to retrieve operational status for "
-                        "gateway devices belonging to tenant: %s",
-                        tenant_id)
-        else:
-            LOG.warning("Unable to retrieve operational status for "
-                        "gateway devices")
-
-
 def _convert_bindings_to_nsx_transport_zones(bindings):
     nsx_transport_zones_config = []
     for binding in bindings:
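The two removed helpers, get_nsx_security_group_id and get_nsx_router_id, share one pattern: consult the local Neutron mapping table first, fall back to a tag-filtered query against the NSX backend, and persist any recovered mapping so the expensive backend query runs at most once per object. A dependency-free sketch of that pattern, where every collaborator is an injected callable rather than a real vmware_nsx API:

    def get_backend_id(session, cluster, neutron_id,
                       db_get, backend_query, db_add):
        backend_id = db_get(session, neutron_id)  # cheap local lookup
        if not backend_id:
            # Expensive backend scan, keyed by the neutron id tag
            results = backend_query(cluster, filters={'tag': neutron_id})
            if not results:
                return None  # unknown on both sides
            backend_id = results[0]['uuid']  # first match wins
            with session.begin(subtransactions=True):
                db_add(session, neutron_id, backend_id)  # cache the mapping
        return backend_id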
@@ -1,136 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log
-import six
-
-from vmware_nsx.common import nsx_utils
-
-LOG = log.getLogger(__name__)
-# Protocol number look up for supported protocols
-protocol_num_look_up = {'tcp': 6, 'icmp': 1, 'udp': 17, 'ipv6-icmp': 58}
-
-
-def _convert_to_nsx_rule(session, cluster, rule, with_id=False):
-    """Converts a Neutron security group rule to the NSX format.
-
-    This routine also replaces Neutron IDs with NSX UUIDs.
-    """
-    nsx_rule = {}
-    params = ['remote_ip_prefix', 'protocol',
-              'remote_group_id', 'port_range_min',
-              'port_range_max', 'ethertype']
-    if with_id:
-        params.append('id')
-
-    for param in params:
-        value = rule.get(param)
-        if param not in rule:
-            nsx_rule[param] = value
-        elif not value:
-            pass
-        elif param == 'remote_ip_prefix':
-            nsx_rule['ip_prefix'] = rule['remote_ip_prefix']
-        elif param == 'remote_group_id':
-            nsx_rule['profile_uuid'] = nsx_utils.get_nsx_security_group_id(
-                session, cluster, rule['remote_group_id'])
-
-        elif param == 'protocol':
-            try:
-                nsx_rule['protocol'] = int(rule['protocol'])
-            except (ValueError, TypeError):
-                nsx_rule['protocol'] = (
-                    protocol_num_look_up[rule['protocol']])
-        else:
-            nsx_rule[param] = value
-    return nsx_rule
-
-
-def _convert_to_nsx_rules(session, cluster, rules, with_id=False):
-    """Converts a list of Neutron security group rules to the NSX format."""
-    nsx_rules = {'logical_port_ingress_rules': [],
-                 'logical_port_egress_rules': []}
-    for direction in ['logical_port_ingress_rules',
-                      'logical_port_egress_rules']:
-        for rule in rules[direction]:
-            nsx_rules[direction].append(
-                _convert_to_nsx_rule(session, cluster, rule, with_id))
-    return nsx_rules
-
-
-def get_security_group_rules_nsx_format(session, cluster,
-                                        security_group_rules, with_id=False):
-    """Convert neutron security group rules into NSX format.
-
-    This routine splits Neutron security group rules into two lists, one
-    for ingress rules and the other for egress rules.
-    """
-
-    def fields(rule):
-        _fields = ['remote_ip_prefix', 'remote_group_id', 'protocol',
-                   'port_range_min', 'port_range_max', 'protocol', 'ethertype']
-        if with_id:
-            _fields.append('id')
-        return dict((k, v) for k, v in six.iteritems(rule) if k in _fields)
-
-    ingress_rules = []
-    egress_rules = []
-    for rule in security_group_rules:
-        if rule.get('souce_group_id'):
-            rule['remote_group_id'] = nsx_utils.get_nsx_security_group_id(
-                session, cluster, rule['remote_group_id'])
-
-        if rule['direction'] == 'ingress':
-            ingress_rules.append(fields(rule))
-        elif rule['direction'] == 'egress':
-            egress_rules.append(fields(rule))
-    rules = {'logical_port_ingress_rules': egress_rules,
-             'logical_port_egress_rules': ingress_rules}
-    return _convert_to_nsx_rules(session, cluster, rules, with_id)
-
-
-def merge_security_group_rules_with_current(session, cluster,
-                                            new_rules, current_rules):
-    merged_rules = get_security_group_rules_nsx_format(
-        session, cluster, current_rules)
-    for new_rule in new_rules:
-        rule = new_rule['security_group_rule']
-        if rule['direction'] == 'ingress':
-            merged_rules['logical_port_egress_rules'].append(
-                _convert_to_nsx_rule(session, cluster, rule))
-        elif rule['direction'] == 'egress':
-            merged_rules['logical_port_ingress_rules'].append(
-                _convert_to_nsx_rule(session, cluster, rule))
-    return merged_rules
-
-
-def remove_security_group_with_id_and_id_field(rules, rule_id):
-    """Remove rule by rule_id.
-
-    This function receives all of the current rule associated with a
-    security group and then removes the rule that matches the rule_id. In
-    addition it removes the id field in the dict with each rule since that
-    should not be passed to nsx.
-    """
-    for rule_direction in rules.values():
-        item_to_remove = None
-        for port_rule in rule_direction:
-            if port_rule['id'] == rule_id:
-                item_to_remove = port_rule
-            else:
-                # remove key from dictionary for NSX
-                del port_rule['id']
-        if item_to_remove:
-            rule_direction.remove(item_to_remove)
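Two details of the deleted module are worth spelling out. First, direction is inverted on purpose: get_security_group_rules_nsx_format files neutron 'ingress' rules under NSX's logical_port_egress_rules (and vice versa), because NSX names rules from the logical port's point of view rather than the VM's. Second, protocol normalization accepts either a number or a well-known name, trying int() before the lookup table. A standalone sketch of that second piece:

    # Mirrors the protocol_num_look_up fallback in _convert_to_nsx_rule.
    PROTOCOL_NUMBERS = {'tcp': 6, 'icmp': 1, 'udp': 17, 'ipv6-icmp': 58}


    def normalize_protocol(protocol):
        try:
            return int(protocol)  # numeric input, e.g. 6 or "6"
        except (ValueError, TypeError):
            return PROTOCOL_NUMBERS[protocol]  # named input, e.g. "tcp"


    assert normalize_protocol("6") == 6
    assert normalize_protocol("tcp") == 6
    assert normalize_protocol("ipv6-icmp") == 58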
@@ -1,688 +0,0 @@
-# Copyright 2013 VMware, Inc.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-import random
-
-from neutron_lib import constants
-from neutron_lib import context as n_context
-from neutron_lib.db import api as db_api
-from neutron_lib.db import model_query
-from neutron_lib import exceptions
-from neutron_lib.exceptions import l3 as l3_exc
-from oslo_log import log
-from oslo_serialization import jsonutils
-from oslo_service import loopingcall
-from oslo_utils import timeutils
-import six
-
-from neutron.db.models import external_net as external_net_db
-from neutron.db.models import l3 as l3_db
-from neutron.db import models_v2
-
-from vmware_nsx._i18n import _
-from vmware_nsx.api_client import exception as api_exc
-from vmware_nsx.common import exceptions as nsx_exc
-from vmware_nsx.common import nsx_utils
-from vmware_nsx.nsxlib import mh as nsxlib
-from vmware_nsx.nsxlib.mh import router as routerlib
-from vmware_nsx.nsxlib.mh import switch as switchlib
-
-# Maximum page size for a single request
-# NOTE(salv-orlando): This might become a version-dependent map should the
-# limit be raised in future versions
-MAX_PAGE_SIZE = 5000
-
-LOG = log.getLogger(__name__)
-
-
-class NsxCache(object):
-    """A simple Cache for NSX resources.
-
-    Associates resource id with resource hash to rapidly identify
-    updated resources.
-    Each entry in the cache also stores the following information:
-    - changed: the resource in the cache has been altered following
-      an update or a delete
-    - hit: the resource has been visited during an update (and possibly
-      left unchanged)
-    - data: current resource data
-    - data_bk: backup of resource data prior to its removal
-    """
-
-    def __init__(self):
-        # Maps an uuid to the dict containing it
-        self._uuid_dict_mappings = {}
-        # Dicts for NSX cached resources
-        self._lswitches = {}
-        self._lswitchports = {}
-        self._lrouters = {}
-
-    def __getitem__(self, key):
-        # uuids are unique across the various types of resources
-        # TODO(salv-orlando): Avoid lookups over all dictionaries
-        # when retrieving items
-        # Fetch lswitches, lports, or lrouters
-        resources = self._uuid_dict_mappings[key]
-        return resources[key]
-
-    def _clear_changed_flag_and_remove_from_cache(self, resources):
-        # Clear the 'changed' attribute for all items
-        # NOTE(arosen): the copy.copy is to avoid: 'RuntimeError:
-        # dictionary changed size during iteration' for py3
-
-        for uuid, item in copy.copy(resources).items():
-            if item.pop('changed', None) and not item.get('data'):
-                # The item is not anymore in NSX, so delete it
-                del resources[uuid]
-                del self._uuid_dict_mappings[uuid]
-                LOG.debug("Removed item %s from NSX object cache", uuid)
-
-    def _update_resources(self, resources, new_resources, clear_changed=True):
-        if clear_changed:
-            self._clear_changed_flag_and_remove_from_cache(resources)
-
-        def do_hash(item):
-            return hash(jsonutils.dumps(item))
-
-        # Parse new data and identify new, deleted, and updated resources
-        for item in new_resources:
-            item_id = item['uuid']
-            if resources.get(item_id):
-                new_hash = do_hash(item)
-                if new_hash != resources[item_id]['hash']:
-                    resources[item_id]['hash'] = new_hash
-                    resources[item_id]['changed'] = True
-                    resources[item_id]['data_bk'] = (
-                        resources[item_id]['data'])
-                    resources[item_id]['data'] = item
-                # Mark the item as hit in any case
-                resources[item_id]['hit'] = True
-                LOG.debug("Updating item %s in NSX object cache", item_id)
-            else:
-                resources[item_id] = {'hash': do_hash(item)}
-                resources[item_id]['hit'] = True
-                resources[item_id]['changed'] = True
-                resources[item_id]['data'] = item
-                # add an uuid to dict mapping for easy retrieval
-                # with __getitem__
-                self._uuid_dict_mappings[item_id] = resources
-                LOG.debug("Added item %s to NSX object cache", item_id)
-
-    def _delete_resources(self, resources):
-        # Mark for removal all the elements which have not been visited.
-        # And clear the 'hit' attribute.
-        for to_delete in [k for (k, v) in six.iteritems(resources)
-                          if not v.pop('hit', False)]:
-            resources[to_delete]['changed'] = True
-            resources[to_delete]['data_bk'] = (
-                resources[to_delete].pop('data', None))
-
-    def _get_resource_ids(self, resources, changed_only):
-        if changed_only:
-            return [k for (k, v) in six.iteritems(resources)
-                    if v.get('changed')]
-        return resources.keys()
-
-    def get_lswitches(self, changed_only=False):
-        return self._get_resource_ids(self._lswitches, changed_only)
-
-    def get_lrouters(self, changed_only=False):
-        return self._get_resource_ids(self._lrouters, changed_only)
-
-    def get_lswitchports(self, changed_only=False):
-        return self._get_resource_ids(self._lswitchports, changed_only)
-
-    def update_lswitch(self, lswitch):
-        self._update_resources(self._lswitches, [lswitch], clear_changed=False)
-
-    def update_lrouter(self, lrouter):
-        self._update_resources(self._lrouters, [lrouter], clear_changed=False)
-
-    def update_lswitchport(self, lswitchport):
-        self._update_resources(self._lswitchports, [lswitchport],
-                               clear_changed=False)
-
-    def process_updates(self, lswitches=None,
-                        lrouters=None, lswitchports=None):
-        self._update_resources(self._lswitches, lswitches)
-        self._update_resources(self._lrouters, lrouters)
-        self._update_resources(self._lswitchports, lswitchports)
-        return (self._get_resource_ids(self._lswitches, changed_only=True),
-                self._get_resource_ids(self._lrouters, changed_only=True),
-                self._get_resource_ids(self._lswitchports, changed_only=True))
-
-    def process_deletes(self):
-        self._delete_resources(self._lswitches)
-        self._delete_resources(self._lrouters)
-        self._delete_resources(self._lswitchports)
-        return (self._get_resource_ids(self._lswitches, changed_only=True),
-                self._get_resource_ids(self._lrouters, changed_only=True),
-                self._get_resource_ids(self._lswitchports, changed_only=True))
-
-
-class SyncParameters(object):
-    """Defines attributes used by the synchronization procedure.
-
-    chunk_size: Actual chunk size
-    extra_chunk_size: Additional data to fetch because of chunk size
-        adjustment
-    current_chunk: Counter of the current data chunk being synchronized
-    Page cursors: markers for the next resource to fetch.
-        'start' means page cursor unset for fetching 1st page
-    init_sync_performed: True if the initial synchronization concluded
-    """
-
-    def __init__(self, min_chunk_size):
-        self.chunk_size = min_chunk_size
-        self.extra_chunk_size = 0
-        self.current_chunk = 0
-        self.ls_cursor = 'start'
-        self.lr_cursor = 'start'
-        self.lp_cursor = 'start'
-        self.init_sync_performed = False
-        self.total_size = 0
-
-
-def _start_loopingcall(min_chunk_size, state_sync_interval, func,
-                       initial_delay=5):
-    """Start a loopingcall for the synchronization task."""
-    # Start a looping call to synchronize operational status
-    # for neutron resources
-    if not state_sync_interval:
-        # do not start the looping call if specified
-        # sync interval is 0
-        return
-    state_synchronizer = loopingcall.DynamicLoopingCall(
-        func, sp=SyncParameters(min_chunk_size))
-    state_synchronizer.start(
-        initial_delay=initial_delay,
-        periodic_interval_max=state_sync_interval)
-    return state_synchronizer
-
-
-class NsxSynchronizer(object):
-
-    LS_URI = nsxlib._build_uri_path(
-        switchlib.LSWITCH_RESOURCE, fields='uuid,tags,fabric_status',
-        relations='LogicalSwitchStatus')
-    LR_URI = nsxlib._build_uri_path(
-        routerlib.LROUTER_RESOURCE, fields='uuid,tags,fabric_status',
-        relations='LogicalRouterStatus')
-    LP_URI = nsxlib._build_uri_path(
-        switchlib.LSWITCHPORT_RESOURCE,
-        parent_resource_id='*',
-        fields='uuid,tags,fabric_status_up',
-        relations='LogicalPortStatus')
-
-    def __init__(self, plugin, cluster, state_sync_interval,
-                 req_delay, min_chunk_size, max_rand_delay=0,
-                 initial_delay=5):
-        random.seed()
-        self._nsx_cache = NsxCache()
-        # Store parameters as instance members
-        # NOTE(salv-orlando): apologies if it looks java-ish
-        self._plugin = plugin
-        self._cluster = cluster
-        self._req_delay = req_delay
-        self._sync_interval = state_sync_interval
-        self._max_rand_delay = max_rand_delay
-        # Validate parameters
-        if self._sync_interval < self._req_delay:
-            err_msg = (_("Minimum request delay:%(req_delay)s must not "
-                         "exceed synchronization interval:%(sync_interval)s") %
-                       {'req_delay': self._req_delay,
-                        'sync_interval': self._sync_interval})
-            LOG.error(err_msg)
-            raise nsx_exc.NsxPluginException(err_msg=err_msg)
-        # Backoff time in case of failures while fetching sync data
-        self._sync_backoff = 1
-        # Store the looping call in an instance variable to allow unit tests
-        # for controlling its lifecycle
-        self._sync_looping_call = _start_loopingcall(
-            min_chunk_size, state_sync_interval,
-            self._synchronize_state, initial_delay=initial_delay)
-
-    def _get_tag_dict(self, tags):
-        return dict((tag.get('scope'), tag['tag']) for tag in tags)
-
-    def synchronize_network(self, context, neutron_network_data,
-                            lswitches=None):
-        """Synchronize a Neutron network with its NSX counterpart.
-
-        This routine synchronizes a set of switches when a Neutron
-        network is mapped to multiple lswitches.
-        """
-        if not lswitches:
-            # Try to get logical switches from nsx
-            try:
-                lswitches = nsx_utils.fetch_nsx_switches(
-                    context.session, self._cluster,
-                    neutron_network_data['id'])
-            except exceptions.NetworkNotFound:
-                # TODO(salv-orlando): We should be catching
-                # api_exc.ResourceNotFound here
-                # The logical switch was not found
-                LOG.warning("Logical switch for neutron network %s not "
-                            "found on NSX.", neutron_network_data['id'])
-                lswitches = []
-            else:
-                for lswitch in lswitches:
-                    self._nsx_cache.update_lswitch(lswitch)
-        # By default assume things go wrong
-        status = constants.NET_STATUS_ERROR
-        # In most cases lswitches will contain a single element
-        for ls in lswitches:
-            if not ls:
-                # Logical switch was deleted
-                break
-            ls_status = ls['_relations']['LogicalSwitchStatus']
-            if not ls_status['fabric_status']:
-                status = constants.NET_STATUS_DOWN
-                break
-        else:
-            # No switch was down or missing. Set status to ACTIVE unless
-            # there were no switches in the first place!
-            if lswitches:
-                status = constants.NET_STATUS_ACTIVE
-        # Update db object
-        if status == neutron_network_data['status']:
-            # do nothing
-            return
-
-        with db_api.CONTEXT_WRITER.using(context):
-            try:
-                network = self._plugin._get_network(context,
-                                                    neutron_network_data['id'])
-            except exceptions.NetworkNotFound:
-                pass
-            else:
-                network.status = status
-                LOG.debug("Updating status for neutron resource %(q_id)s to:"
-                          " %(status)s",
-                          {'q_id': neutron_network_data['id'],
-                           'status': status})
-
-    def _synchronize_lswitches(self, ctx, ls_uuids, scan_missing=False):
-        if not ls_uuids and not scan_missing:
-            return
-        neutron_net_ids = set()
-        neutron_nsx_mappings = {}
-        # TODO(salvatore-orlando): Deal with the case the tag
-        # has been tampered with
-        for ls_uuid in ls_uuids:
-            # If the lswitch has been deleted, get backup copy of data
-            lswitch = (self._nsx_cache[ls_uuid].get('data') or
-                       self._nsx_cache[ls_uuid].get('data_bk'))
-            tags = self._get_tag_dict(lswitch['tags'])
-            neutron_id = tags.get('quantum_net_id')
-            neutron_net_ids.add(neutron_id)
-            neutron_nsx_mappings[neutron_id] = (
-                neutron_nsx_mappings.get(neutron_id, []) +
-                [self._nsx_cache[ls_uuid]])
-        # Fetch neutron networks from database
-        filters = {'router:external': [False]}
-        if not scan_missing:
-            filters['id'] = neutron_net_ids
-
-        networks = model_query.get_collection(
-            ctx, models_v2.Network, self._plugin._make_network_dict,
-            filters=filters)
-
-        for network in networks:
-            lswitches = neutron_nsx_mappings.get(network['id'], [])
-            lswitches = [lsw.get('data') for lsw in lswitches]
-            self.synchronize_network(ctx, network, lswitches)
-
-    def synchronize_router(self, context, neutron_router_data,
-                           lrouter=None):
-        """Synchronize a neutron router with its NSX counterpart."""
-        if not lrouter:
-            # Try to get router from nsx
-            try:
-                # This query will return the logical router status too
-                nsx_router_id = nsx_utils.get_nsx_router_id(
-                    context.session, self._cluster, neutron_router_data['id'])
-                if nsx_router_id:
-                    lrouter = routerlib.get_lrouter(
-                        self._cluster, nsx_router_id)
-            except exceptions.NotFound:
-                # NOTE(salv-orlando): We should be catching
-                # api_exc.ResourceNotFound here
-                # The logical router was not found
-                LOG.warning("Logical router for neutron router %s not "
-                            "found on NSX.", neutron_router_data['id'])
-            if lrouter:
-                # Update the cache
-                self._nsx_cache.update_lrouter(lrouter)
-
-        # Note(salv-orlando): It might worth adding a check to verify neutron
-        # resource tag in nsx entity matches a Neutron id.
-        # By default assume things go wrong
-        status = constants.NET_STATUS_ERROR
-        if lrouter:
-            lr_status = (lrouter['_relations']
-                         ['LogicalRouterStatus']
-                         ['fabric_status'])
-            status = (lr_status and
-                      constants.NET_STATUS_ACTIVE or
-                      constants.NET_STATUS_DOWN)
-        # Update db object
-        if status == neutron_router_data['status']:
-            # do nothing
-            return
-
-        with db_api.CONTEXT_WRITER.using(context):
-            try:
-                router = self._plugin._get_router(context,
-                                                  neutron_router_data['id'])
-            except l3_exc.RouterNotFound:
-                pass
-            else:
-                router.status = status
-                LOG.debug("Updating status for neutron resource %(q_id)s to:"
-                          " %(status)s",
-                          {'q_id': neutron_router_data['id'],
-                           'status': status})
-
-    def _synchronize_lrouters(self, ctx, lr_uuids, scan_missing=False):
-        if not lr_uuids and not scan_missing:
-            return
-        # TODO(salvatore-orlando): Deal with the case the tag
-        # has been tampered with
-        neutron_router_mappings = {}
-        for lr_uuid in lr_uuids:
-            lrouter = (self._nsx_cache[lr_uuid].get('data') or
-                       self._nsx_cache[lr_uuid].get('data_bk'))
-            tags = self._get_tag_dict(lrouter['tags'])
-            neutron_router_id = tags.get('q_router_id')
-            if neutron_router_id:
-                neutron_router_mappings[neutron_router_id] = (
-                    self._nsx_cache[lr_uuid])
-            else:
-                LOG.warning("Unable to find Neutron router id for "
-                            "NSX logical router: %s", lr_uuid)
-        # Fetch neutron routers from database
-        filters = ({} if scan_missing else
-                   {'id': neutron_router_mappings.keys()})
-        routers = model_query.get_collection(
-            ctx, l3_db.Router, self._plugin._make_router_dict,
-            filters=filters)
-        for router in routers:
-            lrouter = neutron_router_mappings.get(router['id'])
-            self.synchronize_router(
-                ctx, router, lrouter and lrouter.get('data'))
-
-    def synchronize_port(self, context, neutron_port_data,
-                         lswitchport=None, ext_networks=None):
-        """Synchronize a Neutron port with its NSX counterpart."""
-        # Skip synchronization for ports on external networks
-        if not ext_networks:
-            ext_networks = [net['id'] for net in context.session.query(
-                models_v2.Network).join(
-                    external_net_db.ExternalNetwork,
-                    (models_v2.Network.id ==
-                     external_net_db.ExternalNetwork.network_id))]
-        if neutron_port_data['network_id'] in ext_networks:
-            with db_api.CONTEXT_WRITER.using(context):
-                neutron_port_data['status'] = constants.PORT_STATUS_ACTIVE
-                return
-
-        if not lswitchport:
-            # Try to get port from nsx
-            try:
-                ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
-                    context.session, self._cluster, neutron_port_data['id'])
-                if lp_uuid:
-                    lswitchport = switchlib.get_port(
-                        self._cluster, ls_uuid, lp_uuid,
-                        relations='LogicalPortStatus')
-            except (exceptions.PortNotFoundOnNetwork):
-                # NOTE(salv-orlando): We should be catching
-                # api_exc.ResourceNotFound here instead
-                # of PortNotFoundOnNetwork when the id exists but
-                # the logical switch port was not found
-                LOG.warning("Logical switch port for neutron port %s "
-                            "not found on NSX.", neutron_port_data['id'])
-                lswitchport = None
-            else:
-                # If lswitchport is not None, update the cache.
-                # It could be none if the port was deleted from the backend
-                if lswitchport:
-                    self._nsx_cache.update_lswitchport(lswitchport)
-        # Note(salv-orlando): It might worth adding a check to verify neutron
-        # resource tag in nsx entity matches Neutron id.
-        # By default assume things go wrong
-        status = constants.PORT_STATUS_ERROR
-        if lswitchport:
-            lp_status = (lswitchport['_relations']
-                         ['LogicalPortStatus']
-                         ['fabric_status_up'])
-            status = (lp_status and
-                      constants.PORT_STATUS_ACTIVE or
-                      constants.PORT_STATUS_DOWN)
-
-        # Update db object
-        if status == neutron_port_data['status']:
-            # do nothing
-            return
-
-        with db_api.CONTEXT_WRITER.using(context):
-            try:
-                port = self._plugin._get_port(context,
-                                              neutron_port_data['id'])
-            except exceptions.PortNotFound:
-                pass
-            else:
-                port.status = status
-                LOG.debug("Updating status for neutron resource %(q_id)s to:"
-                          " %(status)s",
-                          {'q_id': neutron_port_data['id'],
-                           'status': status})
-
-    def _synchronize_lswitchports(self, ctx, lp_uuids, scan_missing=False):
-        if not lp_uuids and not scan_missing:
-            return
-        # Find Neutron port id by tag - the tag is already
-        # loaded in memory, no reason for doing a db query
-        # TODO(salvatore-orlando): Deal with the case the tag
-        # has been tampered with
-        neutron_port_mappings = {}
-        for lp_uuid in lp_uuids:
-            lport = (self._nsx_cache[lp_uuid].get('data') or
-                     self._nsx_cache[lp_uuid].get('data_bk'))
-            tags = self._get_tag_dict(lport['tags'])
-            neutron_port_id = tags.get('q_port_id')
-            if neutron_port_id:
-                neutron_port_mappings[neutron_port_id] = (
-                    self._nsx_cache[lp_uuid])
-        # Fetch neutron ports from database
-        # At the first sync we need to fetch all ports
-        filters = ({} if scan_missing else
-                   {'id': neutron_port_mappings.keys()})
-        # TODO(salv-orlando): Work out a solution for avoiding
-        # this query
-        ext_nets = [net['id'] for net in ctx.session.query(
-            models_v2.Network).join(
-                external_net_db.ExternalNetwork,
-                (models_v2.Network.id ==
-                 external_net_db.ExternalNetwork.network_id))]
-        ports = model_query.get_collection(
-            ctx, models_v2.Port, self._plugin._make_port_dict,
-            filters=filters)
-        for port in ports:
-            lswitchport = neutron_port_mappings.get(port['id'])
-            self.synchronize_port(
-                ctx, port, lswitchport and lswitchport.get('data'),
-                ext_networks=ext_nets)
-
-    def _get_chunk_size(self, sp):
-        # NOTE(salv-orlando): Try to use __future__ for this routine only?
-        ratio = ((float(sp.total_size) / float(sp.chunk_size)) /
-                 (float(self._sync_interval) / float(self._req_delay)))
-        new_size = max(1.0, ratio) * float(sp.chunk_size)
-        return int(new_size) + (new_size - int(new_size) > 0)
-
-    def _fetch_data(self, uri, cursor, page_size):
-        # If not cursor there is nothing to retrieve
-        if cursor:
-            if cursor == 'start':
-                cursor = None
-            # Chunk size tuning might, in some conditions, make it larger
-            # than 5,000, which is the maximum page size allowed by the NSX
-            # API. In this case the request should be split in multiple
-            # requests. This is not ideal, and therefore a log warning will
-            # be emitted.
-            num_requests = page_size // (MAX_PAGE_SIZE + 1) + 1
-            if num_requests > 1:
-                LOG.warning("Requested page size is %(cur_chunk_size)d. "
-                            "It might be necessary to do %(num_requests)d "
-                            "round-trips to NSX for fetching data. Please "
-                            "tune sync parameters to ensure chunk size "
-                            "is less than %(max_page_size)d",
-                            {'cur_chunk_size': page_size,
-                             'num_requests': num_requests,
-                             'max_page_size': MAX_PAGE_SIZE})
-            # Only the first request might return the total size,
-            # subsequent requests will definitely not
-            results, cursor, total_size = nsxlib.get_single_query_page(
-                uri, self._cluster, cursor,
-                min(page_size, MAX_PAGE_SIZE))
-            for _req in range(num_requests - 1):
-                # If no cursor is returned break the cycle as there is no
-                # actual need to perform multiple requests (all fetched)
-                # This happens when the overall size of resources exceeds
-                # the maximum page size, but the number for each single
-                # resource type is below this threshold
-                if not cursor:
-                    break
-                req_results, cursor = nsxlib.get_single_query_page(
-                    uri, self._cluster, cursor,
-                    min(page_size, MAX_PAGE_SIZE))[:2]
-                results.extend(req_results)
-            # reset cursor before returning if we queried just to
-            # know the number of entities
-            return results, cursor if page_size else 'start', total_size
-        return [], cursor, None
-
-    def _fetch_nsx_data_chunk(self, sp):
-        base_chunk_size = sp.chunk_size
-        chunk_size = base_chunk_size + sp.extra_chunk_size
-        LOG.info("Fetching up to %s resources "
-                 "from NSX backend", chunk_size)
-        fetched = ls_count = lr_count = lp_count = 0
-        lswitches = lrouters = lswitchports = []
-        if sp.ls_cursor or sp.ls_cursor == 'start':
-            (lswitches, sp.ls_cursor, ls_count) = self._fetch_data(
-                self.LS_URI, sp.ls_cursor, chunk_size)
-            fetched = len(lswitches)
-        if fetched < chunk_size and sp.lr_cursor or sp.lr_cursor == 'start':
-            (lrouters, sp.lr_cursor, lr_count) = self._fetch_data(
-                self.LR_URI, sp.lr_cursor, max(chunk_size - fetched, 0))
-            fetched += len(lrouters)
-        if fetched < chunk_size and sp.lp_cursor or sp.lp_cursor == 'start':
-            (lswitchports, sp.lp_cursor, lp_count) = self._fetch_data(
-                self.LP_URI, sp.lp_cursor, max(chunk_size - fetched, 0))
-            fetched += len(lswitchports)
-        if sp.current_chunk == 0:
-            # No cursors were provided. Then it must be possible to
-            # calculate the total amount of data to fetch
-            sp.total_size = ls_count + lr_count + lp_count
-        LOG.debug("Total data size: %d", sp.total_size)
-        sp.chunk_size = self._get_chunk_size(sp)
-        # Calculate chunk size adjustment
-        sp.extra_chunk_size = sp.chunk_size - base_chunk_size
-        LOG.debug("Fetched %(num_lswitches)d logical switches, "
-                  "%(num_lswitchports)d logical switch ports,"
-                  "%(num_lrouters)d logical routers",
-                  {'num_lswitches': len(lswitches),
-                   'num_lswitchports': len(lswitchports),
-                   'num_lrouters': len(lrouters)})
-        return (lswitches, lrouters, lswitchports)
-
-    def _synchronize_state(self, sp):
-        # If the plugin has been destroyed, stop the LoopingCall
-        if not self._plugin:
-            raise loopingcall.LoopingCallDone()
-        start = timeutils.utcnow()
-        # Reset page cursor variables if necessary
-        if sp.current_chunk == 0:
-            sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
-        LOG.info("Running state synchronization task. Chunk: %s",
-                 sp.current_chunk)
-        # Fetch chunk_size data from NSX
-        try:
-            (lswitches, lrouters, lswitchports) = (
-                self._fetch_nsx_data_chunk(sp))
-        except (api_exc.RequestTimeout, api_exc.NsxApiException):
-            sleep_interval = self._sync_backoff
-            # Cap max back off to 64 seconds
-            self._sync_backoff = min(self._sync_backoff * 2, 64)
-            LOG.exception("An error occurred while communicating with "
-                          "NSX backend. Will retry synchronization "
-                          "in %d seconds", sleep_interval)
-            return sleep_interval
-        LOG.debug("Time elapsed querying NSX: %s",
-                  timeutils.utcnow() - start)
-        if sp.total_size:
-            num_chunks = ((sp.total_size / sp.chunk_size) +
-                          (sp.total_size % sp.chunk_size != 0))
-        else:
-            num_chunks = 1
-        LOG.debug("Number of chunks: %d", num_chunks)
-        # Find objects which have changed on NSX side and need
-        # to be synchronized
-        LOG.debug("Processing NSX cache for updated objects")
-        (ls_uuids, lr_uuids, lp_uuids) = self._nsx_cache.process_updates(
-            lswitches, lrouters, lswitchports)
-        # Process removed objects only at the last chunk
-        scan_missing = (sp.current_chunk == num_chunks - 1 and
-                        not sp.init_sync_performed)
-        if sp.current_chunk == num_chunks - 1:
-            LOG.debug("Processing NSX cache for deleted objects")
-            self._nsx_cache.process_deletes()
-            ls_uuids = self._nsx_cache.get_lswitches(
-                changed_only=not scan_missing)
-            lr_uuids = self._nsx_cache.get_lrouters(
-                changed_only=not scan_missing)
-            lp_uuids = self._nsx_cache.get_lswitchports(
-                changed_only=not scan_missing)
-        LOG.debug("Time elapsed hashing data: %s",
-                  timeutils.utcnow() - start)
-        # Get an admin context
-        ctx = n_context.get_admin_context()
-        # Synchronize with database
-        self._synchronize_lswitches(ctx, ls_uuids,
-                                    scan_missing=scan_missing)
-        self._synchronize_lrouters(ctx, lr_uuids,
-                                   scan_missing=scan_missing)
-        self._synchronize_lswitchports(ctx, lp_uuids,
-                                       scan_missing=scan_missing)
-        # Increase chunk counter
-        LOG.info("Synchronization for chunk %(chunk_num)d of "
-                 "%(total_chunks)d performed",
-                 {'chunk_num': sp.current_chunk + 1,
-                  'total_chunks': num_chunks})
-        sp.current_chunk = (sp.current_chunk + 1) % num_chunks
-        added_delay = 0
-        if sp.current_chunk == 0:
-            # Ensure init_sync_performed is True
-            if not sp.init_sync_performed:
-                sp.init_sync_performed = True
-            # Add additional random delay
-            added_delay = random.randint(0, self._max_rand_delay)
-        LOG.debug("Time elapsed at end of sync: %s",
-                  timeutils.utcnow() - start)
-        return self._sync_interval / num_chunks + added_delay
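One piece of the deleted synchronizer deserves a closer look: _synchronize_state's return value is the next sleep interval for oslo.service's DynamicLoopingCall, so returning the current backoff and doubling it (capped at 64 seconds) on API errors yields exponential backoff with no extra machinery. A minimal sketch of that scheme in isolation:

    class Backoff(object):
        """Doubles the reported delay on each failure, up to a cap."""

        def __init__(self, cap=64):
            self._delay = 1
            self._cap = cap

        def next_interval(self):
            interval = self._delay
            self._delay = min(self._delay * 2, self._cap)
            return interval


    b = Backoff()
    assert [b.next_interval() for _ in range(8)] == [1, 2, 4, 8, 16, 32, 64, 64]

Note that, as written in the hunk above, _sync_backoff is never reset after a successful fetch, so a synchronizer that has seen repeated failures keeps reporting the longer retry interval.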
@ -1,475 +0,0 @@
|
|||||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sqlalchemy.orm import exc as sa_orm_exc

from neutron_lib import constants
from neutron_lib.db import api as db_api
from neutron_lib.db import model_query
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions
from neutron_lib.plugins import utils
from oslo_log import log as logging
from oslo_utils import uuidutils
import six

from vmware_nsx._i18n import _
from vmware_nsx.db import nsx_models
from vmware_nsx.extensions import networkgw

LOG = logging.getLogger(__name__)
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
NETWORK_ID = 'network_id'
SEGMENTATION_TYPE = 'segmentation_type'
SEGMENTATION_ID = 'segmentation_id'
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
                                     SEGMENTATION_TYPE,
                                     SEGMENTATION_ID))
# Constants for gateway device operational status
STATUS_UNKNOWN = "UNKNOWN"
STATUS_ERROR = "ERROR"
STATUS_ACTIVE = "ACTIVE"
STATUS_DOWN = "DOWN"


class GatewayInUse(exceptions.InUse):
    message = _("Network Gateway '%(gateway_id)s' still has active mappings "
                "with one or more neutron networks.")


class GatewayNotFound(exceptions.NotFound):
    message = _("Network Gateway %(gateway_id)s could not be found")


class GatewayDeviceInUse(exceptions.InUse):
    message = _("Network Gateway Device '%(device_id)s' is still used by "
                "one or more network gateways.")


class GatewayDeviceNotFound(exceptions.NotFound):
    message = _("Network Gateway Device %(device_id)s could not be found.")


class GatewayDevicesNotFound(exceptions.NotFound):
    message = _("One or more Network Gateway Devices could not be found: "
                "%(device_ids)s.")


class NetworkGatewayPortInUse(exceptions.InUse):
    message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
                "therefore cannot be deleted directly via the port API.")


class GatewayConnectionInUse(exceptions.InUse):
    message = _("The specified mapping '%(mapping)s' is already in use on "
                "network gateway '%(gateway_id)s'.")


class MultipleGatewayConnections(exceptions.Conflict):
    message = _("Multiple network connections found on '%(gateway_id)s' "
                "with provided criteria.")


class GatewayConnectionNotFound(exceptions.NotFound):
    message = _("The connection %(network_mapping_info)s was not found on "
                "the network gateway '%(network_gateway_id)s'")


class NetworkGatewayUnchangeable(exceptions.InUse):
    message = _("The network gateway %(gateway_id)s "
                "cannot be updated or deleted")


class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):

    gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
    device_resource = networkgw.DEVICE_RESOURCE_NAME

    def _get_network_gateway(self, context, gw_id):
        try:
            gw = model_query.get_by_id(context, nsx_models.NetworkGateway,
                                       gw_id)
        except sa_orm_exc.NoResultFound:
            raise GatewayNotFound(gateway_id=gw_id)
        return gw

    def _make_gw_connection_dict(self, gw_conn):
        return {'port_id': gw_conn['port_id'],
                'segmentation_type': gw_conn['segmentation_type'],
                'segmentation_id': gw_conn['segmentation_id']}

    def _make_network_gateway_dict(self, network_gateway, fields=None):
        device_list = []
        for d in network_gateway['devices']:
            device_list.append({'id': d['id'],
                                'interface_name': d['interface_name']})
        res = {'id': network_gateway['id'],
               'name': network_gateway['name'],
               'default': network_gateway['default'],
               'devices': device_list,
               'tenant_id': network_gateway['tenant_id']}
        # Query gateway connections only if needed
        if not fields or 'ports' in fields:
            res['ports'] = [self._make_gw_connection_dict(conn)
                            for conn in network_gateway.network_connections]
        return db_utils.resource_fields(res, fields)
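
    # NOTE: as an illustration of the resource view built above, with
    # hypothetical identifiers (these values are not from the original
    # change), the returned dict has roughly this shape:
    #   {'id': 'gw-uuid', 'name': 'gw1', 'default': False,
    #    'devices': [{'id': 'dev-uuid', 'interface_name': 'breth0'}],
    #    'tenant_id': 'project-uuid',
    #    'ports': [{'port_id': 'port-uuid',
    #               'segmentation_type': 'vlan', 'segmentation_id': 555}]}
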
    def _set_mapping_info_defaults(self, mapping_info):
        if not mapping_info.get('segmentation_type'):
            mapping_info['segmentation_type'] = 'flat'
        if not mapping_info.get('segmentation_id'):
            mapping_info['segmentation_id'] = 0

    def _validate_network_mapping_info(self, network_mapping_info):
        self._set_mapping_info_defaults(network_mapping_info)
        network_id = network_mapping_info.get(NETWORK_ID)
        if not network_id:
            raise exceptions.InvalidInput(
                error_message=_("A network identifier must be specified "
                                "when connecting a network to a network "
                                "gateway. Unable to complete operation"))
        connection_attrs = set(network_mapping_info.keys())
        if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
            raise exceptions.InvalidInput(
                error_message=(_("Invalid keys found among the ones provided "
                                 "in request body: %(connection_attrs)s."),
                               connection_attrs))
        seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
        seg_id = network_mapping_info.get(SEGMENTATION_ID)
        # It is important to validate that the segmentation ID is actually an
        # integer value
        try:
            seg_id = int(seg_id)
        except ValueError:
            msg = _("An invalid segmentation ID was specified. The "
                    "segmentation ID must be a positive integer number")
            raise exceptions.InvalidInput(error_message=msg)
        # The NSX plugin accepts 0 as a valid vlan tag
        seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id)
        if seg_type.lower() == 'flat' and seg_id:
            msg = _("Cannot specify a segmentation id when "
                    "the segmentation type is flat")
            raise exceptions.InvalidInput(error_message=msg)
        elif (seg_type.lower() == 'vlan' and not seg_id_valid):
            msg = _("Invalid segmentation id (%s) for "
                    "vlan segmentation type") % seg_id
            raise exceptions.InvalidInput(error_message=msg)
        return network_id
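
    # NOTE: for illustration, assumed mapping_info bodies against the
    # validator above (the network UUID is hypothetical):
    #   {'network_id': 'net-uuid'}
    #       -> defaulted to segmentation_type='flat', segmentation_id=0: ok
    #   {'network_id': 'net-uuid', 'segmentation_type': 'flat',
    #    'segmentation_id': 100}  -> rejected: flat cannot carry an id
    #   {'network_id': 'net-uuid', 'segmentation_type': 'vlan',
    #    'segmentation_id': 100}  -> ok: 100 is a valid VLAN tag
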
    def _retrieve_gateway_connections(self, context, gateway_id,
                                      mapping_info=None, only_one=False):
        mapping_info = mapping_info or {}
        filters = {'network_gateway_id': [gateway_id]}
        for k, v in six.iteritems(mapping_info):
            if v and k != NETWORK_ID:
                filters[k] = [v]
        query = model_query.get_collection_query(
            context, nsx_models.NetworkConnection, filters)
        return query.one() if only_one else query.all()

    def _unset_default_network_gateways(self, context):
        with db_api.CONTEXT_WRITER.using(context):
            context.session.query(nsx_models.NetworkGateway).update(
                {nsx_models.NetworkGateway.default: False})

    def _set_default_network_gateway(self, context, gw_id):
        with db_api.CONTEXT_WRITER.using(context):
            gw = (context.session.query(nsx_models.NetworkGateway).
                  filter_by(id=gw_id).one())
            gw['default'] = True

    def prevent_network_gateway_port_deletion(self, context, port):
        """Pre-deletion check.

        Ensures a port will not be deleted if it is being used by a network
        gateway. In that case an exception will be raised.
        """
        if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
            raise NetworkGatewayPortInUse(port_id=port['id'],
                                          device_owner=port['device_owner'])

    def _validate_device_list(self, context, tenant_id, gateway_data):
        device_query = self._query_gateway_devices(
            context, filters={'id': [device['id']
                                     for device in gateway_data['devices']]})
        retrieved_device_ids = set()
        for device in device_query:
            retrieved_device_ids.add(device['id'])
            if device['tenant_id'] != tenant_id:
                raise GatewayDeviceNotFound(device_id=device['id'])
        missing_device_ids = (
            set(device['id'] for device in gateway_data['devices']) -
            retrieved_device_ids)
        if missing_device_ids:
            raise GatewayDevicesNotFound(
                device_ids=",".join(missing_device_ids))

    def create_network_gateway(self, context, network_gateway,
                               validate_device_list=True):
        gw_data = network_gateway[self.gateway_resource]
        tenant_id = gw_data['tenant_id']
        with db_api.CONTEXT_WRITER.using(context):
            gw_db = nsx_models.NetworkGateway(
                id=gw_data.get('id', uuidutils.generate_uuid()),
                tenant_id=tenant_id,
                name=gw_data.get('name'))
            # Device list is guaranteed to be a valid list, but some devices
            # might still either not exist or belong to a different tenant
            if validate_device_list:
                self._validate_device_list(context, tenant_id, gw_data)
            gw_db.devices.extend(
                [nsx_models.NetworkGatewayDeviceReference(**device)
                 for device in gw_data['devices']])
            context.session.add(gw_db)
        LOG.debug("Created network gateway with id:%s", gw_db['id'])
        return self._make_network_gateway_dict(gw_db)

    def update_network_gateway(self, context, id, network_gateway):
        gw_data = network_gateway[self.gateway_resource]
        with db_api.CONTEXT_WRITER.using(context):
            gw_db = self._get_network_gateway(context, id)
            if gw_db.default:
                raise NetworkGatewayUnchangeable(gateway_id=id)
            # Ensure there is something to update before doing it
            if any([gw_db[k] != gw_data[k] for k in gw_data]):
                gw_db.update(gw_data)
        LOG.debug("Updated network gateway with id:%s", id)
        return self._make_network_gateway_dict(gw_db)

    def get_network_gateway(self, context, id, fields=None):
        gw_db = self._get_network_gateway(context, id)
        return self._make_network_gateway_dict(gw_db, fields)

    def delete_network_gateway(self, context, id):
        with db_api.CONTEXT_WRITER.using(context):
            gw_db = self._get_network_gateway(context, id)
            if gw_db.network_connections:
                raise GatewayInUse(gateway_id=id)
            if gw_db.default:
                raise NetworkGatewayUnchangeable(gateway_id=id)
            context.session.delete(gw_db)
        LOG.debug("Network gateway '%s' was destroyed.", id)

    def get_network_gateways(self, context, filters=None, fields=None,
                             sorts=None, limit=None, marker=None,
                             page_reverse=False):
        marker_obj = db_utils.get_marker_obj(self, context,
                                             'network_gateway',
                                             limit, marker)
        return model_query.get_collection(context, nsx_models.NetworkGateway,
                                          self._make_network_gateway_dict,
                                          filters=filters, fields=fields,
                                          sorts=sorts, limit=limit,
                                          marker_obj=marker_obj,
                                          page_reverse=page_reverse)

    def connect_network(self, context, network_gateway_id,
                        network_mapping_info):
        network_id = self._validate_network_mapping_info(network_mapping_info)
        LOG.debug("Connecting network '%(network_id)s' to gateway "
                  "'%(network_gateway_id)s'",
                  {'network_id': network_id,
                   'network_gateway_id': network_gateway_id})
        with db_api.CONTEXT_WRITER.using(context):
            gw_db = self._get_network_gateway(context, network_gateway_id)
            tenant_id = gw_db['tenant_id']
            if context.is_admin and not tenant_id:
                tenant_id = context.tenant_id
            # TODO(salvatore-orlando): Leverage unique constraint instead
            # of performing another query!
            if self._retrieve_gateway_connections(context,
                                                  network_gateway_id,
                                                  network_mapping_info):
                raise GatewayConnectionInUse(mapping=network_mapping_info,
                                             gateway_id=network_gateway_id)
            # TODO(salvatore-orlando): Creating a port will give it an IP,
            # but we actually do not need any. Instead of wasting an IP we
            # should have a way to say a port shall not be associated with
            # any subnet
            try:
                # We pass the segmentation type and id too - the plugin
                # might find them useful as the network connection object
                # does not exist yet.
                # NOTE: they're not extended attributes, rather extra data
                # passed in the port structure to the plugin
                # TODO(salvatore-orlando): Verify optimal solution for
                # ownership of the gateway port
                port = self.create_port(context, {
                    'port':
                    {'tenant_id': tenant_id,
                     'network_id': network_id,
                     'mac_address': constants.ATTR_NOT_SPECIFIED,
                     'admin_state_up': True,
                     'fixed_ips': [],
                     'device_id': network_gateway_id,
                     'device_owner': DEVICE_OWNER_NET_GW_INTF,
                     'name': '',
                     'gw:segmentation_type':
                     network_mapping_info.get('segmentation_type'),
                     'gw:segmentation_id':
                     network_mapping_info.get('segmentation_id')}})
            except exceptions.NetworkNotFound:
                err_msg = (_("Requested network '%(network_id)s' not found. "
                             "Unable to create network connection on "
                             "gateway '%(network_gateway_id)s'") %
                           {'network_id': network_id,
                            'network_gateway_id': network_gateway_id})
                LOG.error(err_msg)
                raise exceptions.InvalidInput(error_message=err_msg)
            port_id = port['id']
            LOG.debug("Gateway port for '%(network_gateway_id)s' "
                      "created on network '%(network_id)s':%(port_id)s",
                      {'network_gateway_id': network_gateway_id,
                       'network_id': network_id,
                       'port_id': port_id})
            # Create NetworkConnection record
            network_mapping_info['port_id'] = port_id
            network_mapping_info['tenant_id'] = tenant_id
            gw_db.network_connections.append(
                nsx_models.NetworkConnection(**network_mapping_info))
            # now deallocate and recycle ip from the port
            for fixed_ip in port.get('fixed_ips', []):
                self._delete_ip_allocation(context, network_id,
                                           fixed_ip['subnet_id'],
                                           fixed_ip['ip_address'])
            LOG.debug("Ensured no IP addresses are configured on port %s",
                      port_id)
            return {'connection_info':
                    {'network_gateway_id': network_gateway_id,
                     'network_id': network_id,
                     'port_id': port_id}}
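
    # NOTE: a hedged usage sketch. Through the networkgw extension (shown
    # later in this change) the method above backed the PUT member action
    # 'connect_network' on a network gateway; with a body such as
    # {'network_id': 'net-uuid', 'segmentation_type': 'vlan',
    #  'segmentation_id': 555} it returned the 'connection_info' dict above,
    # carrying the gateway, network and gateway-port identifiers.
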
    def disconnect_network(self, context, network_gateway_id,
                           network_mapping_info):
        network_id = self._validate_network_mapping_info(network_mapping_info)
        LOG.debug("Disconnecting network '%(network_id)s' from gateway "
                  "'%(network_gateway_id)s'",
                  {'network_id': network_id,
                   'network_gateway_id': network_gateway_id})
        with db_api.CONTEXT_WRITER.using(context):
            # Uniquely identify connection, otherwise raise
            try:
                net_connection = self._retrieve_gateway_connections(
                    context, network_gateway_id,
                    network_mapping_info, only_one=True)
            except sa_orm_exc.NoResultFound:
                raise GatewayConnectionNotFound(
                    network_mapping_info=network_mapping_info,
                    network_gateway_id=network_gateway_id)
            except sa_orm_exc.MultipleResultsFound:
                raise MultipleGatewayConnections(
                    gateway_id=network_gateway_id)
            # Remove gateway port from network
            # FIXME(salvatore-orlando): Ensure state of port in NSX is
            # consistent with outcome of transaction
            self.delete_port(context, net_connection['port_id'],
                             nw_gw_port_check=False)
            # Remove NetworkConnection record
            context.session.delete(net_connection)

    def _make_gateway_device_dict(self, gateway_device, fields=None,
                                  include_nsx_id=False):
        res = {'id': gateway_device['id'],
               'name': gateway_device['name'],
               'status': gateway_device['status'],
               'connector_type': gateway_device['connector_type'],
               'connector_ip': gateway_device['connector_ip'],
               'tenant_id': gateway_device['tenant_id']}
        if include_nsx_id:
            # Return the NSX mapping as well. This attribute will not be
            # returned in the API response anyway. Ensure it will not be
            # filtered out in field selection.
            if fields:
                fields.append('nsx_id')
            res['nsx_id'] = gateway_device['nsx_id']
        return db_utils.resource_fields(res, fields)

    def _get_gateway_device(self, context, device_id):
        try:
            return model_query.get_by_id(context,
                                         nsx_models.NetworkGatewayDevice,
                                         device_id)
        except sa_orm_exc.NoResultFound:
            raise GatewayDeviceNotFound(device_id=device_id)

    def _is_device_in_use(self, context, device_id):
        query = model_query.get_collection_query(
            context, nsx_models.NetworkGatewayDeviceReference,
            {'id': [device_id]})
        return query.first()

    def get_gateway_device(self, context, device_id, fields=None,
                           include_nsx_id=False):
        return self._make_gateway_device_dict(
            self._get_gateway_device(context, device_id),
            fields, include_nsx_id)

    def _query_gateway_devices(self, context,
                               filters=None, sorts=None,
                               limit=None, marker=None,
                               page_reverse=None):
        marker_obj = db_utils.get_marker_obj(self, context,
                                             'gateway_device',
                                             limit, marker)
        return model_query.get_collection_query(
            context, nsx_models.NetworkGatewayDevice,
            filters=filters, sorts=sorts, limit=limit,
            marker_obj=marker_obj, page_reverse=page_reverse)

    def get_gateway_devices(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False, include_nsx_id=False):
        query = self._query_gateway_devices(context, filters, sorts, limit,
                                            marker, page_reverse)
        return [self._make_gateway_device_dict(row, fields, include_nsx_id)
                for row in query]

    def create_gateway_device(self, context, gateway_device,
                              initial_status=STATUS_UNKNOWN):
        device_data = gateway_device[self.device_resource]
        tenant_id = device_data['tenant_id']
        with db_api.CONTEXT_WRITER.using(context):
            device_db = nsx_models.NetworkGatewayDevice(
                id=device_data.get('id', uuidutils.generate_uuid()),
                tenant_id=tenant_id,
                name=device_data.get('name'),
                connector_type=device_data['connector_type'],
                connector_ip=device_data['connector_ip'],
                status=initial_status)
            context.session.add(device_db)
        LOG.debug("Created network gateway device: %s", device_db['id'])
        return self._make_gateway_device_dict(device_db)

    def update_gateway_device(self, context, gateway_device_id,
                              gateway_device, include_nsx_id=False):
        device_data = gateway_device[self.device_resource]
        with db_api.CONTEXT_WRITER.using(context):
            device_db = self._get_gateway_device(context, gateway_device_id)
            # Ensure there is something to update before doing it
            if any([device_db[k] != device_data[k] for k in device_data]):
                device_db.update(device_data)
        LOG.debug("Updated network gateway device: %s",
                  gateway_device_id)
        return self._make_gateway_device_dict(
            device_db, include_nsx_id=include_nsx_id)

    def delete_gateway_device(self, context, device_id):
        with db_api.CONTEXT_WRITER.using(context):
            # A gateway device should not be deleted
            # if it is used in any network gateway service
            if self._is_device_in_use(context, device_id):
                raise GatewayDeviceInUse(device_id=device_id)
            device_db = self._get_gateway_device(context, device_id)
            context.session.delete(device_db)
        LOG.debug("Deleted network gateway device: %s.", device_id)
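
As a reading aid before the model changes below: a minimal, hypothetical sketch of how the retired MH plugin composed the mixin above. `NetworkGatewayMixin` carries only DB and validation logic; `create_port`, `delete_port` and `_delete_ip_allocation` are expected to come from the core plugin class it is mixed into (the class names here are illustrative, not from this change):

class FakeCorePlugin(object):
    # Stand-in for the core plugin that supplies port primitives.
    def create_port(self, context, port):
        return {'id': 'hypothetical-port-id', 'fixed_ips': []}

    def delete_port(self, context, port_id, nw_gw_port_check=True):
        pass

    def _delete_ip_allocation(self, context, network_id,
                              subnet_id, ip_address):
        pass


class FakeGatewayPlugin(NetworkGatewayMixin, FakeCorePlugin):
    pass
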
@ -22,7 +22,6 @@ This module defines data models used by the VMware NSX plugin family.
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql

from neutron.db import models_v2
from oslo_db.sqlalchemy import models
@ -296,49 +295,6 @@ class Lsn(models_v2.model_base.BASEV2, models.TimestampMixin):
        self.lsn_id = lsn_id


class QoSQueue(model_base.BASEV2, model_base.HasId, model_base.HasProject,
               models.TimestampMixin):
    name = sa.Column(sa.String(255))
    default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
    min = sa.Column(sa.Integer, nullable=False)
    max = sa.Column(sa.Integer, nullable=True)
    qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
                                    name='qosqueues_qos_marking'))
    dscp = sa.Column(sa.Integer)


class PortQueueMapping(model_base.BASEV2, models.TimestampMixin):
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey("ports.id", ondelete="CASCADE"),
                        primary_key=True)

    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
                         primary_key=True)

    # Add a relationship to the Port model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))


class NetworkQueueMapping(model_base.BASEV2, models.TimestampMixin):
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey("networks.id", ondelete="CASCADE"),
                           primary_key=True)

    queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
                                                      ondelete="CASCADE"))

    # Add a relationship to the Network model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("qos_queue", uselist=False,
                            cascade='delete', lazy='joined'))


class NsxL2GWConnectionMapping(model_base.BASEV2, models.TimestampMixin):
    """Define a mapping between L2 gateway connection and bridge endpoint."""
    __tablename__ = 'nsx_l2gw_connection_mappings'
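
For context on the QoS models removed above, a hedged sketch (assuming an open SQLAlchemy session; function and identifiers are hypothetical) of what the `lazy='joined'` backrefs bought: loading a port pulls its queue binding in the same SELECT, with no extra round trip:

def port_queue_id(session, port_id):
    # qos_queue is eagerly loaded thanks to the joined backref
    port = session.query(models_v2.Port).get(port_id)
    binding = port.qos_queue
    return binding.queue_id if binding else None
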
@ -1,265 +0,0 @@
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from sqlalchemy.orm import exc

from neutron.db import models_v2

from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.db import api as db_api
from neutron_lib.db import model_query
from neutron_lib.db import resource_extend
from neutron_lib.db import utils as db_utils

from oslo_log import log
from oslo_utils import uuidutils

from vmware_nsx.db import nsx_models
from vmware_nsx.extensions import qos_queue as qos

LOG = log.getLogger(__name__)


@resource_extend.has_resource_extenders
class QoSDbMixin(qos.QueuePluginBase):
    """Mixin class to add queues."""

    def create_qos_queue(self, context, qos_queue):
        q = qos_queue['qos_queue']
        with db_api.CONTEXT_WRITER.using(context):
            qos_queue = nsx_models.QoSQueue(
                id=q.get('id', uuidutils.generate_uuid()),
                name=q.get('name'),
                tenant_id=q['tenant_id'],
                default=q.get('default'),
                min=q.get('min'),
                max=q.get('max'),
                qos_marking=q.get('qos_marking'),
                dscp=q.get('dscp'))
            context.session.add(qos_queue)
        return self._make_qos_queue_dict(qos_queue)

    def get_qos_queue(self, context, queue_id, fields=None):
        return self._make_qos_queue_dict(
            self._get_qos_queue(context, queue_id), fields)

    def _get_qos_queue(self, context, queue_id):
        try:
            return model_query.get_by_id(context, nsx_models.QoSQueue,
                                         queue_id)
        except exc.NoResultFound:
            raise qos.QueueNotFound(id=queue_id)

    def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
                       limit=None, marker=None, page_reverse=False):
        marker_obj = db_utils.get_marker_obj(self, context, 'qos_queue',
                                             limit, marker)
        return model_query.get_collection(context, nsx_models.QoSQueue,
                                          self._make_qos_queue_dict,
                                          filters=filters, fields=fields,
                                          sorts=sorts, limit=limit,
                                          marker_obj=marker_obj,
                                          page_reverse=page_reverse)

    def delete_qos_queue(self, context, queue_id):
        with db_api.CONTEXT_WRITER.using(context):
            qos_queue = self._get_qos_queue(context, queue_id)
            context.session.delete(qos_queue)

    def _process_port_queue_mapping(self, context, port_data, queue_id):
        port_data[qos.QUEUE] = queue_id
        if not queue_id:
            return
        with db_api.CONTEXT_WRITER.using(context):
            context.session.add(nsx_models.PortQueueMapping(
                port_id=port_data['id'],
                queue_id=queue_id))

    def _get_port_queue_bindings(self, context, filters=None, fields=None):
        return model_query.get_collection(context,
                                          nsx_models.PortQueueMapping,
                                          self._make_port_queue_binding_dict,
                                          filters=filters, fields=fields)

    def _delete_port_queue_mapping(self, context, port_id):
        query = model_query.query_with_hooks(context,
                                             nsx_models.PortQueueMapping)
        try:
            binding = query.filter(
                nsx_models.PortQueueMapping.port_id == port_id).one()
        except exc.NoResultFound:
            # return since this can happen if we are updating a port that
            # did not already have a queue on it. There is no need to check
            # if there is one before deleting if we return here.
            return
        with db_api.CONTEXT_WRITER.using(context):
            context.session.delete(binding)

    def _process_network_queue_mapping(self, context, net_data, queue_id):
        net_data[qos.QUEUE] = queue_id
        if not queue_id:
            return
        with db_api.CONTEXT_WRITER.using(context):
            context.session.add(
                nsx_models.NetworkQueueMapping(network_id=net_data['id'],
                                               queue_id=queue_id))

    def _get_network_queue_bindings(self, context, filters=None, fields=None):
        return model_query.get_collection(
            context,
            nsx_models.NetworkQueueMapping,
            self._make_network_queue_binding_dict,
            filters=filters, fields=fields)

    def _delete_network_queue_mapping(self, context, network_id):
        query = model_query.query_with_hooks(
            context, nsx_models.NetworkQueueMapping)
        with db_api.CONTEXT_WRITER.using(context):
            binding = query.filter_by(network_id=network_id).first()
            if binding:
                context.session.delete(binding)

    @staticmethod
    @resource_extend.extends([net_def.COLLECTION_NAME])
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_dict_qos_queue(obj_res, obj_db):
        queue_mapping = obj_db['qos_queue']
        if queue_mapping:
            obj_res[qos.QUEUE] = queue_mapping.get('queue_id')
        return obj_res

    def _make_qos_queue_dict(self, queue, fields=None):
        res = {'id': queue['id'],
               'name': queue.get('name'),
               'default': queue.get('default'),
               'tenant_id': queue['tenant_id'],
               'min': queue.get('min'),
               'max': queue.get('max'),
               'qos_marking': queue.get('qos_marking'),
               'dscp': queue.get('dscp')}
        return db_utils.resource_fields(res, fields)

    def _make_port_queue_binding_dict(self, queue, fields=None):
        res = {'port_id': queue['port_id'],
               'queue_id': queue['queue_id']}
        return db_utils.resource_fields(res, fields)

    def _make_network_queue_binding_dict(self, queue, fields=None):
        res = {'network_id': queue['network_id'],
               'queue_id': queue['queue_id']}
        return db_utils.resource_fields(res, fields)

    def _check_for_queue_and_create(self, context, port):
        """Check for queue and create.

        This function determines if a port should be associated with a
        queue. It works by first querying NetworkQueueMapping to determine
        if the network is associated with a queue. If so, then it queries
        NetworkQueueMapping for all the networks that are associated with
        this queue. Next, it queries against all the ports on these networks
        with the port device_id. Finally it queries PortQueueMapping. If that
        query returns a queue_id, that is returned. Otherwise a queue is
        created that is the size of the queue associated with the network and
        that queue_id is returned.

        If the network is not associated with a queue we then query to see
        if there is a default queue in the system. If so, a copy of that is
        created and the queue_id is returned.

        Otherwise None is returned. None is also returned if the port does
        not have a device_id or if the device_owner is network:
        """

        queue_to_create = None
        # If there is no device_id don't create a queue. The queue will be
        # created on update port when the device_id is present. Also don't
        # apply QoS to network ports.
        if (not port.get('device_id') or
                port['device_owner'].startswith('network:')):
            return

        # Check if there is a queue associated with the network
        filters = {'network_id': [port['network_id']]}
        network_queue_id = self._get_network_queue_bindings(
            context, filters, ['queue_id'])
        if network_queue_id:
            # get networks that queue is associated with
            filters = {'queue_id': [network_queue_id[0]['queue_id']]}
            networks_with_same_queue = self._get_network_queue_bindings(
                context, filters)

            # get the ports on these networks with the same queue and
            # device_id
            filters = {'device_id': [port.get('device_id')],
                       'network_id': [network['network_id'] for
                                      network in networks_with_same_queue]}
            query = model_query.query_with_hooks(context, models_v2.Port.id)
            query = model_query.apply_filters(query, models_v2.Port,
                                              filters, context)
            ports_ids = [p[0] for p in query]
            if ports_ids:
                # shared queue already exists, find the queue id
                queues = self._get_port_queue_bindings(context,
                                                       {'port_id': ports_ids},
                                                       ['queue_id'])
                if queues:
                    return queues[0]['queue_id']

            # get the size of the queue we want to create
            queue_to_create = self.get_qos_queue(
                context, network_queue_id[0]['queue_id'])

        else:
            # check for default queue
            filters = {'default': [True]}
            # context is elevated since default queue is owned by admin
            queue_to_create = self.get_qos_queues(context.elevated(), filters)
            if not queue_to_create:
                return
            queue_to_create = queue_to_create[0]

        # create the queue
        tenant_id = port['tenant_id']
        if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'):
            queue_to_create['max'] = int(queue_to_create['max'] *
                                         port[qos.RXTX_FACTOR])
        queue = {'qos_queue': {'name': queue_to_create.get('name'),
                               'min': queue_to_create.get('min'),
                               'max': queue_to_create.get('max'),
                               'dscp': queue_to_create.get('dscp'),
                               'qos_marking':
                               queue_to_create.get('qos_marking'),
                               'tenant_id': tenant_id}}
        return self.create_qos_queue(context, queue, False)['id']
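
    # NOTE: the rxtx_factor scaling above is plain arithmetic. For example,
    # a network queue with max=1000 copied for a port carrying
    # rxtx_factor=1.5 yields int(1000 * 1.5) == 1500 as the new queue's
    # 'max' (the figures here are illustrative).
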
    def _validate_qos_queue(self, context, qos_queue):
        if qos_queue.get('default'):
            if context.is_admin:
                if self.get_qos_queues(context, filters={'default': [True]}):
                    raise qos.DefaultQueueAlreadyExists()
            else:
                raise qos.DefaultQueueCreateNotAdmin()
        if qos_queue.get('qos_marking') == 'trusted':
            dscp = qos_queue.pop('dscp')
            if dscp:
                # must raise because a non-zero dscp was provided
                raise qos.QueueInvalidMarking()
            LOG.info("DSCP value (%s) will be ignored with 'trusted' "
                     "marking", dscp)
        max = qos_queue.get('max')
        min = qos_queue.get('min')
        # Max can be None
        if max and min > max:
            raise qos.QueueMinGreaterMax()
@ -1,266 +0,0 @@
# Copyright 2013 VMware. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

from oslo_config import cfg

from neutron.api.v2 import resource_helper

from neutron_lib.api import extensions
from neutron_lib.api import validators
from neutron_lib import constants
from neutron_lib.db import constants as db_const

from vmware_nsx._i18n import _

GATEWAY_RESOURCE_NAME = "network_gateway"
DEVICE_RESOURCE_NAME = "gateway_device"
# Use dash for alias and collection name
ALIAS = GATEWAY_RESOURCE_NAME.replace('_', '-')
NETWORK_GATEWAYS = "%ss" % ALIAS
GATEWAY_DEVICES = "%ss" % DEVICE_RESOURCE_NAME.replace('_', '-')
DEVICE_ID_ATTR = 'id'
IFACE_NAME_ATTR = 'interface_name'


# TODO(salv-orlando): This type definition is duplicated into
# openstack/vmware-nsx. This temporary duplication should be removed once the
# plugin decomposition is finished.
# Allowed network types for the NSX Plugin
class NetworkTypes(object):
    """Allowed provider network types for the NSX Plugin."""
    L3_EXT = 'l3_ext'
    STT = 'stt'
    GRE = 'gre'
    FLAT = 'flat'
    VLAN = 'vlan'
    BRIDGE = 'bridge'


# Attribute Map for Network Gateway Resource
# TODO(salvatore-orlando): add admin state as other neutron resources
RESOURCE_ATTRIBUTE_MAP = {
    NETWORK_GATEWAYS: {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': db_const.NAME_FIELD_SIZE},
                 'is_visible': True, 'default': ''},
        'default': {'allow_post': False, 'allow_put': False,
                    'is_visible': True},
        'devices': {'allow_post': True, 'allow_put': False,
                    'validate': {'type:device_list': None},
                    'is_visible': True},
        'ports': {'allow_post': False, 'allow_put': False,
                  'default': [],
                  'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string':
                                   db_const.PROJECT_ID_FIELD_SIZE},
                      'required_by_policy': True,
                      'is_visible': True}
    },
    GATEWAY_DEVICES: {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': db_const.NAME_FIELD_SIZE},
                 'is_visible': True, 'default': ''},
        'client_certificate': {'allow_post': True, 'allow_put': True,
                               'validate': {'type:string': None},
                               'is_visible': True},
        'connector_type': {'allow_post': True, 'allow_put': True,
                           'validate': {'type:connector_type': None},
                           'is_visible': True},
        'connector_ip': {'allow_post': True, 'allow_put': True,
                         'validate': {'type:ip_address': None},
                         'is_visible': True},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'validate': {'type:string':
                                   db_const.PROJECT_ID_FIELD_SIZE},
                      'required_by_policy': True,
                      'is_visible': True},
        'status': {'allow_post': False, 'allow_put': False,
                   'is_visible': True},
    }
}


def _validate_device_list(data, valid_values=None):
    """Validate the list of service definitions."""
    if not data:
        # Devices must be provided
        msg = _("Cannot create a gateway with an empty device list")
        return msg
    try:
        for device in data:
            key_specs = {DEVICE_ID_ATTR:
                         {'type:regex': constants.UUID_PATTERN,
                          'required': True},
                         IFACE_NAME_ATTR:
                         {'type:string': None,
                          'required': False}}
            err_msg = validators.validate_dict(
                device, key_specs=key_specs)
            if err_msg:
                return err_msg
            unexpected_keys = [key for key in device if key not in key_specs]
            if unexpected_keys:
                err_msg = (_("Unexpected keys found in device "
                             "description:%s")
                           % ",".join(unexpected_keys))
                return err_msg
    except TypeError:
        return (_("%s: provided data are not iterable") %
                _validate_device_list.__name__)

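# NOTE: a hypothetical 'devices' payload that satisfies the validator
# above -- each entry a dict with a UUID 'id' and an optional
# 'interface_name' (the values shown are illustrative):
#   [{'id': '8b12b6bf-bbae-4ab6-92b6-cdcf9757f35f',
#     'interface_name': 'breth0'}]
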
def _validate_connector_type(data, valid_values=None):
    if not data:
        # A connector type is compulsory
        msg = _("A connector type is required to create a gateway device")
        return msg
    connector_types = (valid_values if valid_values else
                       [NetworkTypes.GRE,
                        NetworkTypes.STT,
                        NetworkTypes.BRIDGE,
                        'ipsec%s' % NetworkTypes.GRE,
                        'ipsec%s' % NetworkTypes.STT])
    if data not in connector_types:
        msg = _("Unknown connector type: %s") % data
        return msg

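# NOTE: with the default valid_values above, the accepted connector types
# are 'gre', 'stt', 'bridge', 'ipsecgre' and 'ipsecstt'; any other value
# yields the "Unknown connector type" message.
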
nw_gw_quota_opts = [
    cfg.IntOpt('quota_network_gateway',
               default=5,
               help=_('Number of network gateways allowed per tenant, '
                      '-1 for unlimited'))
]

cfg.CONF.register_opts(nw_gw_quota_opts, 'QUOTAS')

validators.add_validator('device_list', _validate_device_list)
validators.add_validator('connector_type', _validate_connector_type)


class Networkgw(extensions.ExtensionDescriptor):
    """API extension for Layer-2 Gateway support.

    The Layer-2 gateway feature allows for connecting neutron networks
    with external networks at the layer-2 level. No assumption is made on
    the location of the external network, which might not even be directly
    reachable from the hosts where the VMs are deployed.

    This is achieved by instantiating 'network gateways', and then connecting
    Neutron networks to them.
    """

    @classmethod
    def get_name(cls):
        return "Network Gateway"

    @classmethod
    def get_alias(cls):
        return ALIAS

    @classmethod
    def get_description(cls):
        return "Connects Neutron networks with external networks at layer 2."

    @classmethod
    def get_updated(cls):
        return "2014-01-01T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""

        member_actions = {
            GATEWAY_RESOURCE_NAME.replace('_', '-'): {
                'connect_network': 'PUT',
                'disconnect_network': 'PUT'}}

        plural_mappings = resource_helper.build_plural_mappings(
            {}, RESOURCE_ATTRIBUTE_MAP)

        return resource_helper.build_resource_info(plural_mappings,
                                                   RESOURCE_ATTRIBUTE_MAP,
                                                   None,
                                                   action_map=member_actions,
                                                   register_quota=True,
                                                   translate_name=True)

    def get_extended_resources(self, version):
        if version == "2.0":
            return RESOURCE_ATTRIBUTE_MAP
        else:
            return {}


class NetworkGatewayPluginBase(object):

    @abc.abstractmethod
    def create_network_gateway(self, context, network_gateway):
        pass

    @abc.abstractmethod
    def update_network_gateway(self, context, id, network_gateway):
        pass

    @abc.abstractmethod
    def get_network_gateway(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def delete_network_gateway(self, context, id):
        pass

    @abc.abstractmethod
    def get_network_gateways(self, context, filters=None, fields=None,
                             sorts=None, limit=None, marker=None,
                             page_reverse=False):
        pass

    @abc.abstractmethod
    def connect_network(self, context, network_gateway_id,
                        network_mapping_info):
        pass

    @abc.abstractmethod
    def disconnect_network(self, context, network_gateway_id,
                           network_mapping_info):
        pass

    @abc.abstractmethod
    def create_gateway_device(self, context, gateway_device):
        pass

    @abc.abstractmethod
    def update_gateway_device(self, context, id, gateway_device):
        pass

    @abc.abstractmethod
    def delete_gateway_device(self, context, id):
        pass

    @abc.abstractmethod
    def get_gateway_device(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def get_gateway_devices(self, context, filters=None, fields=None,
                            sorts=None, limit=None, marker=None,
                            page_reverse=False):
        pass
@ -1,232 +0,0 @@
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import abc

from neutron.api import extensions
from neutron.api.v2 import base

from neutron_lib.api import converters
from neutron_lib.api import extensions as api_extensions
from neutron_lib.db import constants as db_const
from neutron_lib import exceptions as nexception
from neutron_lib.plugins import directory

from vmware_nsx._i18n import _

ALIAS = 'qos-queue'
# For policy.json/Auth
qos_queue_create = "create_qos_queue"
qos_queue_delete = "delete_qos_queue"
qos_queue_get = "get_qos_queue"
qos_queue_list = "get_qos_queues"


class DefaultQueueCreateNotAdmin(nexception.InUse):
    message = _("Need to be admin in order to create queue called default")


class DefaultQueueAlreadyExists(nexception.InUse):
    message = _("Default queue already exists.")


class QueueInvalidDscp(nexception.InvalidInput):
    message = _("Invalid value for dscp %(data)s: must be an integer value"
                " between 0 and 63.")


class QueueInvalidMarking(nexception.InvalidInput):
    message = _("The qos marking cannot be set to 'trusted' "
                "when the DSCP field is set")


class QueueMinGreaterMax(nexception.InvalidInput):
    message = _("Invalid bandwidth rate, min greater than max.")


class QueueInvalidBandwidth(nexception.InvalidInput):
    message = _("Invalid bandwidth rate, %(data)s must be a non negative"
                " integer.")


class QueueNotFound(nexception.NotFound):
    message = _("Queue %(id)s does not exist")


class QueueInUseByPort(nexception.InUse):
    message = _("Unable to delete queue attached to port.")


class QueuePortBindingNotFound(nexception.NotFound):
    message = _("Port is not associated with a queue")


def convert_to_unsigned_int_or_none(val):
    if val is None:
        return
    try:
        val = int(val)
        if val < 0:
            raise ValueError()
    except (ValueError, TypeError):
        msg = _("'%s' must be a non negative integer.") % val
        raise nexception.InvalidInput(error_message=msg)
    return val


def convert_to_unsigned_int_or_none_max_63(val):
    val = convert_to_unsigned_int_or_none(val)
    # val may legitimately still be None here; only range-check integers
    if val is not None and val > 63:
        raise QueueInvalidDscp(data=val)
    return val

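# NOTE: illustrative behaviour of the two converters above:
#   convert_to_unsigned_int_or_none(None)        -> None
#   convert_to_unsigned_int_or_none('42')        -> 42
#   convert_to_unsigned_int_or_none(-1)          -> InvalidInput
#   convert_to_unsigned_int_or_none_max_63(63)   -> 63
#   convert_to_unsigned_int_or_none_max_63(64)   -> QueueInvalidDscp
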
# As per NSX API, if a queue is trusted, DSCP must be omitted; if a queue is
# untrusted, DSCP must be specified. Whichever default values we choose for
# the tuple (qos_marking, dscp), there will be at least one combination of a
# request with conflicting values: for instance given the default values
# below, requests with qos_marking = 'trusted' and the default dscp value
# will fail. To avoid forcing API users to explicitly specify a setting for
# clearing the DSCP field when a trusted queue is created, the code serving
# this API will adopt the following behaviour when qos_marking is set to
# 'trusted':
# - if the DSCP attribute is set to the default value (0), silently drop
#   its value
# - if the DSCP attribute is set to anything other than 0 (but still a valid
#   DSCP value) return a 400 error as the qos_marking and DSCP settings
#   conflict.
# TODO(salv-orlando): Evaluate whether it will be possible from a backward
# compatibility perspective to change the default value for DSCP in order to
# avoid this peculiar behaviour
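
# NOTE: concretely, under the rule described above a request with
# qos_marking='trusted' and dscp left at its default (0) succeeds and the
# DSCP value is silently dropped, while qos_marking='trusted' with, say,
# dscp=26 fails validation with QueueInvalidMarking (a 400 error).
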
RESOURCE_ATTRIBUTE_MAP = {
    'qos_queues': {
        'id': {'allow_post': False, 'allow_put': False,
               'is_visible': True},
        'default': {'allow_post': True, 'allow_put': False,
                    'convert_to': converters.convert_to_boolean,
                    'is_visible': True, 'default': False},
        'name': {'allow_post': True, 'allow_put': False,
                 'validate': {'type:string': db_const.NAME_FIELD_SIZE},
                 'is_visible': True, 'default': ''},
        'min': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': '0',
                'convert_to': convert_to_unsigned_int_or_none},
        'max': {'allow_post': True, 'allow_put': False,
                'is_visible': True, 'default': None,
                'convert_to': convert_to_unsigned_int_or_none},
        'qos_marking': {'allow_post': True, 'allow_put': False,
                        'validate': {'type:values': ['untrusted',
                                                     'trusted']},
                        'default': 'untrusted', 'is_visible': True},
        'dscp': {'allow_post': True, 'allow_put': False,
                 'is_visible': True, 'default': '0',
                 'convert_to': convert_to_unsigned_int_or_none_max_63},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {
                          'type:string': db_const.PROJECT_ID_FIELD_SIZE},
                      'is_visible': True},
    },
}

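# NOTE: a hypothetical POST body accepted by the schema above (the values
# are illustrative):
#   {"qos_queue": {"name": "svc-queue", "min": 100, "max": 1000,
#                  "qos_marking": "untrusted", "dscp": 18,
#                  "tenant_id": "hypothetical-project-id"}}
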
QUEUE = 'queue_id'
RXTX_FACTOR = 'rxtx_factor'
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        RXTX_FACTOR: {'allow_post': True,
                      # FIXME(arosen): the plugin currently does not
                      # implement updating rxtx factor on port.
                      'allow_put': True,
                      'is_visible': False,
                      'default': 1,
                      'enforce_policy': True,
                      'convert_to':
                      converters.convert_to_positive_float_or_none},

        QUEUE: {'allow_post': False,
                'allow_put': False,
                'is_visible': True,
                'default': False,
                'enforce_policy': True}},
    'networks': {QUEUE: {'allow_post': True,
                         'allow_put': True,
                         'is_visible': True,
                         'default': False,
                         'enforce_policy': True}}
}


class Qos_queue(api_extensions.ExtensionDescriptor):
    """Port Queue extension."""

    @classmethod
    def get_name(cls):
        return "QoS Queue"

    @classmethod
    def get_alias(cls):
        return ALIAS

    @classmethod
    def get_description(cls):
        return "NSX QoS extension."

    @classmethod
    def get_updated(cls):
        return "2014-01-01T00:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        exts = []
        plugin = directory.get_plugin()
        resource_name = 'qos_queue'
        collection_name = resource_name.replace('_', '-') + "s"
        params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + "s", dict())
        controller = base.create_resource(collection_name,
                                          resource_name,
                                          plugin, params, allow_bulk=False)

        ex = extensions.ResourceExtension(collection_name,
                                          controller)
        exts.append(ex)

        return exts

    def get_extended_resources(self, version):
        if version == "2.0":
            return dict(list(EXTENDED_ATTRIBUTES_2_0.items()) +
                        list(RESOURCE_ATTRIBUTE_MAP.items()))
        else:
            return {}


class QueuePluginBase(object):
    @abc.abstractmethod
    def create_qos_queue(self, context, queue):
        pass

    @abc.abstractmethod
    def delete_qos_queue(self, context, id):
        pass

    @abc.abstractmethod
    def get_qos_queue(self, context, id, fields=None):
        pass

    @abc.abstractmethod
    def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
                       limit=None, marker=None, page_reverse=False):
        pass
@ -1,219 +0,0 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

from oslo_log import log
from oslo_serialization import jsonutils

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import switch

HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

GWSERVICE_RESOURCE = "gateway-service"
TRANSPORTNODE_RESOURCE = "transport-node"

LOG = log.getLogger(__name__)


def create_l2_gw_service(cluster, tenant_id, display_name, devices):
    """Create a NSX Layer-2 Network Gateway Service.

    :param cluster: The target NSX cluster
    :param tenant_id: Identifier of the OpenStack tenant for which
        the gateway service is created
    :param display_name: Descriptive name of this gateway service
    :param devices: List of transport node uuids (and network
        interfaces on them) to use for the network gateway service
    :raise NsxApiException: if there is a problem while communicating
        with the NSX controller
    """
    # NOTE(salvatore-orlando): This is a little confusing, but device_id in
    # NSX is actually the identifier of a physical interface on the gateway
    # device, which in the Neutron API is referred to as interface_name
    gateways = [{"transport_node_uuid": device['id'],
                 "device_id": device['interface_name'],
                 "type": "L2Gateway"} for device in devices]
    gwservice_obj = {
        "display_name": utils.check_and_truncate(display_name),
        "tags": utils.get_tags(os_tid=tenant_id),
        "gateways": gateways,
        "type": "L2GatewayServiceConfig"
    }
    return nsxlib.do_request(
        HTTP_POST, nsxlib._build_uri_path(GWSERVICE_RESOURCE),
        jsonutils.dumps(gwservice_obj), cluster=cluster)

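# NOTE: for a single device, the body POSTed above comes out roughly as
# follows (identifiers are hypothetical, and the exact tag layout is
# whatever utils.get_tags produces):
#   {"display_name": "gw-svc",
#    "tags": [{"scope": "os_tid", "tag": "tenant-uuid"}],
#    "gateways": [{"transport_node_uuid": "node-uuid",
#                  "device_id": "breth0", "type": "L2Gateway"}],
#    "type": "L2GatewayServiceConfig"}

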
def plug_l2_gw_service(cluster, lswitch_id, lport_id,
                       gateway_id, vlan_id=None):
    """Plug a Layer-2 Gateway Attachment object in a logical port."""
    att_obj = {'type': 'L2GatewayAttachment',
               'l2_gateway_service_uuid': gateway_id}
    if vlan_id:
        att_obj['vlan_id'] = vlan_id
    return switch.plug_interface(cluster, lswitch_id, lport_id, att_obj)


def get_l2_gw_service(cluster, gateway_id):
    return nsxlib.do_request(
        HTTP_GET, nsxlib._build_uri_path(GWSERVICE_RESOURCE,
                                         resource_id=gateway_id),
        cluster=cluster)


def get_l2_gw_services(cluster, tenant_id=None,
                       fields=None, filters=None):
    actual_filters = dict(filters or {})
    if tenant_id:
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    return nsxlib.get_all_query_pages(
        nsxlib._build_uri_path(GWSERVICE_RESOURCE,
                               filters=actual_filters),
        cluster)


def update_l2_gw_service(cluster, gateway_id, display_name):
    # TODO(salvatore-orlando): Allow updates for gateways too
    gwservice_obj = get_l2_gw_service(cluster, gateway_id)
    if not display_name:
        # Nothing to update
        return gwservice_obj
    gwservice_obj["display_name"] = utils.check_and_truncate(display_name)
    return nsxlib.do_request(HTTP_PUT,
                             nsxlib._build_uri_path(GWSERVICE_RESOURCE,
                                                    resource_id=gateway_id),
                             jsonutils.dumps(gwservice_obj), cluster=cluster)


def delete_l2_gw_service(cluster, gateway_id):
    nsxlib.do_request(HTTP_DELETE,
                      nsxlib._build_uri_path(GWSERVICE_RESOURCE,
                                             resource_id=gateway_id),
                      cluster=cluster)


def _build_gateway_device_body(tenant_id, display_name, neutron_id,
                               connector_type, connector_ip,
                               client_certificate, tz_uuid):

    connector_type_mappings = {
        utils.NetworkTypes.STT: "STTConnector",
        utils.NetworkTypes.GRE: "GREConnector",
        utils.NetworkTypes.BRIDGE: "BridgeConnector",
        'ipsec%s' % utils.NetworkTypes.STT: "IPsecSTT",
        'ipsec%s' % utils.NetworkTypes.GRE: "IPsecGRE",
        'ipsec_%s' % utils.NetworkTypes.STT: "IPsecSTT",
        'ipsec_%s' % utils.NetworkTypes.GRE: "IPsecGRE"}
    nsx_connector_type = connector_type_mappings.get(connector_type)
    if connector_type and not nsx_connector_type:
        LOG.error("There is no NSX mapping for connector type %s",
                  connector_type)
        raise nsx_exc.InvalidTransportType(transport_type=connector_type)

    body = {"display_name": utils.check_and_truncate(display_name),
            "tags": utils.get_tags(os_tid=tenant_id,
                                   q_gw_dev_id=neutron_id),
            "admin_status_enabled": True}

    if connector_ip and nsx_connector_type:
        body["transport_connectors"] = [
            {"transport_zone_uuid": tz_uuid,
             "ip_address": connector_ip,
             "type": nsx_connector_type}]

    if client_certificate:
        body["credential"] = {"client_certificate":
                              {"pem_encoded": client_certificate},
                              "type": "SecurityCertificateCredential"}
    return body

def create_gateway_device(cluster, tenant_id, display_name, neutron_id,
|
|
||||||
tz_uuid, connector_type, connector_ip,
|
|
||||||
client_certificate):
|
|
||||||
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
|
|
||||||
connector_type, connector_ip,
|
|
||||||
client_certificate, tz_uuid)
|
|
||||||
try:
|
|
||||||
return nsxlib.do_request(
|
|
||||||
HTTP_POST, nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE),
|
|
||||||
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
|
|
||||||
except api_exc.InvalidSecurityCertificate:
|
|
||||||
raise nsx_exc.InvalidSecurityCertificate()
|
|
||||||
|
|
||||||
|
|
||||||
def update_gateway_device(cluster, gateway_id, tenant_id,
|
|
||||||
display_name, neutron_id,
|
|
||||||
tz_uuid, connector_type, connector_ip,
|
|
||||||
client_certificate):
|
|
||||||
body = _build_gateway_device_body(tenant_id, display_name, neutron_id,
|
|
||||||
connector_type, connector_ip,
|
|
||||||
client_certificate, tz_uuid)
|
|
||||||
try:
|
|
||||||
return nsxlib.do_request(
|
|
||||||
HTTP_PUT,
|
|
||||||
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
|
|
||||||
resource_id=gateway_id),
|
|
||||||
jsonutils.dumps(body, sort_keys=True), cluster=cluster)
|
|
||||||
except api_exc.InvalidSecurityCertificate:
|
|
||||||
raise nsx_exc.InvalidSecurityCertificate()
|
|
||||||
|
|
||||||
|
|
||||||
def delete_gateway_device(cluster, device_uuid):
|
|
||||||
return nsxlib.do_request(HTTP_DELETE,
|
|
||||||
nsxlib._build_uri_path(TRANSPORTNODE_RESOURCE,
|
|
||||||
device_uuid),
|
|
||||||
cluster=cluster)
|
|
||||||
|
|
||||||
|
|
||||||
def get_gateway_device_status(cluster, device_uuid):
|
|
||||||
status_res = nsxlib.do_request(HTTP_GET,
|
|
||||||
nsxlib._build_uri_path(
|
|
||||||
TRANSPORTNODE_RESOURCE,
|
|
||||||
device_uuid,
|
|
||||||
extra_action='status'),
|
|
||||||
cluster=cluster)
|
|
||||||
# Returns the connection status
|
|
||||||
return status_res['connection']['connected']
|
|
||||||
|
|
||||||
|
|
||||||
def get_gateway_devices_status(cluster, tenant_id=None):
|
|
||||||
if tenant_id:
|
|
||||||
gw_device_query_path = nsxlib._build_uri_path(
|
|
||||||
TRANSPORTNODE_RESOURCE,
|
|
||||||
fields="uuid,tags",
|
|
||||||
relations="TransportNodeStatus",
|
|
||||||
filters={'tag': tenant_id,
|
|
||||||
'tag_scope': 'os_tid'})
|
|
||||||
else:
|
|
||||||
gw_device_query_path = nsxlib._build_uri_path(
|
|
||||||
TRANSPORTNODE_RESOURCE,
|
|
||||||
fields="uuid,tags",
|
|
||||||
relations="TransportNodeStatus")
|
|
||||||
|
|
||||||
response = nsxlib.get_all_query_pages(gw_device_query_path, cluster)
|
|
||||||
results = {}
|
|
||||||
for item in response:
|
|
||||||
results[item['uuid']] = (item['_relations']['TransportNodeStatus']
|
|
||||||
['connection']['connected'])
|
|
||||||
return results
|
|
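
Several of the helpers above (get_l2_gw_services, get_gateway_devices_status, and the router queries further down) scope backend queries to a tenant by filtering on a tag with scope os_tid rather than on a dedicated field. A minimal standalone sketch of that convention; build_tenant_filters is an illustrative name, not a plugin helper:

def build_tenant_filters(tenant_id=None, extra_filters=None):
    # The MH plugin tags OpenStack tenant ids under the 'os_tid' tag
    # scope, so tenant scoping becomes a (tag, tag_scope) filter pair.
    filters = dict(extra_filters or {})
    if tenant_id:
        filters['tag'] = tenant_id
        filters['tag_scope'] = 'os_tid'
    return filters

print(build_tenant_filters('a1b2c3'))
# {'tag': 'a1b2c3', 'tag_scope': 'os_tid'}
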
@ -1,73 +0,0 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib.api import validators
from neutron_lib import exceptions as exception
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib

HTTP_POST = "POST"
HTTP_DELETE = "DELETE"

LQUEUE_RESOURCE = "lqueue"

LOG = log.getLogger(__name__)


def create_lqueue(cluster, queue_data):
    params = {
        'name': 'display_name',
        'qos_marking': 'qos_marking',
        'min': 'min_bandwidth_rate',
        'max': 'max_bandwidth_rate',
        'dscp': 'dscp'
    }
    queue_obj = dict(
        (nsx_name, queue_data.get(api_name))
        for api_name, nsx_name in six.iteritems(params)
        if validators.is_attr_set(queue_data.get(api_name))
    )
    if 'display_name' in queue_obj:
        queue_obj['display_name'] = utils.check_and_truncate(
            queue_obj['display_name'])

    queue_obj['tags'] = utils.get_tags()
    try:
        return nsxlib.do_request(HTTP_POST,
                                 nsxlib._build_uri_path(LQUEUE_RESOURCE),
                                 jsonutils.dumps(queue_obj),
                                 cluster=cluster)['uuid']
    except api_exc.NsxApiException:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()


def delete_lqueue(cluster, queue_id):
    try:
        nsxlib.do_request(HTTP_DELETE,
                          nsxlib._build_uri_path(LQUEUE_RESOURCE,
                                                 resource_id=queue_id),
                          cluster=cluster)
    except Exception:
        # FIXME(salv-orlando): This should not raise NeutronException
        with excutils.save_and_reraise_exception():
            raise exception.NeutronException()
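
create_lqueue above maps Neutron QoS queue attribute names onto their NSX field names with a dict comprehension, dropping any attribute the API request left unset. A self-contained sketch of the same pattern; the UNSET sentinel and is_attr_set below stand in for the neutron-lib validator:

UNSET = object()  # stand-in sentinel; neutron-lib uses its own constant


def is_attr_set(value):
    return value is not None and value is not UNSET


def translate(data, mapping):
    # Rename api_name -> nsx_name, keeping only attributes actually set.
    return {nsx_name: data.get(api_name)
            for api_name, nsx_name in mapping.items()
            if is_attr_set(data.get(api_name, UNSET))}


params = {'name': 'display_name', 'min': 'min_bandwidth_rate'}
print(translate({'name': 'gold', 'min': None}, params))
# {'display_name': 'gold'}
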
@ -1,708 +0,0 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import exceptions as exception
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
import six

from vmware_nsx._i18n import _
from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import switch
from vmware_nsx.nsxlib.mh import versioning

# @versioning.versioned decorator makes the apparent function body
# totally unrelated to the real function. This confuses pylint :(
# pylint: disable=assignment-from-no-return

HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

LROUTER_RESOURCE = "lrouter"
LROUTERPORT_RESOURCE = "lport/%s" % LROUTER_RESOURCE
LROUTERRIB_RESOURCE = "rib/%s" % LROUTER_RESOURCE
LROUTERNAT_RESOURCE = "nat/lrouter"
# Constants for NAT rules
MATCH_KEYS = ["destination_ip_addresses", "destination_port_max",
              "destination_port_min", "source_ip_addresses",
              "source_port_max", "source_port_min", "protocol"]

LOG = log.getLogger(__name__)


def _prepare_lrouter_body(name, neutron_router_id, tenant_id,
                          router_type, distributed=None, **kwargs):
    body = {
        "display_name": utils.check_and_truncate(name),
        "tags": utils.get_tags(os_tid=tenant_id,
                               q_router_id=neutron_router_id),
        "routing_config": {
            "type": router_type
        },
        "type": "LogicalRouterConfig",
        "replication_mode": cfg.CONF.NSX.replication_mode,
    }
    # Add the distributed key only if not None (i.e. True or False)
    if distributed is not None:
        body['distributed'] = distributed
    if kwargs:
        body["routing_config"].update(kwargs)
    return body


def _create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                     display_name, nexthop, distributed=None):
    implicit_routing_config = {
        "default_route_next_hop": {
            "gateway_ip_address": nexthop,
            "type": "RouterNextHop"
        },
    }
    lrouter_obj = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "SingleDefaultRouteImplicitRoutingConfig",
        distributed=distributed,
        **implicit_routing_config)
    return nsxlib.do_request(HTTP_POST,
                             nsxlib._build_uri_path(LROUTER_RESOURCE),
                             jsonutils.dumps(lrouter_obj), cluster=cluster)


def create_implicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop):
    """Create a NSX logical router on the specified cluster.

    :param cluster: The target NSX cluster
    :param tenant_id: Identifier of the Openstack tenant for which
                      the logical router is being created
    :param display_name: Descriptive name of this logical router
    :param nexthop: External gateway IP address for the logical router
    :raise NsxApiException: if there is a problem while communicating
                            with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id, display_name, nexthop)


def create_implicit_routing_lrouter_with_distribution(
    cluster, neutron_router_id, tenant_id, display_name,
    nexthop, distributed=None):
    """Create a NSX logical router on the specified cluster.

    This function also allows for creating distributed lrouters.

    :param cluster: The target NSX cluster
    :param tenant_id: Identifier of the Openstack tenant for which
                      the logical router is being created
    :param display_name: Descriptive name of this logical router
    :param nexthop: External gateway IP address for the logical router
    :param distributed: True for distributed logical routers
    :raise NsxApiException: if there is a problem while communicating
                            with the NSX controller
    """
    return _create_implicit_routing_lrouter(
        cluster, neutron_router_id, tenant_id,
        display_name, nexthop, distributed)


def create_explicit_routing_lrouter(cluster, neutron_router_id, tenant_id,
                                    display_name, nexthop, distributed=None):
    lrouter_obj = _prepare_lrouter_body(
        display_name, neutron_router_id, tenant_id,
        "RoutingTableRoutingConfig", distributed=distributed)
    router = nsxlib.do_request(HTTP_POST,
                               nsxlib._build_uri_path(LROUTER_RESOURCE),
                               jsonutils.dumps(lrouter_obj), cluster=cluster)
    default_gw = {'prefix': '0.0.0.0/0', 'next_hop_ip': nexthop}
    create_explicit_route_lrouter(cluster, router['uuid'], default_gw)
    return router


def delete_lrouter(cluster, lrouter_id):
    nsxlib.do_request(HTTP_DELETE,
                      nsxlib._build_uri_path(LROUTER_RESOURCE,
                                             resource_id=lrouter_id),
                      cluster=cluster)


def get_lrouter(cluster, lrouter_id):
    return nsxlib.do_request(HTTP_GET,
                             nsxlib._build_uri_path(
                                 LROUTER_RESOURCE,
                                 resource_id=lrouter_id,
                                 relations='LogicalRouterStatus'),
                             cluster=cluster)


def query_lrouters(cluster, fields=None, filters=None):
    return nsxlib.get_all_query_pages(
        nsxlib._build_uri_path(LROUTER_RESOURCE,
                               fields=fields,
                               relations='LogicalRouterStatus',
                               filters=filters),
        cluster)


def get_lrouters(cluster, tenant_id, fields=None, filters=None):
    # FIXME(salv-orlando): Fields parameter is ignored in this routine
    actual_filters = {}
    if filters:
        actual_filters.update(filters)
    if tenant_id:
        actual_filters['tag'] = tenant_id
        actual_filters['tag_scope'] = 'os_tid'
    lrouter_fields = "uuid,display_name,fabric_status,tags"
    return query_lrouters(cluster, lrouter_fields, actual_filters)


def update_implicit_routing_lrouter(cluster, r_id, display_name, nexthop):
    lrouter_obj = get_lrouter(cluster, r_id)
    if not display_name and not nexthop:
        # Nothing to update
        return lrouter_obj
    # It seems that this is faster than doing an if on display_name
    lrouter_obj["display_name"] = (utils.check_and_truncate(display_name) or
                                   lrouter_obj["display_name"])
    if nexthop:
        nh_element = lrouter_obj["routing_config"].get(
            "default_route_next_hop")
        if nh_element:
            nh_element["gateway_ip_address"] = nexthop
    return nsxlib.do_request(HTTP_PUT,
                             nsxlib._build_uri_path(LROUTER_RESOURCE,
                                                    resource_id=r_id),
                             jsonutils.dumps(lrouter_obj),
                             cluster=cluster)


def get_explicit_routes_lrouter(cluster, router_id, protocol_type='static'):
    static_filter = {'protocol': protocol_type}
    existing_routes = nsxlib.do_request(
        HTTP_GET,
        nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                               filters=static_filter,
                               fields="*",
                               parent_resource_id=router_id),
        cluster=cluster)['results']
    return existing_routes


def delete_explicit_route_lrouter(cluster, router_id, route_id):
    nsxlib.do_request(HTTP_DELETE,
                      nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                                             resource_id=route_id,
                                             parent_resource_id=router_id),
                      cluster=cluster)


def create_explicit_route_lrouter(cluster, router_id, route):
    next_hop_ip = route.get("nexthop") or route.get("next_hop_ip")
    prefix = route.get("destination") or route.get("prefix")
    uuid = nsxlib.do_request(
        HTTP_POST,
        nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                               parent_resource_id=router_id),
        jsonutils.dumps({
            "action": "accept",
            "next_hop_ip": next_hop_ip,
            "prefix": prefix,
            "protocol": "static"
        }),
        cluster=cluster)['uuid']
    return uuid


def update_explicit_routes_lrouter(cluster, router_id, routes):
    # Update in bulk: delete them all, and add the ones specified,
    # but keep track of what has been modified to allow roll-backs
    # in case of failures
    nsx_routes = get_explicit_routes_lrouter(cluster, router_id)
    try:
        deleted_routes = []
        added_routes = []
        # Omit the default route (0.0.0.0/0) from the processing;
        # this must be handled through the nexthop for the router
        for route in nsx_routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                delete_explicit_route_lrouter(cluster,
                                              router_id,
                                              route['uuid'])
                deleted_routes.append(route)
        for route in routes:
            prefix = route.get("destination") or route.get("prefix")
            if prefix != '0.0.0.0/0':
                uuid = create_explicit_route_lrouter(cluster,
                                                     router_id, route)
                added_routes.append(uuid)
    except api_exc.NsxApiException:
        LOG.exception('Cannot update NSX routes %(routes)s for '
                      'router %(router_id)s',
                      {'routes': routes, 'router_id': router_id})
        # Roll back to keep NSX in a consistent state
        with excutils.save_and_reraise_exception():
            if nsx_routes:
                if deleted_routes:
                    for route in deleted_routes:
                        create_explicit_route_lrouter(cluster,
                                                      router_id, route)
                if added_routes:
                    for route_id in added_routes:
                        delete_explicit_route_lrouter(cluster,
                                                      router_id, route_id)
    return nsx_routes


def get_default_route_explicit_routing_lrouter_v33(cluster, router_id):
    static_filter = {"protocol": "static",
                     "prefix": "0.0.0.0/0"}
    default_route = nsxlib.do_request(
        HTTP_GET,
        nsxlib._build_uri_path(LROUTERRIB_RESOURCE,
                               filters=static_filter,
                               fields="*",
                               parent_resource_id=router_id),
        cluster=cluster)["results"][0]
    return default_route


def get_default_route_explicit_routing_lrouter_v32(cluster, router_id):
    # Scan all routes because 3.2 does not support query by prefix
    all_routes = get_explicit_routes_lrouter(cluster, router_id)
    for route in all_routes:
        if route['prefix'] == '0.0.0.0/0':
            return route


def update_default_gw_explicit_routing_lrouter(cluster, router_id, next_hop):
    default_route = get_default_route_explicit_routing_lrouter(cluster,
                                                               router_id)
    if next_hop != default_route["next_hop_ip"]:
        new_default_route = {"action": "accept",
                             "next_hop_ip": next_hop,
                             "prefix": "0.0.0.0/0",
                             "protocol": "static"}
        nsxlib.do_request(HTTP_PUT,
                          nsxlib._build_uri_path(
                              LROUTERRIB_RESOURCE,
                              resource_id=default_route['uuid'],
                              parent_resource_id=router_id),
                          jsonutils.dumps(new_default_route),
                          cluster=cluster)


def update_explicit_routing_lrouter(cluster, router_id,
                                    display_name, next_hop, routes=None):
    update_implicit_routing_lrouter(cluster, router_id, display_name, next_hop)
    if next_hop:
        update_default_gw_explicit_routing_lrouter(cluster,
                                                   router_id, next_hop)
    if routes is not None:
        return update_explicit_routes_lrouter(cluster, router_id, routes)


def query_lrouter_lports(cluster, lr_uuid, fields="*",
                         filters=None, relations=None):
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                 parent_resource_id=lr_uuid,
                                 fields=fields, filters=filters,
                                 relations=relations)
    return nsxlib.do_request(HTTP_GET, uri, cluster=cluster)['results']


def create_router_lport(cluster, lrouter_uuid, tenant_id, neutron_port_id,
                        display_name, admin_status_enabled, ip_addresses,
                        mac_address=None):
    """Creates a logical port on the assigned logical router."""
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Only add the mac_address to lport_obj if present. This is because
    # when creating the fake_ext_gw there is no mac_address present.
    if mac_address:
        lport_obj['mac_address'] = mac_address
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                  parent_resource_id=lrouter_uuid)
    result = nsxlib.do_request(HTTP_POST, path, jsonutils.dumps(lport_obj),
                               cluster=cluster)

    LOG.debug("Created logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': result['uuid'],
               'lrouter_uuid': lrouter_uuid})
    return result


def update_router_lport(cluster, lrouter_uuid, lrouter_port_uuid,
                        tenant_id, neutron_port_id, display_name,
                        admin_status_enabled, ip_addresses):
    """Updates a logical port on the assigned logical router."""
    lport_obj = dict(
        admin_status_enabled=admin_status_enabled,
        display_name=display_name,
        tags=utils.get_tags(os_tid=tenant_id, q_port_id=neutron_port_id),
        ip_addresses=ip_addresses,
        type="LogicalRouterPortConfig"
    )
    # Do not pass null items to NSX. Iterate over a copy of the keys so
    # the dict can be mutated safely while looping.
    for key in list(lport_obj.keys()):
        if lport_obj[key] is None:
            del lport_obj[key]
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE,
                                  lrouter_port_uuid,
                                  parent_resource_id=lrouter_uuid)
    result = nsxlib.do_request(HTTP_PUT, path,
                               jsonutils.dumps(lport_obj),
                               cluster=cluster)
    LOG.debug("Updated logical port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lrouter_port_uuid, 'lrouter_uuid': lrouter_uuid})
    return result


def delete_router_lport(cluster, lrouter_uuid, lport_uuid):
    """Deletes a logical port on the assigned logical router."""
    path = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_uuid,
                                  lrouter_uuid)
    nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
    LOG.debug("Deleted logical router port %(lport_uuid)s on "
              "logical router %(lrouter_uuid)s",
              {'lport_uuid': lport_uuid,
               'lrouter_uuid': lrouter_uuid})


def delete_peer_router_lport(cluster, lr_uuid, ls_uuid, lp_uuid):
    nsx_port = switch.get_port(cluster, ls_uuid, lp_uuid,
                               relations="LogicalPortAttachment")
    relations = nsx_port.get('_relations')
    if relations:
        att_data = relations.get('LogicalPortAttachment')
        if att_data:
            lrp_uuid = att_data.get('peer_port_uuid')
            if lrp_uuid:
                delete_router_lport(cluster, lr_uuid, lrp_uuid)


def find_router_gw_port(context, cluster, router_id):
    """Retrieves the external gateway port for a NSX logical router."""

    # Find the uuid of the NSX ext gw logical router port
    # TODO(salvatore-orlando): Consider storing it in the Neutron DB
    results = query_lrouter_lports(
        cluster, router_id,
        relations="LogicalPortAttachment")
    for lport in results:
        if '_relations' in lport:
            attachment = lport['_relations'].get('LogicalPortAttachment')
            if attachment and attachment.get('type') == 'L3GatewayAttachment':
                return lport


def plug_router_port_attachment(cluster, router_id, port_id,
                                attachment_uuid, nsx_attachment_type,
                                attachment_vlan=None):
    """Attach a router port to the given attachment.

    Current attachment types:
     - PatchAttachment [-> logical switch port uuid]
     - L3GatewayAttachment [-> L3GatewayService uuid]
    For the latter attachment type a VLAN ID can be specified as well.
    """
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, port_id, router_id,
                                 is_attachment=True)
    attach_obj = {}
    attach_obj["type"] = nsx_attachment_type
    if nsx_attachment_type == "PatchAttachment":
        attach_obj["peer_port_uuid"] = attachment_uuid
    elif nsx_attachment_type == "L3GatewayAttachment":
        attach_obj["l3_gateway_service_uuid"] = attachment_uuid
        if attachment_vlan:
            attach_obj['vlan_id'] = attachment_vlan
    else:
        raise nsx_exc.InvalidAttachmentType(
            attachment_type=nsx_attachment_type)
    return nsxlib.do_request(
        HTTP_PUT, uri, jsonutils.dumps(attach_obj), cluster=cluster)


def _create_nat_match_obj(**kwargs):
    nat_match_obj = {'ethertype': 'IPv4'}
    delta = set(kwargs.keys()) - set(MATCH_KEYS)
    if delta:
        raise Exception(_("Invalid keys for NAT match: %s") % delta)
    nat_match_obj.update(kwargs)
    return nat_match_obj


def _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj):
    LOG.debug("Creating NAT rule: %s", nat_rule_obj)
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                 parent_resource_id=router_id)
    return nsxlib.do_request(HTTP_POST, uri, jsonutils.dumps(nat_rule_obj),
                             cluster=cluster)


def _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj):
    return {"to_source_ip_address_min": min_src_ip,
            "to_source_ip_address_max": max_src_ip,
            "type": "SourceNatRule",
            "match": nat_match_obj}


def create_lrouter_nosnat_rule_v2(cluster, _router_id, _match_criteria=None):
    LOG.info("'No SNAT' rules cannot be applied as they are not available "
             "in this version of the NSX platform")


def create_lrouter_nodnat_rule_v2(cluster, _router_id, _match_criteria=None):
    LOG.info("'No DNAT' rules cannot be applied as they are not available "
             "in this version of the NSX platform")


def create_lrouter_snat_rule_v2(cluster, router_id,
                                min_src_ip, max_src_ip, match_criteria=None):

    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj)
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def create_lrouter_dnat_rule_v2(cluster, router_id, dst_ip,
                                to_dst_port=None, match_criteria=None):

    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = {
        "to_destination_ip_address_min": dst_ip,
        "to_destination_ip_address_max": dst_ip,
        "type": "DestinationNatRule",
        "match": nat_match_obj
    }
    if to_dst_port:
        nat_rule_obj['to_destination_port'] = to_dst_port
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def create_lrouter_nosnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = {
        "type": "NoSourceNatRule",
        "match": nat_match_obj
    }
    if order:
        nat_rule_obj['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def create_lrouter_nodnat_rule_v3(cluster, router_id, order=None,
                                  match_criteria=None):
    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = {
        "type": "NoDestinationNatRule",
        "match": nat_match_obj
    }
    if order:
        nat_rule_obj['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def create_lrouter_snat_rule_v3(cluster, router_id, min_src_ip, max_src_ip,
                                order=None, match_criteria=None):
    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = _build_snat_rule_obj(min_src_ip, max_src_ip, nat_match_obj)
    if order:
        nat_rule_obj['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def create_lrouter_dnat_rule_v3(cluster, router_id, dst_ip, to_dst_port=None,
                                order=None, match_criteria=None):

    nat_match_obj = _create_nat_match_obj(**match_criteria)
    nat_rule_obj = {
        "to_destination_ip_address": dst_ip,
        "type": "DestinationNatRule",
        "match": nat_match_obj
    }
    if to_dst_port:
        nat_rule_obj['to_destination_port'] = to_dst_port
    if order:
        nat_rule_obj['order'] = order
    return _create_lrouter_nat_rule(cluster, router_id, nat_rule_obj)


def delete_nat_rules_by_match(cluster, router_id, rule_type,
                              max_num_expected,
                              min_num_expected=0,
                              raise_on_len_mismatch=True,
                              **kwargs):
    # Remove NAT rules
    nat_rules = query_nat_rules(cluster, router_id)
    to_delete_ids = []
    for r in nat_rules:
        if r['type'] != rule_type:
            continue

        for key, value in six.iteritems(kwargs):
            if not (key in r['match'] and r['match'][key] == value):
                break
        else:
            to_delete_ids.append(r['uuid'])
    num_rules_to_delete = len(to_delete_ids)
    if (num_rules_to_delete < min_num_expected or
            num_rules_to_delete > max_num_expected):
        if raise_on_len_mismatch:
            raise nsx_exc.NatRuleMismatch(actual_rules=num_rules_to_delete,
                                          min_rules=min_num_expected,
                                          max_rules=max_num_expected)
        else:
            LOG.warning("Found %(actual_rule_num)d matching NAT rules, "
                        "which is not in the expected range "
                        "(%(min_exp_rule_num)d,%(max_exp_rule_num)d)",
                        {'actual_rule_num': num_rules_to_delete,
                         'min_exp_rule_num': min_num_expected,
                         'max_exp_rule_num': max_num_expected})

    for rule_id in to_delete_ids:
        delete_router_nat_rule(cluster, router_id, rule_id)
    # Return the number of deleted rules - useful at least for
    # testing purposes
    return num_rules_to_delete


def delete_router_nat_rule(cluster, router_id, rule_id):
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE, rule_id, router_id)
    nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)


def query_nat_rules(cluster, router_id, fields="*", filters=None):
    uri = nsxlib._build_uri_path(LROUTERNAT_RESOURCE,
                                 parent_resource_id=router_id,
                                 fields=fields, filters=filters)
    return nsxlib.get_all_query_pages(uri, cluster)


# NOTE(salvatore-orlando): The following FIXME applies in general to
# each operation on list attributes.
# FIXME(salvatore-orlando): need a lock around the list of IPs on an iface
def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
                            ips_to_add, ips_to_remove):
    uri = nsxlib._build_uri_path(LROUTERPORT_RESOURCE, lport_id, lrouter_id)
    try:
        port = nsxlib.do_request(HTTP_GET, uri, cluster=cluster)
        # TODO(salvatore-orlando): Enforce that the intersection of
        # ips_to_add and ips_to_remove is empty
        ip_address_set = set(port['ip_addresses'])
        ip_address_set = ip_address_set - set(ips_to_remove)
        ip_address_set = ip_address_set | set(ips_to_add)
        # A set is not JSON serializable - convert to list
        port['ip_addresses'] = list(ip_address_set)
        nsxlib.do_request(HTTP_PUT, uri, jsonutils.dumps(port),
                          cluster=cluster)
    except exception.NotFound:
        # FIXME(salv-orlando): avoid raising a different exception
        data = {'lport_id': lport_id, 'lrouter_id': lrouter_id}
        msg = (_("Router Port %(lport_id)s not found on router "
                 "%(lrouter_id)s") % data)
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)
    except api_exc.NsxApiException as e:
        msg = _("An exception occurred while updating IP addresses on a "
                "router logical port: %s") % e
        LOG.exception(msg)
        raise nsx_exc.NsxPluginException(err_msg=msg)


ROUTER_FUNC_DICT = {
    'create_lrouter': {
        2: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: create_implicit_routing_lrouter,
            1: create_implicit_routing_lrouter_with_distribution,
            2: create_explicit_routing_lrouter, }, },
    'update_lrouter': {
        2: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter, },
        3: {versioning.DEFAULT_VERSION: update_implicit_routing_lrouter,
            2: update_explicit_routing_lrouter, }, },
    'create_lrouter_dnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_dnat_rule_v3, }, },
    'create_lrouter_snat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_snat_rule_v3, }, },
    'create_lrouter_nosnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nosnat_rule_v3, }, },
    'create_lrouter_nodnat_rule': {
        2: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v2, },
        3: {versioning.DEFAULT_VERSION: create_lrouter_nodnat_rule_v3, }, },
    'get_default_route_explicit_routing_lrouter': {
        3: {versioning.DEFAULT_VERSION:
            get_default_route_explicit_routing_lrouter_v33,
            2: get_default_route_explicit_routing_lrouter_v32, }, },
}


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter(cluster, *args, **kwargs):
    if kwargs.get('distributed', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 1):
            raise nsx_exc.InvalidVersion(version=v)
        return v


@versioning.versioned(ROUTER_FUNC_DICT)
def get_default_route_explicit_routing_lrouter(cluster, *args, **kwargs):
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def update_lrouter(cluster, *args, **kwargs):
    if kwargs.get('routes', None):
        v = cluster.api_client.get_version()
        if (v.major, v.minor) < (3, 2):
            raise nsx_exc.InvalidVersion(version=v)
        return v


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_dnat_rule(cluster, *args, **kwargs):
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_snat_rule(cluster, *args, **kwargs):
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nosnat_rule(cluster, *args, **kwargs):
    pass


@versioning.versioned(ROUTER_FUNC_DICT)
def create_lrouter_nodnat_rule(cluster, *args, **kwargs):
    pass
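
delete_nat_rules_by_match above leans on Python's for/else: the else suite runs only when the inner loop over the match criteria completes without a break, that is, when the rule satisfied every criterion. A standalone sketch of that selection step, with sample rule dicts rather than live NSX data:

def select_matching(rules, rule_type, **criteria):
    matched = []
    for rule in rules:
        if rule['type'] != rule_type:
            continue
        for key, value in criteria.items():
            if rule['match'].get(key) != value:
                break  # one criterion failed, skip this rule
        else:
            # No break occurred: every criterion matched.
            matched.append(rule['uuid'])
    return matched


rules = [{'uuid': 'a', 'type': 'SourceNatRule',
          'match': {'source_ip_addresses': '10.0.0.0/24'}},
         {'uuid': 'b', 'type': 'SourceNatRule',
          'match': {'source_ip_addresses': '10.0.1.0/24'}}]
print(select_matching(rules, 'SourceNatRule',
                      source_ip_addresses='10.0.0.0/24'))
# ['a']
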
@ -1,216 +0,0 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from neutron_lib import exceptions
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils

from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib

HTTP_GET = "GET"
HTTP_POST = "POST"
HTTP_DELETE = "DELETE"
HTTP_PUT = "PUT"

SECPROF_RESOURCE = "security-profile"

LOG = log.getLogger(__name__)


def mk_body(**kwargs):
    """Convenience function that creates and dumps a dictionary to a string.

    :param kwargs: the key/value pairs to be dumped into a json string.
    :returns: a json string.
    """
    return jsonutils.dumps(kwargs, ensure_ascii=False)


def query_security_profiles(cluster, fields=None, filters=None):
    return nsxlib.get_all_query_pages(
        nsxlib._build_uri_path(SECPROF_RESOURCE,
                               fields=fields,
                               filters=filters),
        cluster)


def create_security_profile(cluster, tenant_id, neutron_id, security_profile):
    """Create a security profile on the NSX backend.

    :param cluster: a NSX cluster object reference
    :param tenant_id: identifier of the Neutron tenant
    :param neutron_id: neutron security group identifier
    :param security_profile: dictionary with data for
        configuring the NSX security profile.
    """
    path = "/ws.v1/security-profile"
    # Allow all dhcp responses and all ingress traffic
    hidden_rules = {'logical_port_egress_rules':
                    [{'ethertype': 'IPv4',
                      'protocol': constants.PROTO_NUM_UDP,
                      'port_range_min': constants.DHCP_RESPONSE_PORT,
                      'port_range_max': constants.DHCP_RESPONSE_PORT,
                      'ip_prefix': '0.0.0.0/0'}],
                    'logical_port_ingress_rules':
                    [{'ethertype': 'IPv4'},
                     {'ethertype': 'IPv6'}]}
    display_name = utils.check_and_truncate(security_profile.get('name'))
    # NOTE(salv-orlando): neutron-id tags are prepended with 'q' for
    # historical reasons
    body = mk_body(
        tags=utils.get_tags(os_tid=tenant_id, q_sec_group_id=neutron_id),
        display_name=display_name,
        logical_port_ingress_rules=(
            hidden_rules['logical_port_ingress_rules']),
        logical_port_egress_rules=hidden_rules['logical_port_egress_rules']
    )
    rsp = nsxlib.do_request(HTTP_POST, path, body, cluster=cluster)
    if security_profile.get('name') == 'default':
        # For the default security group, allow IP traffic between
        # members of the same security profile, as well as ingress
        # traffic from the switch
        rules = {'logical_port_egress_rules': [{'ethertype': 'IPv4',
                                                'profile_uuid': rsp['uuid']},
                                               {'ethertype': 'IPv6',
                                                'profile_uuid': rsp['uuid']}],
                 'logical_port_ingress_rules': [{'ethertype': 'IPv4'},
                                                {'ethertype': 'IPv6'}]}

        update_security_group_rules(cluster, rsp['uuid'], rules)
    LOG.debug("Created Security Profile: %s", rsp)
    return rsp


def update_security_group_rules(cluster, spid, rules):
    path = "/ws.v1/security-profile/%s" % spid

    # Allow all dhcp responses in
    rules['logical_port_egress_rules'].append(
        {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
         'port_range_min': constants.DHCP_RESPONSE_PORT,
         'port_range_max': constants.DHCP_RESPONSE_PORT,
         'ip_prefix': '0.0.0.0/0'})
    # If there are no ingress rules, add a bunk rule to drop all
    # ingress traffic
    if not rules['logical_port_ingress_rules']:
        rules['logical_port_ingress_rules'].append(
            {'ethertype': 'IPv4', 'ip_prefix': '127.0.0.1/32'})
    try:
        body = mk_body(
            logical_port_ingress_rules=summarize_security_group_rules(rules[
                'logical_port_ingress_rules']),
            logical_port_egress_rules=summarize_security_group_rules(rules[
                'logical_port_egress_rules']))
        rsp = nsxlib.do_request(HTTP_PUT, path, body, cluster=cluster)
    except exceptions.NotFound as e:
        LOG.error(nsxlib.format_exception("Unknown", e, locals()))
        # FIXME(salvatore-orlando): This should not raise NeutronException
        raise exceptions.NeutronException()
    LOG.debug("Updated Security Profile: %s", rsp)
    return rsp


def update_security_profile(cluster, spid, name):
    return nsxlib.do_request(
        HTTP_PUT,
        nsxlib._build_uri_path(SECPROF_RESOURCE, resource_id=spid),
        jsonutils.dumps({"display_name": utils.check_and_truncate(name)}),
        cluster=cluster)


def delete_security_profile(cluster, spid):
    path = "/ws.v1/security-profile/%s" % spid

    try:
        nsxlib.do_request(HTTP_DELETE, path, cluster=cluster)
    except exceptions.NotFound:
        with excutils.save_and_reraise_exception():
            # This is not necessarily an error condition
            LOG.warning("Unable to find security profile %s on NSX "
                        "backend", spid)


def summarize_security_group_rules(logical_port_rules):
    """Summarize security group rules and remove duplicates.

    Given a set of arbitrary security group rules, determining the
    optimum (minimum) rule set is a complex (NP-hard) problem. This
    method does not attempt to obtain the optimum rules. Instead, it
    summarizes a set of common rule patterns.
    """

    # Remove port_range_min & port_range_max if the rule covers the entire
    # port range. Also, remove quad-zero default IPv4 and default IPv6
    # routes
    for rule in logical_port_rules:
        if ('port_range_min' in rule and 'port_range_max' in rule and
                rule['port_range_min'] <= 1 and
                rule['port_range_max'] == 65535):
            del rule['port_range_min']
            del rule['port_range_max']

        if ('ip_prefix' in rule and
                rule['ip_prefix'] in ['0.0.0.0/0', '::/0']):
            del rule['ip_prefix']

    # Remove duplicate rules. Loop through each rule rule_i and exclude a
    # rule if it is part of another rule.
    logical_port_rules_summarized = []
    for i in range(len(logical_port_rules)):
        for j in range(len(logical_port_rules)):
            if i != j:
                if is_sg_rules_identical(logical_port_rules[i],
                                         logical_port_rules[j]):
                    pass
                elif is_sg_rule_subset(logical_port_rules[i],
                                       logical_port_rules[j]):
                    break
        else:
            logical_port_rules_summarized.append(logical_port_rules[i])

    return logical_port_rules_summarized


def is_sg_rules_identical(sgr1, sgr2):
    """Determine if security group rules sgr1 and sgr2 are identical."""
    return (sgr1['ethertype'] == sgr2['ethertype'] and
            sgr1.get('protocol') == sgr2.get('protocol') and
            sgr1.get('port_range_min') == sgr2.get('port_range_min') and
            sgr1.get('port_range_max') == sgr2.get('port_range_max') and
            sgr1.get('ip_prefix') == sgr2.get('ip_prefix') and
            sgr1.get('profile_uuid') == sgr2.get('profile_uuid'))


def is_sg_rule_subset(sgr1, sgr2):
    """Determine if security group rule sgr1 is a strict subset of sgr2."""
    all_protocols = set(range(256))
    sgr1_protocols = {sgr1['protocol']} if 'protocol' in sgr1 else \
        all_protocols
    sgr2_protocols = {sgr2['protocol']} if 'protocol' in sgr2 else \
        all_protocols

    return (sgr1['ethertype'] == sgr2['ethertype'] and
            sgr1_protocols.issubset(sgr2_protocols) and
            sgr1.get('port_range_min', 0) >= sgr2.get('port_range_min', 0) and
            sgr1.get('port_range_max', 65535) <= sgr2.get('port_range_max',
                                                          65535) and
            (sgr2.get('ip_prefix') is None or
             sgr1.get('ip_prefix') == sgr2.get('ip_prefix')) and
            (sgr2.get('profile_uuid') is None or
             sgr1.get('profile_uuid') == sgr2.get('profile_uuid')))
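
The first pass of summarize_security_group_rules above normalizes rules before the subset comparison: a port range covering 1-65535 or a catch-all prefix constrains nothing, so removing it makes equivalent rules compare equal. A self-contained sketch of that normalization over simplified rule dicts:

def normalize(rule):
    # A range spanning every port is no constraint at all.
    if (rule.get('port_range_min', 65535) <= 1 and
            rule.get('port_range_max', 0) == 65535):
        del rule['port_range_min']
        del rule['port_range_max']
    # Likewise for catch-all IPv4/IPv6 prefixes.
    if rule.get('ip_prefix') in ('0.0.0.0/0', '::/0'):
        del rule['ip_prefix']
    return rule


print(normalize({'ethertype': 'IPv4', 'port_range_min': 1,
                 'port_range_max': 65535, 'ip_prefix': '0.0.0.0/0'}))
# {'ethertype': 'IPv4'}
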
@ -1,68 +0,0 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect

from vmware_nsx._i18n import _
from vmware_nsx.api_client import exception

DEFAULT_VERSION = -1


def versioned(func_table):

    def versioned_function(wrapped_func):
        func_name = wrapped_func.__name__

        def dispatch_versioned_function(cluster, *args, **kwargs):
            # Call the wrapped function, in case we need to
            # run validation checks regarding versions. It
            # should return the NSX version
            v = (wrapped_func(cluster, *args, **kwargs) or
                 cluster.api_client.get_version())
            func = get_function_by_version(func_table, func_name, v)
            func_kwargs = kwargs
            # pylint: disable=deprecated-method
            arg_spec = inspect.getargspec(func)
            if not arg_spec.keywords and not arg_spec.varargs:
                # Drop args unknown to the function from func_kwargs
                arg_set = set(func_kwargs.keys())
                for arg in arg_set - set(arg_spec.args):
                    del func_kwargs[arg]
            # NOTE(salvatore-orlando): shall we fail here if a required
            # argument is not passed, or let the called function raise?
            return func(cluster, *args, **func_kwargs)

        return dispatch_versioned_function
    return versioned_function


def get_function_by_version(func_table, func_name, ver):
    if ver:
        if ver.major not in func_table[func_name]:
            major = max(func_table[func_name].keys())
            minor = max(func_table[func_name][major].keys())
            if major > ver.major:
                raise NotImplementedError(_("Operation may not be supported"))
        else:
            major = ver.major
            minor = ver.minor
            if ver.minor not in func_table[func_name][major]:
                minor = DEFAULT_VERSION
        return func_table[func_name][major][minor]
    else:
        msg = _('NSX version is not set. Unable to complete request '
                'correctly. Check log for NSX communication errors.')
        raise exception.ServiceUnavailable(message=msg)
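
get_function_by_version above resolves a backend (major, minor) pair against the nested function table, falling back to the DEFAULT_VERSION entry when the exact minor is absent and to the highest known major when the requested one is missing. A simplified, self-contained sketch of that lookup, using plain integers instead of the api_client version object and omitting the unsupported-operation check:

DEFAULT = -1

TABLE = {
    'create': {2: {DEFAULT: 'create_v2'},
               3: {DEFAULT: 'create_v3_base', 2: 'create_v3_2'}},
}


def resolve(table, name, major, minor):
    # Fall back to the highest known major if the requested one is absent.
    if major not in table[name]:
        major = max(table[name])
    # Fall back to the default entry if the exact minor is absent.
    minors = table[name][major]
    return minors.get(minor, minors[DEFAULT])


print(resolve(TABLE, 'create', 3, 2))  # 'create_v3_2' - exact match
print(resolve(TABLE, 'create', 3, 9))  # 'create_v3_base' - minor fallback
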
@ -16,7 +16,6 @@ import vmware_nsx.common.config
 import vmware_nsx.dhcp_meta.lsnmanager
 import vmware_nsx.dhcp_meta.nsx
 import vmware_nsx.dvs.dvs_utils
-import vmware_nsx.extensions.networkgw


 def list_opts():
@ -29,7 +28,6 @@ def list_opts():
         ('NSX_SYNC', vmware_nsx.common.config.sync_opts),
         ('nsxv', vmware_nsx.common.config.nsxv_opts),
         ('nsx_v3', vmware_nsx.common.config.nsx_v3_opts),
-        ('QUOTAS', vmware_nsx.extensions.networkgw.nw_gw_quota_opts),
         ('dvs', vmware_nsx.dvs.dvs_utils.dvs_opts),
         ('nsx_tvd', vmware_nsx.common.config.nsx_tvd_opts),
         ('nsx_p', vmware_nsx.common.config.nsx_p_opts),
@ -18,17 +18,15 @@
 # Note: this import should be here in order to appear before NeutronDbPluginV2
 # in each of the plugins. If not: security-group/-rule will not have all the
 # relevant extend dict registries.
-from neutron.db.models import securitygroup  # noqa
+from neutron.db import l3_dvr_db  # noqa

 from vmware_nsx.plugins.dvs import plugin as dvs
 from vmware_nsx.plugins.nsx import plugin as nsx
-from vmware_nsx.plugins.nsx_mh import plugin as nsx_mh
 from vmware_nsx.plugins.nsx_p import plugin as nsx_p
 from vmware_nsx.plugins.nsx_v import plugin as nsx_v
 from vmware_nsx.plugins.nsx_v3 import plugin as nsx_v3

 NsxDvsPlugin = dvs.NsxDvsV2
-NsxPlugin = nsx_mh.NsxPluginV2
 NsxVPlugin = nsx_v.NsxVPluginV2
 NsxV3Plugin = nsx_v3.NsxV3Plugin
 NsxPolicyPlugin = nsx_p.NsxPolicyPlugin

File diff suppressed because it is too large
@ -18,7 +18,6 @@ from vmware_nsx.policies import maclearning
 from vmware_nsx.policies import network_gateway
 from vmware_nsx.policies import nsxpolicy
 from vmware_nsx.policies import providersecuritygroup
-from vmware_nsx.policies import qos_queue
 from vmware_nsx.policies import security_group


@ -28,7 +27,6 @@ def list_rules():
         maclearning.list_rules(),
         network_gateway.list_rules(),
         providersecuritygroup.list_rules(),
-        qos_queue.list_rules(),
         security_group.list_rules(),
         nsxpolicy.list_rules(),
         housekeeper.list_rules(),
@@ -1,62 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_policy import policy
-
-from vmware_nsx.policies import base
-
-
-rules = [
-    policy.RuleDefault(
-        'create_qos_queue',
-        base.RULE_ADMIN_ONLY,
-        description='Create a QoS queue'),
-    policy.RuleDefault(
-        'get_qos_queue',
-        base.RULE_ADMIN_ONLY,
-        description='Get QoS queues'),
-
-    policy.DocumentedRuleDefault(
-        'get_network:queue_id',
-        base.RULE_ADMIN_ONLY,
-        'Get ``queue_id`` attributes of networks',
-        [
-            {
-                'method': 'GET',
-                'path': '/networks',
-            },
-            {
-                'method': 'GET',
-                'path': '/networks/{id}',
-            },
-        ]
-    ),
-    policy.DocumentedRuleDefault(
-        'get_port:queue_id',
-        base.RULE_ADMIN_ONLY,
-        'Get ``queue_id`` attributes of ports',
-        [
-            {
-                'method': 'GET',
-                'path': '/ports',
-            },
-            {
-                'method': 'GET',
-                'path': '/ports/{id}',
-            },
-        ]
-    ),
-]
-
-
-def list_rules():
-    return rules
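The deleted module above followed the standard oslo.policy registration pattern. A minimal sketch of that pattern, assuming RULE_ADMIN_ONLY in vmware_nsx.policies.base expands to a role:admin check:

    from oslo_config import cfg
    from oslo_policy import policy

    enforcer = policy.Enforcer(cfg.CONF)
    enforcer.register_default(
        policy.RuleDefault('create_qos_queue', 'role:admin',
                           description='Create a QoS queue'))
    # enforce() is True only for credentials carrying the admin role
    enforcer.enforce('create_qos_queue', {}, {'roles': ['admin']})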
@@ -30,7 +30,7 @@ from vmware_nsx.plugins.nsx_v.vshield import vcns
 import vmware_nsx.plugins.nsx_v.vshield.vcns_driver as vcnsdriver


-plugin = neutron_plugin.NsxPlugin
+plugin = neutron_plugin.NsxV3Plugin
 api_client = nsx_client.NsxApiClient
 evt_client = eventlet_client.EventletApiClient
 vcns_class = vcns.Vcns
@@ -19,13 +19,12 @@ from oslo_config import cfg

 from neutron.tests.unit.db import test_allowedaddresspairs_db as ext_pairs

-from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin
 from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsx_v_plugin
 from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants
 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_v3_plugin


-class TestAllowedAddressPairsNSXv2(test_nsx_plugin.NsxPluginV2TestCase,
+class TestAllowedAddressPairsNSXv2(test_v3_plugin.NsxV3PluginTestCaseMixin,
                                    ext_pairs.TestAllowedAddressPairs):

     # TODO(arosen): move to ext_pairs.TestAllowedAddressPairs once all
@@ -13,16 +13,12 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import mock
 from neutron.extensions import agent
-from neutron.tests.unit.db import test_db_base_plugin_v2 as test_db_plugin
 from neutron_lib import context
 from oslo_config import cfg

-from vmware_nsx.api_client import version
-from vmware_nsx.common import sync
 from vmware_nsx.tests import unit as vmware
-from vmware_nsx.tests.unit.nsx_mh.apiclient import fake
+from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3
 from vmware_nsx.tests.unit import test_utils


@@ -38,33 +34,23 @@ class MacLearningExtensionManager(object):
         return []


-class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
+class MacLearningDBTestCase(test_nsxv3.NsxV3PluginTestCaseMixin):
     fmt = 'json'

     def setUp(self):
         test_utils.override_nsx_ini_full_test()
         cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
         ext_mgr = MacLearningExtensionManager()
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx.start()
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        # Emulate tests against NSX 2.x
-        instance.return_value.get_version.return_value = version.Version("3.0")
-        instance.return_value.request.side_effect = self.fc.fake_request
         cfg.CONF.set_override('metadata_mode', None, 'NSX')
-        self.addCleanup(self.fc.reset_all)
         super(MacLearningDBTestCase, self).setUp(plugin=vmware.PLUGIN_NAME,
                                                  ext_mgr=ext_mgr)
         self.adminContext = context.get_admin_context()

     def test_create_with_mac_learning(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=True) as port:
+        with self.port(arg_list=('mac_learning_enabled',
+                                 'port_security_enabled'),
+                       mac_learning_enabled=True,
+                       port_security_enabled=False) as port:
             # Validate create operation response
             self.assertEqual(True, port['port']['mac_learning_enabled'])
             # Verify that db operation successfully set mac learning state
@@ -79,15 +65,18 @@ class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
             self.assertNotIn('mac_learning_enabled', sport['port'])

     def test_update_port_with_mac_learning(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=False) as port:
+        with self.port(arg_list=('mac_learning_enabled',
+                                 'port_security_enabled'),
+                       mac_learning_enabled=False,
+                       port_security_enabled=False) as port:
             data = {'port': {'mac_learning_enabled': True}}
             req = self.new_update_request('ports', data, port['port']['id'])
             res = self.deserialize(self.fmt, req.get_response(self.api))
             self.assertEqual(True, res['port']['mac_learning_enabled'])

     def test_update_preexisting_port_with_mac_learning(self):
-        with self.port() as port:
+        with self.port(arg_list=('port_security_enabled',),
+                       port_security_enabled=False) as port:
             req = self.new_show_request('ports', port['port']['id'], self.fmt)
             sport = self.deserialize(self.fmt, req.get_response(self.api))
             self.assertNotIn('mac_learning_enabled', sport['port'])
@@ -105,15 +94,19 @@ class MacLearningDBTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
         # for this test we need to enable overlapping ips
         cfg.CONF.set_default('allow_overlapping_ips', True)
         no_mac_learning_p = (lambda:
-                             self.port(arg_list=('mac_learning_enabled',),
-                                       mac_learning_enabled=True))
+                             self.port(arg_list=('mac_learning_enabled',
+                                                 'port_security_enabled'),
+                                       mac_learning_enabled=True,
+                                       port_security_enabled=False))

         with no_mac_learning_p(), no_mac_learning_p(), no_mac_learning_p():
             for port in self._list('ports')['ports']:
                 self.assertEqual(True, port['mac_learning_enabled'])

     def test_show_port(self):
-        with self.port(arg_list=('mac_learning_enabled',),
-                       mac_learning_enabled=True) as p:
+        with self.port(arg_list=('mac_learning_enabled',
+                                 'port_security_enabled'),
+                       mac_learning_enabled=True,
+                       port_security_enabled=False) as p:
             port_res = self._show('ports', p['port']['id'])['port']
             self.assertEqual(True, port_res['mac_learning_enabled'])
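The recurring edit in this file pairs mac_learning_enabled=True with port_security_enabled=False, which suggests the two attributes conflict under the NSX-v3 test plugin. For context, a hedged sketch of what neutron's port() helper does with these arguments; arg_list whitelists the extension attributes that get forwarded into the request body (helper names per neutron's NeutronDbPluginV2TestCase, behavior assumed):

    data = {'port': {'network_id': network_id,         # from the fixture
                     'tenant_id': 'tenant',
                     'mac_learning_enabled': True,     # forwarded: in arg_list
                     'port_security_enabled': False}}  # forwarded: in arg_list
    req = self.new_create_request('ports', data)
    port = self.deserialize(self.fmt, req.get_response(self.api))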
[File diff suppressed because it is too large]
@@ -13,35 +13,19 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import mock
-
 from neutron.tests.unit.extensions import test_portsecurity as psec
-from vmware_nsx.common import sync
 from vmware_nsx.tests import unit as vmware
-from vmware_nsx.tests.unit.nsx_mh.apiclient import fake
 from vmware_nsx.tests.unit.nsx_v3 import test_constants as v3_constants
 from vmware_nsx.tests.unit.nsx_v3 import test_plugin as test_nsxv3
 from vmware_nsx.tests.unit import test_utils


-class PortSecurityTestCaseNSXv2(psec.PortSecurityDBTestCase):
+class PortSecurityTestCaseNSXv2(psec.PortSecurityDBTestCase,
+                                test_nsxv3.NsxV3PluginTestCaseMixin):

     def setUp(self):
         test_utils.override_nsx_ini_test()
-        # mock api client
-        self.fc = fake.FakeClient(vmware.STUBS_PATH)
-        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
-        instance = self.mock_nsx.start()
-        instance.return_value.login.return_value = "the_cookie"
-        # Avoid runs of the synchronizer looping call
-        patch_sync = mock.patch.object(sync, '_start_loopingcall')
-        patch_sync.start()
-
-        instance.return_value.request.side_effect = self.fc.fake_request
         super(PortSecurityTestCaseNSXv2, self).setUp(vmware.PLUGIN_NAME)
-        self.addCleanup(self.fc.reset_all)
-        self.addCleanup(self.mock_nsx.stop)
-        self.addCleanup(patch_sync.stop)


 class TestPortSecurityNSXv2(PortSecurityTestCaseNSXv2, psec.TestPortSecurity):
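The MH variant stubbed the backend by hand (FakeClient plus mock.patch); the v3 mixin performs the equivalent stubbing in its own setUp(), so the test case only needs multiple inheritance. A hedged sketch of the resulting pattern for a similar test case (the class name here is hypothetical):

    class MyPortSecurityTestCase(psec.PortSecurityDBTestCase,
                                 test_nsxv3.NsxV3PluginTestCaseMixin):

        def setUp(self):
            # the mixin's setUp mocks the NSX v3 backend; no manual
            # FakeClient wiring is needed
            super(MyPortSecurityTestCase, self).setUp(vmware.PLUGIN_NAME)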
@@ -19,10 +19,10 @@ import webob.exc
 from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
 from neutron_lib.api.definitions import provider_net as pnet
 from vmware_nsx.tests import unit as vmware
-from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin
+from vmware_nsx.tests.unit.nsx_v import test_plugin as test_nsxv


-class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase):
+class TestProvidernet(test_nsxv.NsxVPluginV2TestCase):

     def test_create_delete_provider_network_default_physical_net(self):
         '''Leaves physical_net unspecified'''
@@ -78,7 +78,7 @@ class TestProvidernet(test_nsx_plugin.NsxPluginV2TestCase):
         self.assertEqual(net['network'][pnet.PHYSICAL_NETWORK], 'physnet2')


-class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase):
+class TestMultiProviderNetworks(test_nsxv.NsxVPluginV2TestCase):

     def setUp(self, plugin=None):
         cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
@@ -142,8 +142,9 @@ class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase):
                         [{pnet.NETWORK_TYPE: 'vlan',
                           pnet.PHYSICAL_NETWORK: 'physnet1',
                           pnet.SEGMENTATION_ID: 1},
-                         {pnet.NETWORK_TYPE: 'stt',
-                          pnet.PHYSICAL_NETWORK: 'physnet1'}],
+                         {pnet.NETWORK_TYPE: 'vlan',
+                          pnet.PHYSICAL_NETWORK: 'physnet2',
+                          pnet.SEGMENTATION_ID: 2}],
                     'tenant_id': 'tenant_one'}}
         network_req = self.new_create_request('networks', data)
         network = self.deserialize(self.fmt,
@@ -177,17 +178,3 @@ class TestMultiProviderNetworks(test_nsx_plugin.NsxPluginV2TestCase):
         network_req = self.new_create_request('networks', data)
         res = network_req.get_response(self.api)
         self.assertEqual(res.status_int, 400)
-
-    def test_create_network_duplicate_segments(self):
-        data = {'network': {'name': 'net1',
-                            mpnet_apidef.SEGMENTS:
-                                [{pnet.NETWORK_TYPE: 'vlan',
-                                  pnet.PHYSICAL_NETWORK: 'physnet1',
-                                  pnet.SEGMENTATION_ID: 1},
-                                 {pnet.NETWORK_TYPE: 'vlan',
-                                  pnet.PHYSICAL_NETWORK: 'physnet1',
-                                  pnet.SEGMENTATION_ID: 1}],
-                            'tenant_id': 'tenant_one'}}
-        network_req = self.new_create_request('networks', data)
-        res = network_req.get_response(self.api)
-        self.assertEqual(res.status_int, 400)
@@ -1,245 +0,0 @@
-# Copyright (c) 2014 OpenStack Foundation.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import contextlib
-
-import mock
-from neutron_lib import context
-from oslo_config import cfg
-import webob.exc
-
-from neutron.tests.unit.api import test_extensions
-from vmware_nsx.db import qos_db
-from vmware_nsx.extensions import qos_queue as ext_qos
-from vmware_nsx.nsxlib import mh as nsxlib
-from vmware_nsx.tests import unit as vmware
-from vmware_nsx.tests.unit.nsx_mh import test_plugin as test_nsx_plugin
-
-
-class QoSTestExtensionManager(object):
-
-    def get_resources(self):
-        return ext_qos.Qos_queue.get_resources()
-
-    def get_actions(self):
-        return []
-
-    def get_request_extensions(self):
-        return []
-
-
-class TestQoSQueue(test_nsx_plugin.NsxPluginV2TestCase):
-
-    def setUp(self, plugin=None):
-        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
-        super(TestQoSQueue, self).setUp()
-        ext_mgr = QoSTestExtensionManager()
-        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
-
-    def _create_qos_queue(self, fmt, body, **kwargs):
-        qos_queue = self.new_create_request('qos-queues', body)
-        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
-            # create a specific auth context for this request
-            qos_queue.environ['neutron.context'] = context.Context(
-                '', kwargs['tenant_id'])
-
-        return qos_queue.get_response(self.ext_api)
-
-    @contextlib.contextmanager
-    def qos_queue(self, name='foo', min='0', max='10',
-                  qos_marking=None, dscp='0', default=None, do_delete=True):
-
-        body = {'qos_queue': {'tenant_id': 'tenant',
-                              'name': name,
-                              'min': min,
-                              'max': max}}
-
-        if qos_marking:
-            body['qos_queue']['qos_marking'] = qos_marking
-        if dscp:
-            body['qos_queue']['dscp'] = dscp
-        if default:
-            body['qos_queue']['default'] = default
-        res = self._create_qos_queue('json', body)
-        qos_queue = self.deserialize('json', res)
-        if res.status_int >= 400:
-            raise webob.exc.HTTPClientError(code=res.status_int)
-
-        yield qos_queue
-
-        if do_delete:
-            self._delete('qos-queues',
-                         qos_queue['qos_queue']['id'])
-
-    def test_create_qos_queue(self):
-        with self.qos_queue(name='fake_lqueue', min=34, max=44,
-                            qos_marking='untrusted', default=False) as q:
-            self.assertEqual(q['qos_queue']['name'], 'fake_lqueue')
-            self.assertEqual(q['qos_queue']['min'], 34)
-            self.assertEqual(q['qos_queue']['max'], 44)
-            self.assertEqual(q['qos_queue']['qos_marking'], 'untrusted')
-            self.assertFalse(q['qos_queue']['default'])
-
-    def test_create_trusted_qos_queue(self):
-        with mock.patch.object(qos_db.LOG, 'info') as log:
-            with mock.patch.object(nsxlib, 'do_request',
-                                   return_value={"uuid": "fake_queue"}):
-                with self.qos_queue(name='fake_lqueue', min=34, max=44,
-                                    qos_marking='trusted', default=False) as q:
-                    self.assertIsNone(q['qos_queue']['dscp'])
-                    self.assertTrue(log.called)
-
-    def test_create_qos_queue_name_exceeds_40_chars(self):
-        name = 'this_is_a_queue_whose_name_is_longer_than_40_chars'
-        with self.qos_queue(name=name) as queue:
-            # Assert Neutron name is not truncated
-            self.assertEqual(queue['qos_queue']['name'], name)
-
-    def test_create_qos_queue_default(self):
-        with self.qos_queue(default=True) as q:
-            self.assertTrue(q['qos_queue']['default'])
-
-    def test_create_qos_queue_two_default_queues_fail(self):
-        with self.qos_queue(default=True):
-            body = {'qos_queue': {'tenant_id': 'tenant',
-                                  'name': 'second_default_queue',
-                                  'default': True}}
-            res = self._create_qos_queue('json', body)
-            self.assertEqual(res.status_int, 409)
-
-    def test_create_port_with_queue(self):
-        with self.qos_queue(default=True) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            with self.port(device_id=device_id) as p:
-                self.assertEqual(len(p['port'][ext_qos.QUEUE]), 36)
-
-    def test_create_shared_queue_networks(self):
-        with self.qos_queue(default=True, do_delete=False) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            res = self._create_network('json', 'net2', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net2 = self.deserialize('json', res)
-            self.assertEqual(net1['network'][ext_qos.QUEUE],
-                             q1['qos_queue']['id'])
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'],
-                                    device_id=device_id)
-            port1 = self.deserialize('json', res)
-            res = self._create_port('json', net2['network']['id'],
-                                    device_id=device_id)
-            port2 = self.deserialize('json', res)
-            self.assertEqual(port1['port'][ext_qos.QUEUE],
-                             port2['port'][ext_qos.QUEUE])
-
-            self._delete('ports', port1['port']['id'])
-            self._delete('ports', port2['port']['id'])
-
-    def test_remove_queue_in_use_fail(self):
-        with self.qos_queue(do_delete=False) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'],
-                                    device_id=device_id)
-            port = self.deserialize('json', res)
-            self._delete('qos-queues', port['port'][ext_qos.QUEUE], 409)
-
-    def test_update_network_new_queue(self):
-        with self.qos_queue() as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            with self.qos_queue() as new_q:
-                data = {'network': {ext_qos.QUEUE: new_q['qos_queue']['id']}}
-                req = self.new_update_request('networks', data,
-                                              net1['network']['id'])
-                res = req.get_response(self.api)
-                net1 = self.deserialize('json', res)
-                self.assertEqual(net1['network'][ext_qos.QUEUE],
-                                 new_q['qos_queue']['id'])
-
-    def test_update_port_adding_device_id(self):
-        with self.qos_queue(do_delete=False) as q1:
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            device_id = "00fff4d0-e4a8-4a3a-8906-4c4cdafb59f1"
-            res = self._create_port('json', net1['network']['id'])
-            port = self.deserialize('json', res)
-            self.assertIsNone(port['port'][ext_qos.QUEUE])
-
-            data = {'port': {'device_id': device_id}}
-            req = self.new_update_request('ports', data,
-                                          port['port']['id'])
-
-            res = req.get_response(self.api)
-            port = self.deserialize('json', res)
-            self.assertEqual(len(port['port'][ext_qos.QUEUE]), 36)
-
-    def test_dscp_value_out_of_range(self):
-        body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '64',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body)
-        self.assertEqual(res.status_int, 400)
-
-    def test_dscp_value_with_qos_marking_trusted_returns_400(self):
-        body = {'qos_queue': {'tenant_id': 'admin', 'dscp': '1',
-                              'qos_marking': 'trusted',
-                              'name': 'foo', 'min': 20, 'max': 20}}
-        res = self._create_qos_queue('json', body)
-        self.assertEqual(res.status_int, 400)
-
-    def _test_rxtx_factor(self, max_value, rxtx_factor):
-        with self.qos_queue(max=max_value) as q1:
-
-            res = self._create_network('json', 'net1', True,
-                                       arg_list=(ext_qos.QUEUE,),
-                                       queue_id=q1['qos_queue']['id'])
-            net1 = self.deserialize('json', res)
-            res = self._create_port('json', net1['network']['id'],
-                                    arg_list=(ext_qos.RXTX_FACTOR,),
-                                    rxtx_factor=rxtx_factor, device_id='1')
-            port = self.deserialize('json', res)
-            req = self.new_show_request('qos-queues',
-                                        port['port'][ext_qos.QUEUE])
-            res = req.get_response(self.ext_api)
-            queue = self.deserialize('json', res)
-            self.assertEqual(queue['qos_queue']['max'],
-                             max_value * rxtx_factor)
-
-    def test_rxtx_factor(self):
-        self._test_rxtx_factor(10, 2)
-
-    def test_decimal_rxtx_factor(self):
-        self._test_rxtx_factor(10, 1.5)
-
-    def test_decimal_rxtx_factor_below_1(self):
-        self._test_rxtx_factor(10, 0.5)
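The deleted module leaned on a create/yield/cleanup fixture (the qos_queue() contextmanager above). A minimal, generic sketch of that pattern, independent of the removed extension; every name here is illustrative only:

    import contextlib

    @contextlib.contextmanager
    def fixture(create, delete, do_delete=True):
        res = create()      # POST the resource, fail fast on errors
        yield res           # hand it to the test body
        if do_delete:
            delete(res)     # clean up unless the test deletes it itself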
@@ -1,662 +0,0 @@
-# Copyright 2012 VMware, Inc.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_log import log as logging
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-import six
-import six.moves.urllib.parse as urlparse
-
-from vmware_nsx.api_client import exception as api_exc
-
-
-LOG = logging.getLogger(__name__)
-MAX_NAME_LEN = 40
-
-
-def _validate_name(name):
-    if name and len(name) > MAX_NAME_LEN:
-        raise Exception("Logical switch name exceeds %d characters",
-                        MAX_NAME_LEN)
-
-
-def _validate_resource(body):
-    _validate_name(body.get('display_name'))
-
-
-class FakeClient(object):
-
-    LSWITCH_RESOURCE = 'lswitch'
-    LPORT_RESOURCE = 'lport'
-    LROUTER_RESOURCE = 'lrouter'
-    NAT_RESOURCE = 'nat'
-    LQUEUE_RESOURCE = 'lqueue'
-    SECPROF_RESOURCE = 'securityprofile'
-    LSWITCH_STATUS = 'lswitchstatus'
-    LROUTER_STATUS = 'lrouterstatus'
-    LSWITCH_LPORT_RESOURCE = 'lswitch_lport'
-    LROUTER_LPORT_RESOURCE = 'lrouter_lport'
-    LROUTER_NAT_RESOURCE = 'lrouter_nat'
-    LSWITCH_LPORT_STATUS = 'lswitch_lportstatus'
-    LSWITCH_LPORT_ATT = 'lswitch_lportattachment'
-    LROUTER_LPORT_STATUS = 'lrouter_lportstatus'
-    LROUTER_LPORT_ATT = 'lrouter_lportattachment'
-    GWSERVICE_RESOURCE = 'gatewayservice'
-
-    RESOURCES = [LSWITCH_RESOURCE, LROUTER_RESOURCE, LQUEUE_RESOURCE,
-                 LPORT_RESOURCE, NAT_RESOURCE, SECPROF_RESOURCE,
-                 GWSERVICE_RESOURCE]
-
-    FAKE_GET_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_get_lswitch.json",
-        LSWITCH_LPORT_RESOURCE: "fake_get_lswitch_lport.json",
-        LSWITCH_LPORT_STATUS: "fake_get_lswitch_lport_status.json",
-        LSWITCH_LPORT_ATT: "fake_get_lswitch_lport_att.json",
-        LROUTER_RESOURCE: "fake_get_lrouter.json",
-        LROUTER_LPORT_RESOURCE: "fake_get_lrouter_lport.json",
-        LROUTER_LPORT_STATUS: "fake_get_lrouter_lport_status.json",
-        LROUTER_LPORT_ATT: "fake_get_lrouter_lport_att.json",
-        LROUTER_STATUS: "fake_get_lrouter_status.json",
-        LROUTER_NAT_RESOURCE: "fake_get_lrouter_nat.json",
-        SECPROF_RESOURCE: "fake_get_security_profile.json",
-        LQUEUE_RESOURCE: "fake_get_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_get_gwservice.json"
-    }
-
-    FAKE_POST_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_post_lswitch.json",
-        LROUTER_RESOURCE: "fake_post_lrouter.json",
-        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
-        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
-        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
-        SECPROF_RESOURCE: "fake_post_security_profile.json",
-        LQUEUE_RESOURCE: "fake_post_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_post_gwservice.json"
-    }
-
-    FAKE_PUT_RESPONSES = {
-        LSWITCH_RESOURCE: "fake_post_lswitch.json",
-        LROUTER_RESOURCE: "fake_post_lrouter.json",
-        LSWITCH_LPORT_RESOURCE: "fake_post_lswitch_lport.json",
-        LROUTER_LPORT_RESOURCE: "fake_post_lrouter_lport.json",
-        LROUTER_NAT_RESOURCE: "fake_post_lrouter_nat.json",
-        LSWITCH_LPORT_ATT: "fake_put_lswitch_lport_att.json",
-        LROUTER_LPORT_ATT: "fake_put_lrouter_lport_att.json",
-        SECPROF_RESOURCE: "fake_post_security_profile.json",
-        LQUEUE_RESOURCE: "fake_post_lqueue.json",
-        GWSERVICE_RESOURCE: "fake_post_gwservice.json"
-    }
-
-    MANAGED_RELATIONS = {
-        LSWITCH_RESOURCE: [],
-        LROUTER_RESOURCE: [],
-        LSWITCH_LPORT_RESOURCE: ['LogicalPortAttachment'],
-        LROUTER_LPORT_RESOURCE: ['LogicalPortAttachment'],
-    }
-
-    _validators = {
-        LSWITCH_RESOURCE: _validate_resource,
-        LSWITCH_LPORT_RESOURCE: _validate_resource,
-        LROUTER_LPORT_RESOURCE: _validate_resource,
-        SECPROF_RESOURCE: _validate_resource,
-        LQUEUE_RESOURCE: _validate_resource,
-        GWSERVICE_RESOURCE: _validate_resource
-    }
-
-    def __init__(self, fake_files_path):
-        self.fake_files_path = fake_files_path
-        self._fake_lswitch_dict = {}
-        self._fake_lrouter_dict = {}
-        self._fake_lswitch_lport_dict = {}
-        self._fake_lrouter_lport_dict = {}
-        self._fake_lrouter_nat_dict = {}
-        self._fake_lswitch_lportstatus_dict = {}
-        self._fake_lrouter_lportstatus_dict = {}
-        self._fake_securityprofile_dict = {}
-        self._fake_lqueue_dict = {}
-        self._fake_gatewayservice_dict = {}
-
-    def _get_tag(self, resource, scope):
-        tags = [tag['tag'] for tag in resource['tags']
-                if tag['scope'] == scope]
-        return len(tags) > 0 and tags[0]
-
-    def _get_filters(self, querystring):
-        if not querystring:
-            return (None, None, None, None)
-        params = urlparse.parse_qs(querystring)
-        tag_filter = None
-        attr_filter = None
-        if 'tag' in params and 'tag_scope' in params:
-            tag_filter = {'scope': params['tag_scope'][0],
-                          'tag': params['tag'][0]}
-        elif 'uuid' in params:
-            attr_filter = {'uuid': params['uuid'][0]}
-        # Handle page length and page cursor parameter
-        page_len = params.get('_page_length')
-        page_cursor = params.get('_page_cursor')
-        if page_len:
-            page_len = int(page_len[0])
-        else:
-            # Explicitly set it to None (avoid 0 or empty list)
-            page_len = None
-        return (tag_filter, attr_filter, page_len, page_cursor)
-
-    def _add_lswitch(self, body):
-        fake_lswitch = jsonutils.loads(body)
-        fake_lswitch['uuid'] = uuidutils.generate_uuid()
-        self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
-        # put the tenant_id and the zone_uuid in the main dict
-        # for simplyfying templating
-        zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
-        fake_lswitch['zone_uuid'] = zone_uuid
-        fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
-        fake_lswitch['lport_count'] = 0
-        # set status value
-        fake_lswitch['status'] = 'true'
-        return fake_lswitch
-
-    def _build_lrouter(self, body, uuid=None):
-        fake_lrouter = jsonutils.loads(body)
-        if uuid:
-            fake_lrouter['uuid'] = uuid
-        fake_lrouter['tenant_id'] = self._get_tag(fake_lrouter, 'os_tid')
-        default_nexthop = fake_lrouter['routing_config'].get(
-            'default_route_next_hop')
-        if default_nexthop:
-            fake_lrouter['default_next_hop'] = default_nexthop.get(
-                'gateway_ip_address', '0.0.0.0')
-        else:
-            fake_lrouter['default_next_hop'] = '0.0.0.0'
-        # NOTE(salv-orlando): We won't make the Fake NSX API client
-        # aware of NSX version. The long term plan is to replace it
-        # with behavioral mocking of NSX API requests
-        if 'distributed' not in fake_lrouter:
-            fake_lrouter['distributed'] = False
-        distributed_json = ('"distributed": %s,' %
-                            str(fake_lrouter['distributed']).lower())
-        fake_lrouter['distributed_json'] = distributed_json
-        return fake_lrouter
-
-    def _add_lrouter(self, body):
-        fake_lrouter = self._build_lrouter(body,
-                                           uuidutils.generate_uuid())
-        self._fake_lrouter_dict[fake_lrouter['uuid']] = fake_lrouter
-        fake_lrouter['lport_count'] = 0
-        # set status value
-        fake_lrouter['status'] = 'true'
-        return fake_lrouter
-
-    def _add_lqueue(self, body):
-        fake_lqueue = jsonutils.loads(body)
-        fake_lqueue['uuid'] = uuidutils.generate_uuid()
-        self._fake_lqueue_dict[fake_lqueue['uuid']] = fake_lqueue
-        return fake_lqueue
-
-    def _add_lswitch_lport(self, body, ls_uuid):
-        fake_lport = jsonutils.loads(body)
-        new_uuid = uuidutils.generate_uuid()
-        fake_lport['uuid'] = new_uuid
-        # put the tenant_id and the ls_uuid in the main dict
-        # for simplyfying templating
-        fake_lport['ls_uuid'] = ls_uuid
-        fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
-        fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
-                                                      'q_port_id')
-        fake_lport['neutron_device_id'] = self._get_tag(fake_lport, 'vm_id')
-        fake_lport['att_type'] = "NoAttachment"
-        fake_lport['att_info_json'] = ''
-        self._fake_lswitch_lport_dict[fake_lport['uuid']] = fake_lport
-
-        fake_lswitch = self._fake_lswitch_dict[ls_uuid]
-        fake_lswitch['lport_count'] += 1
-        fake_lport_status = fake_lport.copy()
-        fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
-        fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
-        fake_lport_status['ls_name'] = fake_lswitch['display_name']
-        fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
-        # set status value
-        fake_lport['status'] = 'true'
-        self._fake_lswitch_lportstatus_dict[new_uuid] = fake_lport_status
-        return fake_lport
-
-    def _build_lrouter_lport(self, body, new_uuid=None, lr_uuid=None):
-        fake_lport = jsonutils.loads(body)
-        if new_uuid:
-            fake_lport['uuid'] = new_uuid
-        # put the tenant_id and the le_uuid in the main dict
-        # for simplyfying templating
-        if lr_uuid:
-            fake_lport['lr_uuid'] = lr_uuid
-        fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
-        fake_lport['neutron_port_id'] = self._get_tag(fake_lport,
-                                                      'q_port_id')
-        # replace ip_address with its json dump
-        if 'ip_addresses' in fake_lport:
-            ip_addresses_json = jsonutils.dumps(fake_lport['ip_addresses'])
-            fake_lport['ip_addresses_json'] = ip_addresses_json
-        return fake_lport
-
-    def _add_lrouter_lport(self, body, lr_uuid):
-        new_uuid = uuidutils.generate_uuid()
-        fake_lport = self._build_lrouter_lport(body, new_uuid, lr_uuid)
-        self._fake_lrouter_lport_dict[fake_lport['uuid']] = fake_lport
-        try:
-            fake_lrouter = self._fake_lrouter_dict[lr_uuid]
-        except KeyError:
-            raise api_exc.ResourceNotFound()
-        fake_lrouter['lport_count'] += 1
-        fake_lport_status = fake_lport.copy()
-        fake_lport_status['lr_tenant_id'] = fake_lrouter['tenant_id']
-        fake_lport_status['lr_uuid'] = fake_lrouter['uuid']
-        fake_lport_status['lr_name'] = fake_lrouter['display_name']
-        self._fake_lrouter_lportstatus_dict[new_uuid] = fake_lport_status
-        return fake_lport
-
-    def _add_securityprofile(self, body):
-        fake_securityprofile = jsonutils.loads(body)
-        fake_securityprofile['uuid'] = uuidutils.generate_uuid()
-        fake_securityprofile['tenant_id'] = self._get_tag(
-            fake_securityprofile, 'os_tid')
-
-        fake_securityprofile['nova_spid'] = self._get_tag(fake_securityprofile,
-                                                          'nova_spid')
-        self._fake_securityprofile_dict[fake_securityprofile['uuid']] = (
-            fake_securityprofile)
-        return fake_securityprofile
-
-    def _add_lrouter_nat(self, body, lr_uuid):
-        fake_nat = jsonutils.loads(body)
-        new_uuid = uuidutils.generate_uuid()
-        fake_nat['uuid'] = new_uuid
-        fake_nat['lr_uuid'] = lr_uuid
-        self._fake_lrouter_nat_dict[fake_nat['uuid']] = fake_nat
-        if 'match' in fake_nat:
-            match_json = jsonutils.dumps(fake_nat['match'])
-            fake_nat['match_json'] = match_json
-        return fake_nat
-
-    def _add_gatewayservice(self, body):
-        fake_gwservice = jsonutils.loads(body)
-        fake_gwservice['uuid'] = str(uuidutils.generate_uuid())
-        fake_gwservice['tenant_id'] = self._get_tag(
-            fake_gwservice, 'os_tid')
-        # FIXME(salvatore-orlando): For simplicity we're managing only a
-        # single device. Extend the fake client for supporting multiple devices
-        first_gw = fake_gwservice['gateways'][0]
-        fake_gwservice['transport_node_uuid'] = first_gw['transport_node_uuid']
-        fake_gwservice['device_id'] = first_gw['device_id']
-        self._fake_gatewayservice_dict[fake_gwservice['uuid']] = (
-            fake_gwservice)
-        return fake_gwservice
-
-    def _build_relation(self, src, dst, resource_type, relation):
-        if relation not in self.MANAGED_RELATIONS[resource_type]:
-            return  # Relation is not desired in output
-        if '_relations' not in src or not src['_relations'].get(relation):
-            return  # Item does not have relation
-        relation_data = src['_relations'].get(relation)
-        dst_relations = dst.get('_relations', {})
-        dst_relations[relation] = relation_data
-        dst['_relations'] = dst_relations
-
-    def _fill_attachment(self, att_data, ls_uuid=None,
-                         lr_uuid=None, lp_uuid=None):
-        new_data = att_data.copy()
-        for k in ('ls_uuid', 'lr_uuid', 'lp_uuid'):
-            if locals().get(k):
-                new_data[k] = locals()[k]
-
-        def populate_field(field_name):
-            if field_name in att_data:
-                new_data['%s_field' % field_name] = ('"%s" : "%s",'
-                                                     % (field_name,
-                                                        att_data[field_name]))
-                del new_data[field_name]
-            else:
-                new_data['%s_field' % field_name] = ""
-
-        for field in ['vif_uuid', 'peer_port_href', 'vlan_id',
-                      'peer_port_uuid', 'l3_gateway_service_uuid']:
-            populate_field(field)
-        return new_data
-
-    def _get_resource_type(self, path):
-        """Get resource type.
-
-        Identifies resource type and relevant uuids in the uri
-
-        /ws.v1/lswitch/xxx
-        /ws.v1/lswitch/xxx/status
-        /ws.v1/lswitch/xxx/lport/yyy
-        /ws.v1/lswitch/xxx/lport/yyy/status
-        /ws.v1/lrouter/zzz
-        /ws.v1/lrouter/zzz/status
-        /ws.v1/lrouter/zzz/lport/www
-        /ws.v1/lrouter/zzz/lport/www/status
-        /ws.v1/lqueue/xxx
-        """
-        # The first element will always be 'ws.v1' - so we just discard it
-        uri_split = path.split('/')[1:]
-        # parse uri_split backwards
-        suffix = ""
-        idx = len(uri_split) - 1
-        if 'status' in uri_split[idx]:
-            suffix = "status"
-            idx = idx - 1
-        elif 'attachment' in uri_split[idx]:
-            suffix = "attachment"
-            idx = idx - 1
-        # then check if we have an uuid
-        uuids = []
-        if uri_split[idx].replace('-', '') not in self.RESOURCES:
-            uuids.append(uri_split[idx])
-            idx = idx - 1
-        resource_type = "%s%s" % (uri_split[idx], suffix)
-        if idx > 1:
-            uuids.insert(0, uri_split[idx - 1])
-            resource_type = "%s_%s" % (uri_split[idx - 2], resource_type)
-        return (resource_type.replace('-', ''), uuids)
-
-    def _list(self, resource_type, response_file,
-              parent_uuid=None, query=None, relations=None):
-        (tag_filter, attr_filter,
-         page_len, page_cursor) = self._get_filters(query)
-        # result_count attribute in response should appear only when
-        # page_cursor is not specified
-        do_result_count = not page_cursor
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-        res_dict = getattr(self, '_fake_%s_dict' % resource_type)
-        if parent_uuid == '*':
-            parent_uuid = None
-        # NSX raises ResourceNotFound if lswitch doesn't exist and is not *
-        elif not res_dict and resource_type == self.LSWITCH_LPORT_RESOURCE:
-            raise api_exc.ResourceNotFound()
-
-        def _attr_match(res_uuid):
-            if not attr_filter:
-                return True
-            item = res_dict[res_uuid]
-            for (attr, value) in six.iteritems(attr_filter):
-                if item.get(attr) != value:
-                    return False
-            return True
-
-        def _tag_match(res_uuid):
-            if not tag_filter:
-                return True
-            return any([x['scope'] == tag_filter['scope'] and
-                        x['tag'] == tag_filter['tag']
-                        for x in res_dict[res_uuid]['tags']])
-
-        def _lswitch_match(res_uuid):
-            # verify that the switch exist
-            if parent_uuid and parent_uuid not in self._fake_lswitch_dict:
-                raise Exception(_("lswitch:%s not found") % parent_uuid)
-            if (not parent_uuid or
-                    res_dict[res_uuid].get('ls_uuid') == parent_uuid):
-                return True
-            return False
-
-        def _lrouter_match(res_uuid):
-            # verify that the router exist
-            if parent_uuid and parent_uuid not in self._fake_lrouter_dict:
-                raise api_exc.ResourceNotFound()
-            if (not parent_uuid or
-                    res_dict[res_uuid].get('lr_uuid') == parent_uuid):
-                return True
-            return False
-
-        def _cursor_match(res_uuid, page_cursor):
-            if not page_cursor:
-                return True
-            if page_cursor == res_uuid:
-                # always return True once page_cursor has been found
-                page_cursor = None
-                return True
-            return False
-
-        def _build_item(resource):
-            item = jsonutils.loads(response_template % resource)
-            if relations:
-                for relation in relations:
-                    self._build_relation(resource, item,
-                                         resource_type, relation)
-            return item
-
-        for item in res_dict.values():
-            if 'tags' in item:
-                item['tags_json'] = jsonutils.dumps(item['tags'])
-        if resource_type in (self.LSWITCH_LPORT_RESOURCE,
-                             self.LSWITCH_LPORT_ATT,
-                             self.LSWITCH_LPORT_STATUS):
-            parent_func = _lswitch_match
-        elif resource_type in (self.LROUTER_LPORT_RESOURCE,
-                               self.LROUTER_LPORT_ATT,
-                               self.LROUTER_NAT_RESOURCE,
-                               self.LROUTER_LPORT_STATUS):
-            parent_func = _lrouter_match
-        else:
-            parent_func = (lambda x: True)
-
-        items = [_build_item(res_dict[res_uuid])
-                 for res_uuid in res_dict
-                 if (parent_func(res_uuid) and
-                     _tag_match(res_uuid) and
-                     _attr_match(res_uuid) and
-                     _cursor_match(res_uuid, page_cursor))]
-        # Rather inefficient, but hey this is just a mock!
-        next_cursor = None
-        total_items = len(items)
-        if page_len:
-            try:
-                next_cursor = items[page_len]['uuid']
-            except IndexError:
-                next_cursor = None
-            items = items[:page_len]
-        response_dict = {'results': items}
-        if next_cursor:
-            response_dict['page_cursor'] = next_cursor
-        if do_result_count:
-            response_dict['result_count'] = total_items
-        return jsonutils.dumps(response_dict)
-
-    def _show(self, resource_type, response_file,
-              uuid1, uuid2=None, relations=None):
-        target_uuid = uuid2 or uuid1
-        if resource_type.endswith('attachment'):
-            resource_type = resource_type[:resource_type.index('attachment')]
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-        res_dict = getattr(self, '_fake_%s_dict' % resource_type)
-        for item in res_dict.values():
-            if 'tags' in item:
-                item['tags_json'] = jsonutils.dumps(item['tags'])
-
-            # replace sec prof rules with their json dump
-            def jsonify_rules(rule_key):
-                if rule_key in item:
-                    rules_json = jsonutils.dumps(item[rule_key])
-                    item['%s_json' % rule_key] = rules_json
-            jsonify_rules('logical_port_egress_rules')
-            jsonify_rules('logical_port_ingress_rules')
-
-        items = [jsonutils.loads(response_template % res_dict[res_uuid])
-                 for res_uuid in res_dict if res_uuid == target_uuid]
-        if items:
-            return jsonutils.dumps(items[0])
-        raise api_exc.ResourceNotFound()
-
-    def handle_get(self, url):
-        #TODO(salvatore-orlando): handle field selection
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        relations = urlparse.parse_qs(parsedurl.query).get('relations')
-        response_file = self.FAKE_GET_RESPONSES.get(res_type)
-        if not response_file:
-            raise api_exc.NsxApiException()
-        if 'lport' in res_type or 'nat' in res_type:
-            if len(uuids) > 1:
-                return self._show(res_type, response_file, uuids[0],
-                                  uuids[1], relations=relations)
-            else:
-                return self._list(res_type, response_file, uuids[0],
-                                  query=parsedurl.query, relations=relations)
-        elif ('lswitch' in res_type or
-              'lrouter' in res_type or
-              self.SECPROF_RESOURCE in res_type or
-              self.LQUEUE_RESOURCE in res_type or
-              'gatewayservice' in res_type):
-            LOG.debug("UUIDS:%s", uuids)
-            if uuids:
-                return self._show(res_type, response_file, uuids[0],
-                                  relations=relations)
-            else:
-                return self._list(res_type, response_file,
-                                  query=parsedurl.query,
-                                  relations=relations)
-        else:
-            raise Exception("unknown resource:%s" % res_type)
-
-    def handle_post(self, url, body):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_POST_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-        add_resource = getattr(self, '_add_%s' % res_type)
-        body_json = jsonutils.loads(body)
-        val_func = self._validators.get(res_type)
-        if val_func:
-            val_func(body_json)
-        args = [body]
-        if uuids:
-            args.append(uuids[0])
-        response = response_template % add_resource(*args)
-        return response
-
-    def handle_put(self, url, body):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        with open("%s/%s" % (self.fake_files_path, response_file)) as f:
-            response_template = f.read()
-        # Manage attachment operations
-        is_attachment = False
-        if res_type.endswith('attachment'):
-            is_attachment = True
-            res_type = res_type[:res_type.index('attachment')]
-        res_dict = getattr(self, '_fake_%s_dict' % res_type)
-        body_json = jsonutils.loads(body)
-        val_func = self._validators.get(res_type)
-        if val_func:
-            val_func(body_json)
-        try:
-            resource = res_dict[uuids[-1]]
-        except KeyError:
-            raise api_exc.ResourceNotFound()
-        if not is_attachment:
-            edit_resource = getattr(self, '_build_%s' % res_type, None)
-            if edit_resource:
-                body_json = edit_resource(body)
-            resource.update(body_json)
-        else:
-            relations = resource.get("_relations", {})
-            body_2 = jsonutils.loads(body)
-            resource['att_type'] = body_2['type']
-            relations['LogicalPortAttachment'] = body_2
-            resource['_relations'] = relations
-            if body_2['type'] == "PatchAttachment":
-                # We need to do a trick here
-                if self.LROUTER_RESOURCE in res_type:
-                    res_type_2 = res_type.replace(self.LROUTER_RESOURCE,
-                                                  self.LSWITCH_RESOURCE)
-                elif self.LSWITCH_RESOURCE in res_type:
-                    res_type_2 = res_type.replace(self.LSWITCH_RESOURCE,
-                                                  self.LROUTER_RESOURCE)
-                res_dict_2 = getattr(self, '_fake_%s_dict' % res_type_2)
-                body_2['peer_port_uuid'] = uuids[-1]
-                resource_2 = \
-                    res_dict_2[jsonutils.loads(body)['peer_port_uuid']]
-                relations_2 = resource_2.get("_relations")
-                if not relations_2:
-                    relations_2 = {}
-                relations_2['LogicalPortAttachment'] = body_2
-                resource_2['_relations'] = relations_2
-                resource['peer_port_uuid'] = body_2['peer_port_uuid']
-                resource['att_info_json'] = (
-                    "\"peer_port_uuid\": \"%s\"," %
-                    resource_2['uuid'])
-                resource_2['att_info_json'] = (
-                    "\"peer_port_uuid\": \"%s\"," %
-                    body_2['peer_port_uuid'])
-            elif body_2['type'] == "L3GatewayAttachment":
-                resource['attachment_gwsvc_uuid'] = (
-                    body_2['l3_gateway_service_uuid'])
-                resource['vlan_id'] = body_2.get('vlan_id')
-            elif body_2['type'] == "L2GatewayAttachment":
-                resource['attachment_gwsvc_uuid'] = (
-                    body_2['l2_gateway_service_uuid'])
-            elif body_2['type'] == "VifAttachment":
-                resource['vif_uuid'] = body_2['vif_uuid']
-                resource['att_info_json'] = (
-                    "\"vif_uuid\": \"%s\"," % body_2['vif_uuid'])
-
-        if not is_attachment:
-            response = response_template % resource
-        else:
-            if res_type == self.LROUTER_LPORT_RESOURCE:
-                lr_uuid = uuids[0]
-                ls_uuid = None
-            elif res_type == self.LSWITCH_LPORT_RESOURCE:
-                ls_uuid = uuids[0]
-                lr_uuid = None
-            lp_uuid = uuids[1]
-            response = response_template % self._fill_attachment(
-                jsonutils.loads(body), ls_uuid, lr_uuid, lp_uuid)
-        return response
-
-    def handle_delete(self, url):
-        parsedurl = urlparse.urlparse(url)
-        (res_type, uuids) = self._get_resource_type(parsedurl.path)
-        response_file = self.FAKE_PUT_RESPONSES.get(res_type)
-        if not response_file:
-            raise Exception("resource not found")
-        res_dict = getattr(self, '_fake_%s_dict' % res_type)
-        try:
-            del res_dict[uuids[-1]]
-        except KeyError:
-            raise api_exc.ResourceNotFound()
-        return ""
-
-    def fake_request(self, *args, **kwargs):
-        method = args[0]
-        handler = getattr(self, "handle_%s" % method.lower())
-        return handler(*args[1:])
-
-    def reset_all(self):
-        self._fake_lswitch_dict.clear()
-        self._fake_lrouter_dict.clear()
-        self._fake_lswitch_lport_dict.clear()
-        self._fake_lrouter_lport_dict.clear()
-        self._fake_lswitch_lportstatus_dict.clear()
-        self._fake_lrouter_lportstatus_dict.clear()
-        self._fake_lqueue_dict.clear()
-        self._fake_securityprofile_dict.clear()
-        self._fake_gatewayservice_dict.clear()
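For reference, the removed FakeClient was wired into tests by patching the real API client and routing every request into fake_request(), which dispatched on the HTTP verb; this restates the setUp() code deleted earlier in this change rather than introducing anything new:

    fc = fake.FakeClient(vmware.STUBS_PATH)
    mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
    instance = mock_nsx.start()
    # every client.request('GET'/'POST'/..., url, ...) lands in
    # FakeClient.fake_request, which looks up handle_get/handle_post/...
    instance.return_value.request.side_effect = fc.fake_request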
@@ -1,36 +0,0 @@
-# Copyright 2011 VMware, Inc.
-#
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-from neutron.tests import base
-from six.moves import http_client as httplib
-
-from vmware_nsx import api_client
-
-
-class ApiCommonTest(base.BaseTestCase):
-
-    def test_ctrl_conn_to_str(self):
-        conn = httplib.HTTPSConnection('localhost', 4242, timeout=0)
-        self.assertTrue(
-            api_client.ctrl_conn_to_str(conn) == 'https://localhost:4242')
-
-        conn = httplib.HTTPConnection('localhost', 4242, timeout=0)
-        self.assertTrue(
-            api_client.ctrl_conn_to_str(conn) == 'http://localhost:4242')
-
-        self.assertRaises(TypeError, api_client.ctrl_conn_to_str,
-                          ('not an httplib.HTTPSConnection'))
@@ -1,333 +0,0 @@
# Copyright (C) 2009-2012 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import random
import urllib

import eventlet
import mock
from neutron.tests import base
from oslo_log import log as logging
from six.moves import http_client as httplib

from vmware_nsx.api_client import (
    eventlet_client as client)
from vmware_nsx.api_client import (
    eventlet_request as request)
from vmware_nsx.tests import unit as vmware

LOG = logging.getLogger(__name__)


REQUEST_TIMEOUT = 1


def fetch(url):
    return urllib.urlopen(url).read()


class ApiRequestEventletTest(base.BaseTestCase):

    def setUp(self):

        super(ApiRequestEventletTest, self).setUp()
        self.client = client.EventletApiClient(
            [("127.0.0.1", 4401, True)], "admin", "admin")
        self.url = "/ws.v1/_debug"
        self.req = request.EventletApiRequest(self.client, self.url)

    def tearDown(self):
        self.client = None
        self.req = None
        super(ApiRequestEventletTest, self).tearDown()

    def test_construct_eventlet_api_request(self):
        e = request.EventletApiRequest(self.client, self.url)
        self.assertIsNotNone(e)

    def test_apirequest_spawn(self):
        def x(id):
            eventlet.greenthread.sleep(random.random())
            LOG.info('spawned: %d', id)

        for i in range(10):
            request.EventletApiRequest._spawn(x, i)

    def test_apirequest_start(self):
        for i in range(10):
            a = request.EventletApiRequest(
                self.client, self.url)
            a._handle_request = mock.Mock()
            a.start()
            eventlet.greenthread.sleep(0.1)
            LOG.info('_handle_request called: %s',
                     a._handle_request.called)
        request.EventletApiRequest.joinall()

    def test_join_with_handle_request(self):
        self.req._handle_request = mock.Mock()
        self.req.start()
        self.req.join()
        self.assertTrue(self.req._handle_request.called)

    def test_join_without_handle_request(self):
        self.req._handle_request = mock.Mock()
        self.req.join()
        self.assertFalse(self.req._handle_request.called)

    def test_copy(self):
        req = self.req.copy()
        for att in [
            '_api_client', '_url', '_method', '_body', '_headers',
            '_http_timeout', '_request_timeout', '_retries',
            '_redirects', '_auto_login']:
            self.assertTrue(getattr(req, att) is getattr(self.req, att))

    def test_request_error(self):
        self.assertIsNone(self.req.request_error)

    def test_run_and_handle_request(self):
        self.req._request_timeout = None
        self.req._handle_request = mock.Mock()
        self.req.start()
        self.req.join()
        self.assertTrue(self.req._handle_request.called)

    def test_run_and_timeout(self):
        def my_handle_request():
            LOG.info('my_handle_request() self: %s', self.req)
            LOG.info('my_handle_request() dir(self): %s', dir(self.req))
            eventlet.greenthread.sleep(REQUEST_TIMEOUT * 2)

        with mock.patch.object(
            self.req,
            '_handle_request',
            new=my_handle_request
        ):
            self.req._request_timeout = REQUEST_TIMEOUT
            self.req.start()
            self.assertIsNone(self.req.join())

    def prep_issue_request(self):
        mysock = mock.Mock()
        mysock.gettimeout.return_value = 4242

        myresponse = mock.Mock()
        myresponse.read.return_value = 'body'
        myresponse.getheaders.return_value = 'headers'
        myresponse.status = httplib.MOVED_PERMANENTLY

        myconn = mock.Mock()
        myconn.request.return_value = None
        myconn.sock = mysock
        myconn.getresponse.return_value = myresponse
        myconn.__str__ = mock.Mock()
        myconn.__str__.return_value = 'myconn string'

        req = self.req
        req._redirect_params = mock.Mock()
        req._redirect_params.return_value = (myconn, 'url')
        req._request_str = mock.Mock()
        req._request_str.return_value = 'http://cool/cool'

        client = self.client
        client.need_login = False
        client._auto_login = False
        client._auth_cookie = False
        client.acquire_connection = mock.Mock()
        client.acquire_connection.return_value = myconn
        client.release_connection = mock.Mock()

        return (mysock, myresponse, myconn)

    def test_issue_request_trigger_exception(self):
        (mysock, myresponse, myconn) = self.prep_issue_request()
        self.client.acquire_connection.return_value = None

        self.req._issue_request()
        self.assertIsInstance(self.req._request_error, Exception)
        self.assertTrue(self.client.acquire_connection.called)

    def test_issue_request_handle_none_sock(self):
        (mysock, myresponse, myconn) = self.prep_issue_request()
        myconn.sock = None
        self.req.start()
        self.assertIsNone(self.req.join())
        self.assertTrue(self.client.acquire_connection.called)

    def test_issue_request_exceed_maximum_retries(self):
        (mysock, myresponse, myconn) = self.prep_issue_request()
        self.req.start()
        self.assertIsNone(self.req.join())
        self.assertTrue(self.client.acquire_connection.called)

    def test_issue_request_trigger_non_redirect(self):
        (mysock, myresponse, myconn) = self.prep_issue_request()
        myresponse.status = httplib.OK
        self.req.start()
        self.assertIsNone(self.req.join())
        self.assertTrue(self.client.acquire_connection.called)

    def test_issue_request_trigger_internal_server_error(self):
        (mysock, myresponse, myconn) = self.prep_issue_request()
        self.req._redirect_params.return_value = (myconn, None)
        self.req.start()
        self.assertIsNone(self.req.join())
        self.assertTrue(self.client.acquire_connection.called)

    def test_redirect_params_break_on_location(self):
        myconn = mock.Mock()
        (conn, retval) = self.req._redirect_params(
            myconn, [('location', None)])
        self.assertIsNone(retval)

    def test_redirect_params_parse_a_url(self):
        myconn = mock.Mock()
        (conn, retval) = self.req._redirect_params(
            myconn, [('location', '/path/a/b/c')])
        self.assertIsNotNone(retval)

    def test_redirect_params_invalid_redirect_location(self):
        myconn = mock.Mock()
        (conn, retval) = self.req._redirect_params(
            myconn, [('location', '+path/a/b/c')])
        self.assertIsNone(retval)

    def test_redirect_params_invalid_scheme(self):
        myconn = mock.Mock()
        (conn, retval) = self.req._redirect_params(
            myconn, [('location', 'invalidscheme://hostname:1/path')])
        self.assertIsNone(retval)

    def test_redirect_params_setup_https_with_cooki(self):
        with mock.patch(vmware.CLIENT_NAME) as mock_client:
            api_client = mock_client.return_value
            self.req._api_client = api_client
            myconn = mock.Mock()
            (conn, retval) = self.req._redirect_params(
                myconn, [('location', 'https://host:1/path')])

            self.assertIsNotNone(retval)
            self.assertTrue(api_client.acquire_redirect_connection.called)

    def test_redirect_params_setup_htttps_and_query(self):
        with mock.patch(vmware.CLIENT_NAME) as mock_client:
            api_client = mock_client.return_value
            self.req._api_client = api_client
            myconn = mock.Mock()
            (conn, retval) = self.req._redirect_params(myconn, [
                ('location', 'https://host:1/path?q=1')])

            self.assertIsNotNone(retval)
            self.assertTrue(api_client.acquire_redirect_connection.called)

    def test_redirect_params_setup_https_connection_no_cookie(self):
        with mock.patch(vmware.CLIENT_NAME) as mock_client:
            api_client = mock_client.return_value
            self.req._api_client = api_client
            myconn = mock.Mock()
            (conn, retval) = self.req._redirect_params(myconn, [
                ('location', 'https://host:1/path')])

            self.assertIsNotNone(retval)
            self.assertTrue(api_client.acquire_redirect_connection.called)

    def test_redirect_params_setup_https_and_query_no_cookie(self):
        with mock.patch(vmware.CLIENT_NAME) as mock_client:
            api_client = mock_client.return_value
            self.req._api_client = api_client
            myconn = mock.Mock()
            (conn, retval) = self.req._redirect_params(
                myconn, [('location', 'https://host:1/path?q=1')])
            self.assertIsNotNone(retval)
            self.assertTrue(api_client.acquire_redirect_connection.called)

    def test_redirect_params_path_only_with_query(self):
        with mock.patch(vmware.CLIENT_NAME) as mock_client:
            api_client = mock_client.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = None
            api_client.acquire_connection.return_value = True
            myconn = mock.Mock()
            (conn, retval) = self.req._redirect_params(myconn, [
                ('location', '/path?q=1')])
            self.assertIsNotNone(retval)

    def test_handle_request_auto_login(self):
        self.req._auto_login = True
        self.req._api_client = mock.Mock()
        self.req._api_client.need_login = True
        self.req._request_str = mock.Mock()
        self.req._request_str.return_value = 'http://cool/cool'
        self.req.spawn = mock.Mock()
        self.req._handle_request()

    def test_handle_request_auto_login_unauth(self):
        self.req._auto_login = True
        self.req._api_client = mock.Mock()
        self.req._api_client.need_login = True
        self.req._request_str = mock.Mock()
        self.req._request_str.return_value = 'http://cool/cool'

        import socket
        resp = httplib.HTTPResponse(socket.socket())
        resp.status = httplib.UNAUTHORIZED
        mywaiter = mock.Mock()
        mywaiter.wait = mock.Mock()
        mywaiter.wait.return_value = resp
        self.req.spawn = mock.Mock(return_value=mywaiter)
        self.req._handle_request()

    def test_construct_eventlet_login_request(self):
        r = request.LoginRequestEventlet(self.client, 'user', 'password')
        self.assertIsNotNone(r)

    def test_session_cookie_session_cookie_retrieval(self):
        r = request.LoginRequestEventlet(self.client, 'user', 'password')
        r.successful = mock.Mock()
        r.successful.return_value = True
        r.value = mock.Mock()
        r.value.get_header = mock.Mock()
        r.value.get_header.return_value = 'cool'
        self.assertIsNotNone(r.session_cookie())

    def test_session_cookie_not_retrieved(self):
        r = request.LoginRequestEventlet(self.client, 'user', 'password')
        r.successful = mock.Mock()
        r.successful.return_value = False
        r.value = mock.Mock()
        r.value.get_header = mock.Mock()
        r.value.get_header.return_value = 'cool'
        self.assertIsNone(r.session_cookie())

    def test_construct_eventlet_get_api_providers_request(self):
        r = request.GetApiProvidersRequestEventlet(self.client)
        self.assertIsNotNone(r)

    def test_api_providers_none_api_providers(self):
        r = request.GetApiProvidersRequestEventlet(self.client)
        r.successful = mock.Mock(return_value=False)
        self.assertIsNone(r.api_providers())

    def test_api_providers_non_none_api_providers(self):
        r = request.GetApiProvidersRequestEventlet(self.client)
        r.value = mock.Mock()
        r.value.body = """{
            "results": [
                { "roles": [
                    { "role": "api_provider",
                      "listen_addr": "pssl:1.1.1.1:1" }]}]}"""
        r.successful = mock.Mock(return_value=True)
        LOG.info('%s', r.api_providers())
        self.assertIsNotNone(r.api_providers())
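The redirect tests above pin down the _redirect_params behavior without showing the implementation; a rough reduction of what they require follows. This is hedged: the function name is real, but the structure, the helper's signature, and the single-argument acquire_redirect_connection call are inferred from the assertions, not copied from eventlet_request.

    from six.moves.urllib import parse as urlparse


    def redirect_params_sketch(conn, headers, api_client=None):
        # No (or empty) location header: stop following the redirect.
        location = dict(headers).get('location')
        if not location:
            return conn, None
        url = urlparse.urlparse(location)
        if not url.netloc and not url.path.startswith('/'):
            return conn, None       # e.g. '+path/a/b/c' is rejected
        if url.netloc and url.scheme not in ('http', 'https'):
            return conn, None       # e.g. 'invalidscheme://...' is rejected
        if url.netloc and api_client is not None:
            # Absolute redirects acquire a connection to the new host.
            conn = api_client.acquire_redirect_connection(location)
        return conn, location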
@@ -1,103 +0,0 @@
# Copyright 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron.tests.unit import testlib_api
from neutron_lib import context
from sqlalchemy import orm

from vmware_nsx.common import exceptions as p_exc
from vmware_nsx.db import lsn_db
from vmware_nsx.db import nsx_models


class LSNTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(LSNTestCase, self).setUp()
        self.ctx = context.get_admin_context()
        self.net_id = 'foo_network_id'
        self.lsn_id = 'foo_lsn_id'
        self.lsn_port_id = 'foo_port_id'
        self.subnet_id = 'foo_subnet_id'
        self.mac_addr = 'aa:bb:cc:dd:ee:ff'

    def test_lsn_add(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn = (self.ctx.session.query(nsx_models.Lsn).
               filter_by(lsn_id=self.lsn_id).one())
        self.assertEqual(self.lsn_id, lsn.lsn_id)

    def test_lsn_remove(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_remove(self.ctx, self.lsn_id)
        q = self.ctx.session.query(nsx_models.Lsn).filter_by(
            lsn_id=self.lsn_id)
        self.assertRaises(orm.exc.NoResultFound, q.one)

    def test_lsn_remove_for_network(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_remove_for_network(self.ctx, self.net_id)
        q = self.ctx.session.query(nsx_models.Lsn).filter_by(
            lsn_id=self.lsn_id)
        self.assertRaises(orm.exc.NoResultFound, q.one)

    def test_lsn_get_for_network(self):
        result = lsn_db.lsn_get_for_network(self.ctx, self.net_id,
                                            raise_on_err=False)
        self.assertIsNone(result)

    def test_lsn_get_for_network_raise_not_found(self):
        self.assertRaises(p_exc.LsnNotFound,
                          lsn_db.lsn_get_for_network,
                          self.ctx, self.net_id)

    def test_lsn_port_add(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
                                    self.subnet_id, self.mac_addr,
                                    self.lsn_id)
        result = (self.ctx.session.query(nsx_models.LsnPort).
                  filter_by(lsn_port_id=self.lsn_port_id).one())
        self.assertEqual(self.lsn_port_id, result.lsn_port_id)

    def test_lsn_port_get_for_mac(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
                                    self.subnet_id, self.mac_addr,
                                    self.lsn_id)
        result = lsn_db.lsn_port_get_for_mac(self.ctx, self.mac_addr)
        self.assertEqual(self.mac_addr, result.mac_addr)

    def test_lsn_port_get_for_mac_raise_not_found(self):
        self.assertRaises(p_exc.LsnPortNotFound,
                          lsn_db.lsn_port_get_for_mac,
                          self.ctx, self.mac_addr)

    def test_lsn_port_get_for_subnet(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_port_add_for_lsn(self.ctx, self.lsn_port_id,
                                    self.subnet_id, self.mac_addr,
                                    self.lsn_id)
        result = lsn_db.lsn_port_get_for_subnet(self.ctx, self.subnet_id)
        self.assertEqual(self.subnet_id, result.sub_id)

    def test_lsn_port_get_for_subnet_raise_not_found(self):
        self.assertRaises(p_exc.LsnPortNotFound,
                          lsn_db.lsn_port_get_for_subnet,
                          self.ctx, self.mac_addr)

    def test_lsn_port_remove(self):
        lsn_db.lsn_add(self.ctx, self.net_id, self.lsn_id)
        lsn_db.lsn_port_remove(self.ctx, self.lsn_port_id)
        q = (self.ctx.session.query(nsx_models.LsnPort).
             filter_by(lsn_port_id=self.lsn_port_id))
        self.assertRaises(orm.exc.NoResultFound, q.one)
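Read together, the removed tests double as documentation for the lsn_db helpers; a condensed usage sketch with the same identifiers (behavior inferred from the assertions above):

    from neutron_lib import context

    from vmware_nsx.common import exceptions as p_exc
    from vmware_nsx.db import lsn_db

    ctx = context.get_admin_context()
    # Record an LSN for a network, then attach a port to that LSN.
    lsn_db.lsn_add(ctx, 'foo_network_id', 'foo_lsn_id')
    lsn_db.lsn_port_add_for_lsn(ctx, 'foo_port_id', 'foo_subnet_id',
                                'aa:bb:cc:dd:ee:ff', 'foo_lsn_id')
    # lsn_get_for_network raises LsnNotFound on a miss unless
    # raise_on_err=False is passed (then it returns None); the port
    # lookups raise LsnPortNotFound.
    try:
        lsn_db.lsn_get_for_network(ctx, 'missing_network_id')
    except p_exc.LsnNotFound:
        pass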
@@ -1,49 +0,0 @@
# Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from neutron.db import models_v2
from neutron.tests.unit import testlib_api
from neutron_lib import context
from oslo_db import exception as d_exc

from vmware_nsx.db import db as nsx_db


class NsxDBTestCase(testlib_api.SqlTestCase):

    def setUp(self):
        super(NsxDBTestCase, self).setUp()
        self.ctx = context.get_admin_context()

    def _setup_neutron_network_and_port(self, network_id, port_id):
        with self.ctx.session.begin(subtransactions=True):
            self.ctx.session.add(models_v2.Network(id=network_id))
            port = models_v2.Port(id=port_id,
                                  network_id=network_id,
                                  mac_address='foo_mac_address',
                                  admin_state_up=True,
                                  status='ACTIVE',
                                  device_id='',
                                  device_owner='')
            self.ctx.session.add(port)

    def test_add_neutron_nsx_port_mapping_raise_integrity_constraint(self):
        neutron_port_id = 'foo_neutron_port_id'
        nsx_port_id = 'foo_nsx_port_id'
        nsx_switch_id = 'foo_nsx_switch_id'
        self.assertRaises(d_exc.DBError,
                          nsx_db.add_neutron_nsx_port_mapping,
                          self.ctx.session, neutron_port_id,
                          nsx_switch_id, nsx_port_id)
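The single test above leans on a schema detail: the NSX port-mapping table references the Neutron port, so mapping a port that was never created must surface as a DBError. A sketch of the failing call, reusing the test's identifiers (illustrative, under the assumption that no matching Port row exists in the session's database):

    from oslo_db import exception as d_exc

    from vmware_nsx.db import db as nsx_db


    def try_add_mapping(session):
        # The insert violates the mapping table's foreign-key constraint
        # when 'foo_neutron_port_id' is absent from the Neutron port table.
        try:
            nsx_db.add_neutron_nsx_port_mapping(
                session, 'foo_neutron_port_id',
                'foo_nsx_switch_id', 'foo_nsx_port_id')
        except d_exc.DBError:
            return False
        return True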
File diff suppressed because it is too large
@@ -1,258 +0,0 @@
# Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
from neutron.tests import base
from oslo_config import cfg
from oslo_utils import uuidutils
import six

from vmware_nsx.api_client import client
from vmware_nsx.api_client import version
from vmware_nsx.common import config  # noqa
from vmware_nsx.common import exceptions
from vmware_nsx.common import sync
from vmware_nsx import nsx_cluster
from vmware_nsx.nsxlib.mh import lsn as lsnlib
from vmware_nsx import plugin as mh_plugin
from vmware_nsx.tests import unit as vmware

BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test')
NSX_INI_PATH = vmware.get_fake_conf('nsx.ini.basic.test')
NSX_INI_FULL_PATH = vmware.get_fake_conf('nsx.ini.full.test')
NSX_INI_AGENTLESS_PATH = vmware.get_fake_conf('nsx.ini.agentless.test')
NSX_INI_COMBINED_PATH = vmware.get_fake_conf('nsx.ini.combined.test')
NVP_INI_DEPR_PATH = vmware.get_fake_conf('nvp.ini.full.test')


class NSXClusterTest(base.BaseTestCase):

    cluster_opts = {'default_tz_uuid': uuidutils.generate_uuid(),
                    'default_l2_gw_service_uuid': uuidutils.generate_uuid(),
                    'nsx_user': 'foo',
                    'nsx_password': 'bar',
                    'http_timeout': 25,
                    'retries': 7,
                    'redirects': 23,
                    'nsx_default_interface_name': 'baz',
                    'nsx_controllers': ['1.1.1.1:443']}

    def test_create_cluster(self):
        cluster = nsx_cluster.NSXCluster(**self.cluster_opts)
        for (k, v) in six.iteritems(self.cluster_opts):
            self.assertEqual(v, getattr(cluster, k))

    def test_create_cluster_default_port(self):
        opts = self.cluster_opts.copy()
        opts['nsx_controllers'] = ['1.1.1.1']
        cluster = nsx_cluster.NSXCluster(**opts)
        for (k, v) in six.iteritems(self.cluster_opts):
            self.assertEqual(v, getattr(cluster, k))

    def test_create_cluster_missing_required_attribute_raises(self):
        opts = self.cluster_opts.copy()
        opts.pop('default_tz_uuid')
        self.assertRaises(exceptions.InvalidClusterConfiguration,
                          nsx_cluster.NSXCluster, **opts)


class ConfigurationTest(base.BaseTestCase):

    def setUp(self):
        super(ConfigurationTest, self).setUp()
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()

    def _assert_required_options(self, cluster):
        self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443'])
        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')
        self.assertEqual(cluster.nsx_user, 'foo')
        self.assertEqual(cluster.nsx_password, 'bar')

    def _assert_extra_options(self, cluster):
        self.assertEqual(13, cluster.http_timeout)
        self.assertEqual(12, cluster.redirects)
        self.assertEqual(11, cluster.retries)
        self.assertEqual('whatever', cluster.default_l2_gw_service_uuid)
        self.assertEqual('whatever', cluster.default_l3_gw_service_uuid)
        self.assertEqual('whatever', cluster.nsx_default_interface_name)

    def _get_mh_plugin(self):
        with mock.patch("neutron_lib.rpc.Connection"):
            plugin = mh_plugin.NsxPlugin()
        return plugin

    def test_load_plugin_with_full_options(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_FULL_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        plugin = self._get_mh_plugin()
        cluster = plugin.cluster
        self._assert_required_options(cluster)
        self._assert_extra_options(cluster)

    def test_load_plugin_with_required_options_only(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        plugin = self._get_mh_plugin()
        self._assert_required_options(plugin.cluster)

    def test_defaults(self):
        self.assertEqual(5000, cfg.CONF.NSX.max_lp_per_bridged_ls)
        self.assertEqual(256, cfg.CONF.NSX.max_lp_per_overlay_ls)
        self.assertEqual(10, cfg.CONF.NSX.concurrent_connections)
        self.assertEqual('access_network', cfg.CONF.NSX.metadata_mode)
        self.assertEqual('stt', cfg.CONF.NSX.default_transport_type)
        self.assertEqual('service', cfg.CONF.NSX.replication_mode)

        self.assertIsNone(cfg.CONF.default_tz_uuid)
        self.assertEqual('admin', cfg.CONF.nsx_user)
        self.assertEqual('admin', cfg.CONF.nsx_password)
        self.assertEqual(75, cfg.CONF.http_timeout)
        self.assertEqual(2, cfg.CONF.retries)
        self.assertEqual(2, cfg.CONF.redirects)
        self.assertEqual([], cfg.CONF.nsx_controllers)
        self.assertIsNone(cfg.CONF.default_l3_gw_service_uuid)
        self.assertIsNone(cfg.CONF.default_l2_gw_service_uuid)
        self.assertEqual('breth0', cfg.CONF.nsx_default_interface_name)
        self.assertEqual(900, cfg.CONF.conn_idle_timeout)

    def test_load_api_extensions(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_FULL_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        # Load the configuration, and initialize the plugin
        self._get_mh_plugin()
        self.assertIn('extensions', cfg.CONF.api_extensions_path)

    def test_agentless_extensions(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_AGENTLESS_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        self.assertEqual(config.AgentModes.AGENTLESS,
                         cfg.CONF.NSX.agent_mode)
        # The version returned from NSX matters, as it has to be exactly 4.1
        with mock.patch.object(client.NsxApiClient,
                               'get_version',
                               return_value=version.Version("4.1")):
            with mock.patch.object(lsnlib,
                                   'service_cluster_exists',
                                   return_value=True):
                plugin = self._get_mh_plugin()
                self.assertNotIn('agent',
                                 plugin.supported_extension_aliases)
                self.assertNotIn('dhcp_agent_scheduler',
                                 plugin.supported_extension_aliases)
                self.assertNotIn('lsn',
                                 plugin.supported_extension_aliases)

    def test_agentless_extensions_version_fail(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_AGENTLESS_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        self.assertEqual(config.AgentModes.AGENTLESS,
                         cfg.CONF.NSX.agent_mode)
        with mock.patch.object(client.NsxApiClient,
                               'get_version',
                               return_value=version.Version("3.2")):
            try:
                self._get_mh_plugin()
            except exceptions.NsxPluginException:
                # This is the correct result
                pass
            else:
                self.fail('Expected NsxPluginException exception')

    def test_agentless_extensions_unmet_deps_fail(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_AGENTLESS_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        self.assertEqual(config.AgentModes.AGENTLESS,
                         cfg.CONF.NSX.agent_mode)
        with mock.patch.object(client.NsxApiClient,
                               'get_version',
                               return_value=version.Version("3.2")):
            with mock.patch.object(lsnlib,
                                   'service_cluster_exists',
                                   return_value=False):
                try:
                    self._get_mh_plugin()
                except exceptions.NsxPluginException:
                    # This is the correct result
                    pass
                else:
                    self.fail('Expected NsxPluginException exception')

    def test_agent_extensions(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_FULL_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        self.assertEqual(config.AgentModes.AGENT,
                         cfg.CONF.NSX.agent_mode)
        plugin = self._get_mh_plugin()
        self.assertIn('agent',
                      plugin.supported_extension_aliases)
        self.assertIn('dhcp_agent_scheduler',
                      plugin.supported_extension_aliases)

    def test_combined_extensions(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NSX_INI_COMBINED_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        self.assertEqual(config.AgentModes.COMBINED,
                         cfg.CONF.NSX.agent_mode)
        with mock.patch.object(client.NsxApiClient,
                               'get_version',
                               return_value=version.Version("4.1")):
            with mock.patch.object(lsnlib,
                                   'service_cluster_exists',
                                   return_value=True):
                plugin = self._get_mh_plugin()
                self.assertIn('agent',
                              plugin.supported_extension_aliases)
                self.assertIn('dhcp_agent_scheduler',
                              plugin.supported_extension_aliases)
                self.assertIn('lsn',
                              plugin.supported_extension_aliases)


class OldNVPConfigurationTest(base.BaseTestCase):

    def setUp(self):
        super(OldNVPConfigurationTest, self).setUp()
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()

    def _assert_required_options(self, cluster):
        self.assertEqual(cluster.nsx_controllers, ['fake_1:443', 'fake_2:443'])
        self.assertEqual(cluster.nsx_user, 'foo')
        self.assertEqual(cluster.nsx_password, 'bar')
        self.assertEqual(cluster.default_tz_uuid, 'fake_tz_uuid')

    def test_load_plugin_with_deprecated_options(self):
        self.config_parse(args=['--config-file', BASE_CONF_PATH,
                                '--config-file', NVP_INI_DEPR_PATH])
        cfg.CONF.set_override('core_plugin', vmware.PLUGIN_NAME)
        with mock.patch("neutron_lib.rpc.Connection"):
            plugin = mh_plugin.NsxPlugin()
            cluster = plugin.cluster
            # Verify old nvp_* params have been fully parsed
            self._assert_required_options(cluster)
            self.assertEqual(3, cluster.http_timeout)
            self.assertEqual(2, cluster.retries)
            self.assertEqual(2, cluster.redirects)
File diff suppressed because it is too large
@ -1,731 +0,0 @@
|
|||||||
# Copyright 2013 VMware, Inc.
|
|
||||||
# All Rights Reserved
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
#
|
|
||||||
|
|
||||||
import contextlib
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
|
|
||||||
import mock
|
|
||||||
from neutron_lib import constants
|
|
||||||
from neutron_lib import context
|
|
||||||
from neutron_lib import exceptions as n_exc
|
|
||||||
from neutron_lib.exceptions import l3 as l3_exc
|
|
||||||
from oslo_config import cfg
|
|
||||||
from oslo_log import log
|
|
||||||
from oslo_serialization import jsonutils
|
|
||||||
|
|
||||||
from neutron.tests import base
|
|
||||||
from neutron.tests.unit.api.v2 import test_base
|
|
||||||
from neutron.tests.unit import testlib_api
|
|
||||||
|
|
||||||
from vmware_nsx.api_client import client
|
|
||||||
from vmware_nsx.api_client import exception as api_exc
|
|
||||||
from vmware_nsx.api_client import version
|
|
||||||
from vmware_nsx.common import sync
|
|
||||||
from vmware_nsx.db import db
|
|
||||||
from vmware_nsx import nsx_cluster as cluster
|
|
||||||
from vmware_nsx.nsxlib import mh as nsxlib
|
|
||||||
from vmware_nsx import plugin
|
|
||||||
from vmware_nsx.tests import unit as vmware
|
|
||||||
from vmware_nsx.tests.unit.nsx_mh.apiclient import fake
|
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
|
||||||
|
|
||||||
_uuid = test_base._uuid
|
|
||||||
LSWITCHES = [{'uuid': _uuid(), 'name': 'ls-1'},
|
|
||||||
{'uuid': _uuid(), 'name': 'ls-2'}]
|
|
||||||
LSWITCHPORTS = [{'uuid': _uuid(), 'name': 'lp-1'},
|
|
||||||
{'uuid': _uuid(), 'name': 'lp-2'}]
|
|
||||||
LROUTERS = [{'uuid': _uuid(), 'name': 'lr-1'},
|
|
||||||
{'uuid': _uuid(), 'name': 'lr-2'}]
|
|
||||||
|
|
||||||
|
|
||||||
class CacheTestCase(base.BaseTestCase):
|
|
||||||
"""Test suite providing coverage for the Cache class."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
self.nsx_cache = sync.NsxCache()
|
|
||||||
for lswitch in LSWITCHES:
|
|
||||||
self.nsx_cache._uuid_dict_mappings[lswitch['uuid']] = (
|
|
||||||
self.nsx_cache._lswitches)
|
|
||||||
self.nsx_cache._lswitches[lswitch['uuid']] = (
|
|
||||||
{'data': lswitch,
|
|
||||||
'hash': hash(jsonutils.dumps(lswitch))})
|
|
||||||
for lswitchport in LSWITCHPORTS:
|
|
||||||
self.nsx_cache._uuid_dict_mappings[lswitchport['uuid']] = (
|
|
||||||
self.nsx_cache._lswitchports)
|
|
||||||
self.nsx_cache._lswitchports[lswitchport['uuid']] = (
|
|
||||||
{'data': lswitchport,
|
|
||||||
'hash': hash(jsonutils.dumps(lswitchport))})
|
|
||||||
for lrouter in LROUTERS:
|
|
||||||
self.nsx_cache._uuid_dict_mappings[lrouter['uuid']] = (
|
|
||||||
self.nsx_cache._lrouters)
|
|
||||||
self.nsx_cache._lrouters[lrouter['uuid']] = (
|
|
||||||
{'data': lrouter,
|
|
||||||
'hash': hash(jsonutils.dumps(lrouter))})
|
|
||||||
super(CacheTestCase, self).setUp()
|
|
||||||
|
|
||||||
def test_get_lswitches(self):
|
|
||||||
ls_uuids = self.nsx_cache.get_lswitches()
|
|
||||||
self.assertEqual(set(ls_uuids),
|
|
||||||
set([ls['uuid'] for ls in LSWITCHES]))
|
|
||||||
|
|
||||||
def test_get_lswitchports(self):
|
|
||||||
lp_uuids = self.nsx_cache.get_lswitchports()
|
|
||||||
self.assertEqual(set(lp_uuids),
|
|
||||||
set([lp['uuid'] for lp in LSWITCHPORTS]))
|
|
||||||
|
|
||||||
def test_get_lrouters(self):
|
|
||||||
lr_uuids = self.nsx_cache.get_lrouters()
|
|
||||||
self.assertEqual(set(lr_uuids),
|
|
||||||
set([lr['uuid'] for lr in LROUTERS]))
|
|
||||||
|
|
||||||
def test_get_lswitches_changed_only(self):
|
|
||||||
ls_uuids = self.nsx_cache.get_lswitches(changed_only=True)
|
|
||||||
self.assertEqual(0, len(ls_uuids))
|
|
||||||
|
|
||||||
def test_get_lswitchports_changed_only(self):
|
|
||||||
lp_uuids = self.nsx_cache.get_lswitchports(changed_only=True)
|
|
||||||
self.assertEqual(0, len(lp_uuids))
|
|
||||||
|
|
||||||
def test_get_lrouters_changed_only(self):
|
|
||||||
lr_uuids = self.nsx_cache.get_lrouters(changed_only=True)
|
|
||||||
self.assertEqual(0, len(lr_uuids))
|
|
||||||
|
|
||||||
def _verify_update(self, new_resource, changed=True, hit=True):
|
|
||||||
cached_resource = self.nsx_cache[new_resource['uuid']]
|
|
||||||
self.assertEqual(new_resource, cached_resource['data'])
|
|
||||||
self.assertEqual(hit, cached_resource.get('hit', False))
|
|
||||||
self.assertEqual(changed,
|
|
||||||
cached_resource.get('changed', False))
|
|
||||||
|
|
||||||
def test_update_lswitch_new_item(self):
|
|
||||||
new_switch_uuid = _uuid()
|
|
||||||
new_switch = {'uuid': new_switch_uuid, 'name': 'new_switch'}
|
|
||||||
self.nsx_cache.update_lswitch(new_switch)
|
|
||||||
self.assertIn(new_switch_uuid, self.nsx_cache._lswitches.keys())
|
|
||||||
self._verify_update(new_switch)
|
|
||||||
|
|
||||||
def test_update_lswitch_existing_item(self):
|
|
||||||
switch = LSWITCHES[0]
|
|
||||||
switch['name'] = 'new_name'
|
|
||||||
self.nsx_cache.update_lswitch(switch)
|
|
||||||
self.assertIn(switch['uuid'], self.nsx_cache._lswitches.keys())
|
|
||||||
self._verify_update(switch)
|
|
||||||
|
|
||||||
def test_update_lswitchport_new_item(self):
|
|
||||||
new_switchport_uuid = _uuid()
|
|
||||||
new_switchport = {'uuid': new_switchport_uuid,
|
|
||||||
'name': 'new_switchport'}
|
|
||||||
self.nsx_cache.update_lswitchport(new_switchport)
|
|
||||||
self.assertIn(new_switchport_uuid,
|
|
||||||
self.nsx_cache._lswitchports.keys())
|
|
||||||
self._verify_update(new_switchport)
|
|
||||||
|
|
||||||
def test_update_lswitchport_existing_item(self):
|
|
||||||
switchport = LSWITCHPORTS[0]
|
|
||||||
switchport['name'] = 'new_name'
|
|
||||||
self.nsx_cache.update_lswitchport(switchport)
|
|
||||||
self.assertIn(switchport['uuid'],
|
|
||||||
self.nsx_cache._lswitchports.keys())
|
|
||||||
self._verify_update(switchport)
|
|
||||||
|
|
||||||
def test_update_lrouter_new_item(self):
|
|
||||||
new_router_uuid = _uuid()
|
|
||||||
new_router = {'uuid': new_router_uuid,
|
|
||||||
'name': 'new_router'}
|
|
||||||
self.nsx_cache.update_lrouter(new_router)
|
|
||||||
self.assertIn(new_router_uuid,
|
|
||||||
self.nsx_cache._lrouters.keys())
|
|
||||||
self._verify_update(new_router)
|
|
||||||
|
|
||||||
def test_update_lrouter_existing_item(self):
|
|
||||||
router = LROUTERS[0]
|
|
||||||
router['name'] = 'new_name'
|
|
||||||
self.nsx_cache.update_lrouter(router)
|
|
||||||
self.assertIn(router['uuid'],
|
|
||||||
self.nsx_cache._lrouters.keys())
|
|
||||||
self._verify_update(router)
|
|
||||||
|
|
||||||
def test_process_updates_initial(self):
|
|
||||||
# Clear cache content to simulate first-time filling
|
|
||||||
self.nsx_cache._lswitches.clear()
|
|
||||||
self.nsx_cache._lswitchports.clear()
|
|
||||||
self.nsx_cache._lrouters.clear()
|
|
||||||
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
self._verify_update(resource)
|
|
||||||
|
|
||||||
def test_process_updates_no_change(self):
|
|
||||||
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
self._verify_update(resource, changed=False)
|
|
||||||
|
|
||||||
def test_process_updates_with_changes(self):
|
|
||||||
LSWITCHES[0]['name'] = 'altered'
|
|
||||||
self.nsx_cache.process_updates(LSWITCHES, LROUTERS, LSWITCHPORTS)
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
changed = (True if resource['uuid'] == LSWITCHES[0]['uuid']
|
|
||||||
else False)
|
|
||||||
self._verify_update(resource, changed=changed)
|
|
||||||
|
|
||||||
def _test_process_updates_with_removals(self):
|
|
||||||
lswitches = LSWITCHES[:]
|
|
||||||
lswitch = lswitches.pop()
|
|
||||||
self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
hit = (False if resource['uuid'] == lswitch['uuid']
|
|
||||||
else True)
|
|
||||||
self._verify_update(resource, changed=False, hit=hit)
|
|
||||||
return (lswitch, lswitches)
|
|
||||||
|
|
||||||
def test_process_updates_with_removals(self):
|
|
||||||
self._test_process_updates_with_removals()
|
|
||||||
|
|
||||||
def test_process_updates_cleanup_after_delete(self):
|
|
||||||
deleted_lswitch, lswitches = self._test_process_updates_with_removals()
|
|
||||||
self.nsx_cache.process_deletes()
|
|
||||||
self.nsx_cache.process_updates(lswitches, LROUTERS, LSWITCHPORTS)
|
|
||||||
self.assertNotIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
|
|
||||||
|
|
||||||
def test_update_resource_does_not_cleanup_deleted_resources(self):
|
|
||||||
deleted_lswitch, lswitches = self._test_process_updates_with_removals()
|
|
||||||
self.nsx_cache.process_deletes()
|
|
||||||
self.nsx_cache.update_lswitch(deleted_lswitch)
|
|
||||||
self.assertIn(deleted_lswitch['uuid'], self.nsx_cache._lswitches)
|
|
||||||
|
|
||||||
def _verify_delete(self, resource, deleted=True, hit=True):
|
|
||||||
cached_resource = self.nsx_cache[resource['uuid']]
|
|
||||||
data_field = 'data_bk' if deleted else 'data'
|
|
||||||
self.assertEqual(resource, cached_resource[data_field])
|
|
||||||
self.assertEqual(hit, cached_resource.get('hit', False))
|
|
||||||
self.assertEqual(deleted,
|
|
||||||
cached_resource.get('changed', False))
|
|
||||||
|
|
||||||
def _set_hit(self, resources, uuid_to_delete=None):
|
|
||||||
for resource in resources:
|
|
||||||
if resource['data']['uuid'] != uuid_to_delete:
|
|
||||||
resource['hit'] = True
|
|
||||||
|
|
||||||
def test_process_deletes_no_change(self):
|
|
||||||
# Mark all resources as hit
|
|
||||||
self._set_hit(self.nsx_cache._lswitches.values())
|
|
||||||
self._set_hit(self.nsx_cache._lswitchports.values())
|
|
||||||
self._set_hit(self.nsx_cache._lrouters.values())
|
|
||||||
self.nsx_cache.process_deletes()
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
self._verify_delete(resource, hit=False, deleted=False)
|
|
||||||
|
|
||||||
def test_process_deletes_with_removals(self):
|
|
||||||
# Mark all resources but one as hit
|
|
||||||
uuid_to_delete = LSWITCHPORTS[0]['uuid']
|
|
||||||
self._set_hit(self.nsx_cache._lswitches.values(),
|
|
||||||
uuid_to_delete)
|
|
||||||
self._set_hit(self.nsx_cache._lswitchports.values(),
|
|
||||||
uuid_to_delete)
|
|
||||||
self._set_hit(self.nsx_cache._lrouters.values(),
|
|
||||||
uuid_to_delete)
|
|
||||||
self.nsx_cache.process_deletes()
|
|
||||||
for resource in LSWITCHES + LROUTERS + LSWITCHPORTS:
|
|
||||||
deleted = resource['uuid'] == uuid_to_delete
|
|
||||||
self._verify_delete(resource, hit=False, deleted=deleted)
|
|
||||||
|
|
||||||
|
|
||||||
class SyncLoopingCallTestCase(base.BaseTestCase):
|
|
||||||
|
|
||||||
def test_looping_calls(self):
|
|
||||||
# Avoid runs of the synchronization process - just start
|
|
||||||
# the looping call
|
|
||||||
with mock.patch.object(
|
|
||||||
sync.NsxSynchronizer, '_synchronize_state', return_value=0.01,):
|
|
||||||
synchronizer = sync.NsxSynchronizer(mock.ANY, mock.ANY,
|
|
||||||
100, 0, 0,
|
|
||||||
initial_delay=0)
|
|
||||||
time.sleep(0.03)
|
|
||||||
# stop looping call before asserting
|
|
||||||
synchronizer._sync_looping_call.stop()
|
|
||||||
# Just verify the looping call has been called, trying
|
|
||||||
# to assess the exact number of calls would be unreliable
|
|
||||||
self.assertTrue(synchronizer._synchronize_state.call_count)
|
|
||||||
|
|
||||||
|
|
||||||
class SyncTestCase(testlib_api.SqlTestCase):
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
# mock api client
|
|
||||||
self.fc = fake.FakeClient(vmware.STUBS_PATH)
|
|
||||||
mock_api = mock.patch(vmware.NSXAPI_NAME, autospec=True)
|
|
||||||
# Avoid runs of the synchronizer looping call
|
|
||||||
# These unit tests will excplicitly invoke synchronization
|
|
||||||
patch_sync = mock.patch.object(sync, '_start_loopingcall')
|
|
||||||
self.mock_api = mock_api.start()
|
|
||||||
patch_sync.start()
|
|
||||||
self.mock_api.return_value.login.return_value = "the_cookie"
|
|
||||||
# Emulate tests against NSX 3.x
|
|
||||||
self.mock_api.return_value.get_version.return_value = (
|
|
||||||
version.Version("3.1"))
|
|
||||||
|
|
||||||
self.mock_api.return_value.request.side_effect = self.fc.fake_request
|
|
||||||
self.fake_cluster = cluster.NSXCluster(
|
|
||||||
name='fake-cluster', nsx_controllers=['1.1.1.1:999'],
|
|
||||||
default_tz_uuid=_uuid(), nsx_user='foo', nsx_password='bar')
|
|
||||||
self.fake_cluster.api_client = client.NsxApiClient(
|
|
||||||
('1.1.1.1', '999', True),
|
|
||||||
self.fake_cluster.nsx_user, self.fake_cluster.nsx_password,
|
|
||||||
http_timeout=self.fake_cluster.http_timeout,
|
|
||||||
retries=self.fake_cluster.retries,
|
|
||||||
redirects=self.fake_cluster.redirects)
|
|
||||||
# Instantiate Neutron plugin
|
|
||||||
# and setup needed config variables
|
|
||||||
args = ['--config-file', vmware.get_fake_conf('neutron.conf.test'),
|
|
||||||
'--config-file', vmware.get_fake_conf('nsx.ini.test')]
|
|
||||||
self.config_parse(args=args)
|
|
||||||
cfg.CONF.set_override('allow_overlapping_ips', True)
|
|
||||||
|
|
||||||
with mock.patch("neutron_lib.rpc.Connection"):
|
|
||||||
self._plugin = plugin.NsxPlugin()
|
|
||||||
|
|
||||||
mock_nm_get_plugin = mock.patch(
|
|
||||||
"neutron_lib.plugins.directory.get_plugin")
|
|
||||||
self.mock_nm_get_plugin = mock_nm_get_plugin.start()
|
|
||||||
self.mock_nm_get_plugin.return_value = self._plugin
|
|
||||||
|
|
||||||
super(SyncTestCase, self).setUp()
|
|
||||||
self.addCleanup(self.fc.reset_all)
|
|
||||||
|
|
||||||
@contextlib.contextmanager
|
|
||||||
def _populate_data(self, ctx, net_size=2, port_size=2, router_size=2):
|
|
||||||
|
|
||||||
def network(idx):
|
|
||||||
return {'network': {'name': 'net-%s' % idx,
|
|
||||||
'admin_state_up': True,
|
|
||||||
'shared': False,
|
|
||||||
'port_security_enabled': True,
|
|
||||||
'tenant_id': 'foo'}}
|
|
||||||
|
|
||||||
def subnet(idx, net_id):
|
|
||||||
return {'subnet':
|
|
||||||
{'cidr': '10.10.%s.0/24' % idx,
|
|
||||||
'name': 'sub-%s' % idx,
|
|
||||||
'gateway_ip': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'allocation_pools': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'ip_version': 4,
|
|
||||||
'dns_nameservers': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'host_routes': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'enable_dhcp': True,
|
|
||||||
'network_id': net_id,
|
|
||||||
'tenant_id': 'foo'}}
|
|
||||||
|
|
||||||
def port(idx, net_id):
|
|
||||||
return {'port': {'network_id': net_id,
|
|
||||||
'name': 'port-%s' % idx,
|
|
||||||
'admin_state_up': True,
|
|
||||||
'device_id': 'miao',
|
|
||||||
'device_owner': 'bau',
|
|
||||||
'fixed_ips': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'mac_address': constants.ATTR_NOT_SPECIFIED,
|
|
||||||
'tenant_id': 'foo'}}
|
|
||||||
|
|
||||||
def router(idx):
|
|
||||||
# Use random uuids as names
|
|
||||||
return {'router': {'name': 'rtr-%s' % idx,
|
|
||||||
'admin_state_up': True,
|
|
||||||
'tenant_id': 'foo'}}
|
|
||||||
|
|
||||||
networks = []
|
|
||||||
ports = []
|
|
||||||
routers = []
|
|
||||||
for i in range(net_size):
|
|
||||||
net = self._plugin.create_network(ctx, network(i))
|
|
||||||
networks.append(net)
|
|
||||||
self._plugin.create_subnet(ctx, subnet(i, net['id']))
|
|
||||||
for j in range(port_size):
|
|
||||||
ports.append(self._plugin.create_port(
|
|
||||||
ctx, port("%s-%s" % (i, j), net['id'])))
|
|
||||||
for i in range(router_size):
|
|
||||||
routers.append(self._plugin.create_router(ctx, router(i)))
|
|
||||||
# Do not return anything as the user does need the actual
|
|
||||||
# data created
|
|
||||||
yield
|
|
||||||
|
|
||||||
# Remove everything
|
|
||||||
for router in routers:
|
|
||||||
self._plugin.delete_router(ctx, router['id'])
|
|
||||||
for port in ports:
|
|
||||||
self._plugin.delete_port(ctx, port['id'])
|
|
||||||
# This will remove networks and subnets
|
|
||||||
for network in networks:
|
|
||||||
self._plugin.delete_network(ctx, network['id'])
|
|
||||||
|
|
||||||
def _get_tag_dict(self, tags):
|
|
||||||
return dict((tag['scope'], tag['tag']) for tag in tags)
|
|
||||||
|
|
||||||
def _test_sync(self, exp_net_status,
|
|
||||||
exp_port_status, exp_router_status,
|
|
||||||
action_callback=None, sp=None):
|
|
||||||
ls_uuid = list(self.fc._fake_lswitch_dict)[0]
|
|
||||||
neutron_net_id = self._get_tag_dict(
|
|
||||||
self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
|
|
||||||
lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0]
|
|
||||||
neutron_port_id = self._get_tag_dict(
|
|
||||||
self.fc._fake_lswitch_lport_dict[lp_uuid]['tags'])['q_port_id']
|
|
||||||
lr_uuid = list(self.fc._fake_lrouter_dict)[0]
|
|
||||||
neutron_rtr_id = self._get_tag_dict(
|
|
||||||
self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
|
|
||||||
if action_callback:
|
|
||||||
action_callback(ls_uuid, lp_uuid, lr_uuid)
|
|
||||||
# Make chunk big enough to read everything
|
|
||||||
if not sp:
|
|
||||||
sp = sync.SyncParameters(100)
|
|
||||||
self._plugin._synchronizer._synchronize_state(sp)
|
|
||||||
# Verify element is in expected status
|
|
||||||
# TODO(salv-orlando): Verify status for all elements
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
neutron_net = self._plugin.get_network(ctx, neutron_net_id)
|
|
||||||
neutron_port = self._plugin.get_port(ctx, neutron_port_id)
|
|
||||||
neutron_rtr = self._plugin.get_router(ctx, neutron_rtr_id)
|
|
||||||
self.assertEqual(exp_net_status, neutron_net['status'])
|
|
||||||
self.assertEqual(exp_port_status, neutron_port['status'])
|
|
||||||
self.assertEqual(exp_router_status, neutron_rtr['status'])
|
|
||||||
|
|
||||||
def _action_callback_status_down(self, ls_uuid, lp_uuid, lr_uuid):
|
|
||||||
self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
|
|
||||||
self.fc._fake_lswitch_lport_dict[lp_uuid]['status'] = 'false'
|
|
||||||
self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
|
|
||||||
|
|
||||||
def test_initial_sync(self):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
with self._populate_data(ctx):
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_ACTIVE,
|
|
||||||
constants.PORT_STATUS_ACTIVE,
|
|
||||||
constants.NET_STATUS_ACTIVE)
|
|
||||||
|
|
||||||
def test_initial_sync_with_resources_down(self):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
with self._populate_data(ctx):
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
|
|
||||||
constants.NET_STATUS_DOWN, self._action_callback_status_down)
|
|
||||||
|
|
||||||
def test_resync_with_resources_down(self):
|
|
||||||
if sys.version_info >= (3, 0):
|
|
||||||
# FIXME(arosen): this does not fail with an error...
|
|
||||||
self.skipTest('not supported')
|
|
||||||
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
with self._populate_data(ctx):
|
|
||||||
sp = sync.SyncParameters(100)
|
|
||||||
self._plugin._synchronizer._synchronize_state(sp)
|
|
||||||
# Ensure the synchronizer performs a resync
|
|
||||||
sp.init_sync_performed = True
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_DOWN, constants.PORT_STATUS_DOWN,
|
|
||||||
constants.NET_STATUS_DOWN, self._action_callback_status_down,
|
|
||||||
sp=sp)
|
|
||||||
|
|
||||||
def _action_callback_del_resource(self, ls_uuid, lp_uuid, lr_uuid):
|
|
||||||
del self.fc._fake_lswitch_dict[ls_uuid]
|
|
||||||
del self.fc._fake_lswitch_lport_dict[lp_uuid]
|
|
||||||
del self.fc._fake_lrouter_dict[lr_uuid]
|
|
||||||
|
|
||||||
def test_initial_sync_with_resources_removed(self):
|
|
||||||
if sys.version_info >= (3, 0):
|
|
||||||
# FIXME(arosen): this does not fail with an error...
|
|
||||||
self.skipTest('not supported')
|
|
||||||
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
with self._populate_data(ctx):
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
|
|
||||||
constants.NET_STATUS_ERROR, self._action_callback_del_resource)
|
|
||||||
|
|
||||||
def test_resync_with_resources_removed(self):
|
|
||||||
if sys.version_info >= (3, 0):
|
|
||||||
# FIXME(arosen): this does not fail with an error...
|
|
||||||
self.skipTest('not supported')
|
|
||||||
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
with self._populate_data(ctx):
|
|
||||||
sp = sync.SyncParameters(100)
|
|
||||||
self._plugin._synchronizer._synchronize_state(sp)
|
|
||||||
# Ensure the synchronizer performs a resync
|
|
||||||
sp.init_sync_performed = True
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_ERROR, constants.PORT_STATUS_ERROR,
|
|
||||||
constants.NET_STATUS_ERROR, self._action_callback_del_resource,
|
|
||||||
sp=sp)
|
|
||||||
|
|
||||||
def _test_sync_with_chunk_larger_maxpagesize(
|
|
||||||
self, net_size, port_size, router_size, chunk_size, exp_calls):
|
|
||||||
ctx = context.get_admin_context()
|
|
||||||
real_func = nsxlib.get_single_query_page
|
|
||||||
sp = sync.SyncParameters(chunk_size)
|
|
||||||
with self._populate_data(ctx, net_size=net_size,
|
|
||||||
port_size=port_size,
|
|
||||||
router_size=router_size):
|
|
||||||
with mock.patch.object(sync, 'MAX_PAGE_SIZE', 15):
|
|
||||||
# The following mock is just for counting calls,
|
|
||||||
# but we will still run the actual function
|
|
||||||
with mock.patch.object(
|
|
||||||
nsxlib, 'get_single_query_page',
|
|
||||||
side_effect=real_func) as mock_get_page:
|
|
||||||
self._test_sync(
|
|
||||||
constants.NET_STATUS_ACTIVE,
|
|
||||||
constants.PORT_STATUS_ACTIVE,
|
|
||||||
constants.NET_STATUS_ACTIVE,
|
|
||||||
sp=sp)
|
|
||||||
# As each resource type does not exceed the maximum page size,
|
|
||||||
# the method should be called once for each resource type
|
|
||||||
self.assertEqual(exp_calls, mock_get_page.call_count)
|
|
||||||

    def test_sync_chunk_larger_maxpagesize_no_multiple_requests(self):
        # total resource size = 20
        # total size for each resource does not exceed max page size (15)
        self._test_sync_with_chunk_larger_maxpagesize(
            net_size=5, port_size=2, router_size=5,
            chunk_size=20, exp_calls=3)

    def test_sync_chunk_larger_maxpagesize_triggers_multiple_requests(self):
        # total resource size = 48
        # total size for each resource does exceed max page size (15)
        self._test_sync_with_chunk_larger_maxpagesize(
            net_size=16, port_size=1, router_size=16,
            chunk_size=48, exp_calls=6)

    def test_sync_multi_chunk(self):
        # The fake NSX API client cannot be used for this test
        ctx = context.get_admin_context()
        # Generate 4 networks, 1 port per network, and 4 routers
        with self._populate_data(ctx, net_size=4, port_size=1, router_size=4):
            fake_lswitches = jsonutils.loads(
                self.fc.handle_get('/ws.v1/lswitch'))['results']
            fake_lrouters = jsonutils.loads(
                self.fc.handle_get('/ws.v1/lrouter'))['results']
            fake_lswitchports = jsonutils.loads(
                self.fc.handle_get('/ws.v1/lswitch/*/lport'))['results']
            return_values = [
                # Chunk 0 - lswitches
                (fake_lswitches, None, 4),
                # Chunk 0 - lrouters
                (fake_lrouters[:2], 'xxx', 4),
                # Chunk 0 - lports (size only)
                ([], 'start', 4),
                # Chunk 1 - lrouters (2 more) (lswitches are skipped)
                (fake_lrouters[2:], None, None),
                # Chunk 1 - lports
                (fake_lswitchports, None, 4)]

            def fake_fetch_data(*args, **kwargs):
                return return_values.pop(0)

            # 2 Chunks, with 6 resources each.
            # 1st chunk lswitches and lrouters
            # 2nd chunk lrouters and lports
            # Mock _fetch_data
            with mock.patch.object(
                    self._plugin._synchronizer, '_fetch_data',
                    side_effect=fake_fetch_data):
                sp = sync.SyncParameters(6)

                def do_chunk(chunk_idx, ls_cursor, lr_cursor, lp_cursor):
                    self._plugin._synchronizer._synchronize_state(sp)
                    self.assertEqual(chunk_idx, sp.current_chunk)
                    self.assertEqual(ls_cursor, sp.ls_cursor)
                    self.assertEqual(lr_cursor, sp.lr_cursor)
                    self.assertEqual(lp_cursor, sp.lp_cursor)

                # check 1st chunk
                do_chunk(1, None, 'xxx', 'start')
                # check 2nd chunk
                do_chunk(0, None, None, None)
                # Chunk size should have stayed the same
                self.assertEqual(sp.chunk_size, 6)

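    # A minimal sketch of the cursor protocol driven by the fake _fetch_data
    # tuples above (illustrative only; 'fetch' stands in for the NSX paged
    # query): each call returns (results, cursor, total), and a cursor of
    # None means the listing for that resource type is exhausted.
    #
    #     def consume(fetch):
    #         results, cursor, total = fetch(cursor=None)
    #         while cursor is not None:
    #             page, cursor, total = fetch(cursor=cursor)
    #             results.extend(page)
    #         return results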
    def test_synchronize_network(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a network down to verify synchronization
            ls_uuid = list(self.fc._fake_lswitch_dict)[0]
            q_net_id = self._get_tag_dict(
                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
            self._plugin.get_network(ctx, q_net_id, fields=['status'])
            # Reload from db
            q_nets = self._plugin.get_networks(ctx)
            for q_net in q_nets:
                if q_net['id'] == q_net_id:
                    exp_status = constants.NET_STATUS_DOWN
                else:
                    exp_status = constants.NET_STATUS_ACTIVE
                self.assertEqual(exp_status, q_net['status'])

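    # The status mapping the synchronization tests in this class rely on, as
    # exercised through the fake NSX client: a backend 'status' of 'true'
    # surfaces as ACTIVE, 'false' as DOWN, and a resource missing from the
    # backend altogether as ERROR.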
    def test_synchronize_network_not_found_in_db_no_raise(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a network down to verify synchronization
            ls_uuid = list(self.fc._fake_lswitch_dict)[0]
            q_net_id = self._get_tag_dict(
                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
            q_net_data = self._plugin._get_network(ctx, q_net_id)
            with mock.patch.object(self._plugin,
                                   '_get_network') as _get_network:
                _get_network.side_effect = n_exc.NetworkNotFound(
                    net_id=q_net_data['id'])
                self._plugin._synchronizer.synchronize_network(ctx, q_net_data)

    def test_synchronize_network_on_get(self):
        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a network down to verify punctual synchronization
            ls_uuid = list(self.fc._fake_lswitch_dict)[0]
            q_net_id = self._get_tag_dict(
                self.fc._fake_lswitch_dict[ls_uuid]['tags'])['quantum_net_id']
            self.fc._fake_lswitch_dict[ls_uuid]['status'] = 'false'
            q_net_data = self._plugin.get_network(ctx, q_net_id)
            self.assertEqual(constants.NET_STATUS_DOWN, q_net_data['status'])

    def test_synchronize_port_not_found_in_db_no_raise(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a port down to verify synchronization
            lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0]
            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
            lport['status'] = 'true'
            q_port_data = self._plugin._get_port(ctx, q_port_id)
            with mock.patch.object(self._plugin,
                                   '_get_port') as _get_port:
                _get_port.side_effect = n_exc.PortNotFound(
                    port_id=q_port_data['id'])
                self._plugin._synchronizer.synchronize_port(ctx, q_port_data)

    def test_synchronize_port(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a port down to verify synchronization
            lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0]
            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
            lport['status'] = 'true'
            self._plugin.get_port(ctx, q_port_id, fields=['status'])
            # Reload from db
            q_ports = self._plugin.get_ports(ctx)
            for q_port in q_ports:
                if q_port['id'] == q_port_id:
                    exp_status = constants.PORT_STATUS_ACTIVE
                else:
                    exp_status = constants.PORT_STATUS_DOWN
                self.assertEqual(exp_status, q_port['status'])

    def test_synchronize_port_on_get(self):
        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a port down to verify punctual synchronization
            lp_uuid = list(self.fc._fake_lswitch_lport_dict)[0]
            lport = self.fc._fake_lswitch_lport_dict[lp_uuid]
            q_port_id = self._get_tag_dict(lport['tags'])['q_port_id']
            lport['status'] = 'false'
            q_port_data = self._plugin.get_port(ctx, q_port_id)
            self.assertEqual(constants.PORT_STATUS_DOWN,
                             q_port_data['status'])

    def test_synchronize_router_not_found_in_db_no_raise(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a router down to verify synchronization
            lr_uuid = list(self.fc._fake_lrouter_dict)[0]
            q_rtr_id = self._get_tag_dict(
                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
            q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)
            with mock.patch.object(self._plugin,
                                   '_get_router') as _get_router:
                _get_router.side_effect = l3_exc.RouterNotFound(
                    router_id=q_rtr_data['id'])
                self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)

    # TODO(asarfaty): make this test pass with the new enginefacade
    def skip_test_synchronize_router(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a router down to verify synchronization
            lr_uuid = list(self.fc._fake_lrouter_dict)[0]
            q_rtr_id = self._get_tag_dict(
                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
            self._plugin.get_router(ctx, q_rtr_id, fields=['status'])
            # Reload from db
            q_routers = self._plugin.get_routers(ctx)
            for q_rtr in q_routers:
                if q_rtr['id'] == q_rtr_id:
                    exp_status = constants.NET_STATUS_DOWN
                else:
                    exp_status = constants.NET_STATUS_ACTIVE
                self.assertEqual(exp_status, q_rtr['status'])

    # TODO(asarfaty): Make this test pass with the new enginefacade
    def skip_test_synchronize_router_nsx_mapping_not_found(self):
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a router down to verify synchronization
            lr_uuid = list(self.fc._fake_lrouter_dict)[0]
            q_rtr_id = self._get_tag_dict(
                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
            q_rtr_data = self._plugin._get_router(ctx, q_rtr_id)

            # delete router mapping from db.
            db.delete_neutron_nsx_router_mapping(ctx.session, q_rtr_id)
            # pop router from fake nsx client
            router_data = self.fc._fake_lrouter_dict.pop(lr_uuid)

            self._plugin._synchronizer.synchronize_router(ctx, q_rtr_data)
            # Reload from db
            q_routers = self._plugin.get_routers(ctx)
            for q_rtr in q_routers:
                if q_rtr['id'] == q_rtr_id:
                    exp_status = constants.NET_STATUS_ERROR
                else:
                    exp_status = constants.NET_STATUS_ACTIVE
                self.assertEqual(exp_status, q_rtr['status'])
            # put the router back in the db, since the fake nsx api_client
            # does not handle missing router data
            self.fc._fake_lrouter_dict[lr_uuid] = router_data

    def test_synchronize_router_on_get(self):
        cfg.CONF.set_override('always_read_status', True, 'NSX_SYNC')
        ctx = context.get_admin_context()
        with self._populate_data(ctx):
            # Put a router down to verify punctual synchronization
            lr_uuid = list(self.fc._fake_lrouter_dict)[0]
            q_rtr_id = self._get_tag_dict(
                self.fc._fake_lrouter_dict[lr_uuid]['tags'])['q_router_id']
            self.fc._fake_lrouter_dict[lr_uuid]['status'] = 'false'
            q_rtr_data = self._plugin.get_router(ctx, q_rtr_id)
            self.assertEqual(constants.NET_STATUS_DOWN, q_rtr_data['status'])

    def test_sync_nsx_failure_backoff(self):
        self.mock_api.return_value.request.side_effect = api_exc.RequestTimeout
        # chunk size won't matter here
        sp = sync.SyncParameters(999)
        for i in range(10):
            self.assertEqual(
                min(64, 2 ** i),
                self._plugin._synchronizer._synchronize_state(sp))
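
    # The backoff asserted above is plain exponential doubling capped at 64
    # seconds. A standalone sketch of the same schedule (illustrative, not
    # part of the plugin's API):
    #
    #     def backoff_interval(consecutive_failures, cap=64):
    #         return min(cap, 2 ** consecutive_failures)
    #
    #     [backoff_interval(i) for i in range(8)] == [1, 2, 4, 8, 16, 32, 64, 64]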
@ -1,381 +0,0 @@
# Copyright (c) 2013 VMware.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from neutron.tests import base
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.db import api as db_api
from oslo_utils import uuidutils

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import nsx_utils
from vmware_nsx.common import utils
from vmware_nsx.db import nsx_models
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.tests import unit as vmware
from vmware_nsx.tests.unit.nsxlib.mh import base as nsx_base


class NsxUtilsTestCase(base.BaseTestCase):

    def _mock_port_mapping_db_calls(self, ret_value):
        # Mock relevant db calls
        # This will allow for avoiding setting up the plugin
        # for creating db entries
        mock.patch(vmware.nsx_method('get_nsx_switch_and_port_id',
                                     module_name='db.db'),
                   return_value=ret_value).start()
        mock.patch(vmware.nsx_method('add_neutron_nsx_port_mapping',
                                     module_name='db.db')).start()
        mock.patch(vmware.nsx_method('delete_neutron_nsx_port_mapping',
                                     module_name='db.db')).start()

    def _mock_network_mapping_db_calls(self, ret_value):
        # Mock relevant db calls
        # This will allow for avoiding setting up the plugin
        # for creating db entries
        mock.patch(vmware.nsx_method('get_nsx_switch_ids',
                                     module_name='db.db'),
                   return_value=ret_value).start()
        mock.patch(vmware.nsx_method('add_neutron_nsx_network_mapping',
                                     module_name='db.db')).start()

    def _mock_router_mapping_db_calls(self, ret_value):
        # Mock relevant db calls
        # This will allow for avoiding setting up the plugin
        # for creating db entries
        mock.patch(vmware.nsx_method('get_nsx_router_id',
                                     module_name='db.db'),
                   return_value=ret_value).start()
        mock.patch(vmware.nsx_method('add_neutron_nsx_router_mapping',
                                     module_name='db.db')).start()

    def _verify_get_nsx_switch_and_port_id(self, exp_ls_uuid, exp_lp_uuid):
        # The nsxlib and db calls are mocked, therefore the cluster
        # and the neutron_port_id parameters can be set to None
        ls_uuid, lp_uuid = nsx_utils.get_nsx_switch_and_port_id(
            db_api.get_reader_session(), None, None)
        self.assertEqual(exp_ls_uuid, ls_uuid)
        self.assertEqual(exp_lp_uuid, lp_uuid)

    def _verify_get_nsx_switch_ids(self, exp_ls_uuids):
        # The nsxlib and db calls are mocked, therefore the cluster
        # and the neutron_router_id parameters can be set to None
        ls_uuids = nsx_utils.get_nsx_switch_ids(
            db_api.get_reader_session(), None, None)
        for ls_uuid in ls_uuids or []:
            self.assertIn(ls_uuid, exp_ls_uuids)
            exp_ls_uuids.remove(ls_uuid)
        self.assertFalse(exp_ls_uuids)

    def _verify_get_nsx_router_id(self, exp_lr_uuid):
        neutron_router_id = uuidutils.generate_uuid()
        lr_uuid = nsx_utils.get_nsx_router_id(db_api.get_reader_session(),
                                              None,
                                              neutron_router_id)
        self.assertEqual(exp_lr_uuid, lr_uuid)

    def test_get_nsx_switch_and_port_id_from_db_mappings(self):
        # This test is representative of the 'standard' case in which both the
        # switch and the port mappings were stored in the neutron db
        exp_ls_uuid = uuidutils.generate_uuid()
        exp_lp_uuid = uuidutils.generate_uuid()
        ret_value = exp_ls_uuid, exp_lp_uuid
        self._mock_port_mapping_db_calls(ret_value)
        self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)

    def test_get_nsx_switch_and_port_id_only_port_db_mapping(self):
        # This test is representative of the case in which a port with an nsx
        # db mapping in the havana db was upgraded to icehouse
        exp_ls_uuid = uuidutils.generate_uuid()
        exp_lp_uuid = uuidutils.generate_uuid()
        ret_value = None, exp_lp_uuid
        self._mock_port_mapping_db_calls(ret_value)
        with mock.patch(vmware.nsx_method('query_lswitch_lports',
                                          module_name='nsxlib.mh.switch'),
                        return_value=[{'uuid': exp_lp_uuid,
                                       '_relations': {
                                           'LogicalSwitchConfig': {
                                               'uuid': exp_ls_uuid}
                                       }}]):
            self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)

    def test_get_nsx_switch_and_port_id_no_db_mapping(self):
        # This test is representative of the case where db mappings were not
        # found for a given port identifier
        exp_ls_uuid = uuidutils.generate_uuid()
        exp_lp_uuid = uuidutils.generate_uuid()
        ret_value = None, None
        self._mock_port_mapping_db_calls(ret_value)
        with mock.patch(vmware.nsx_method('query_lswitch_lports',
                                          module_name='nsxlib.mh.switch'),
                        return_value=[{'uuid': exp_lp_uuid,
                                       '_relations': {
                                           'LogicalSwitchConfig': {
                                               'uuid': exp_ls_uuid}
                                       }}]):
            self._verify_get_nsx_switch_and_port_id(exp_ls_uuid, exp_lp_uuid)

    def test_get_nsx_switch_and_port_id_no_mappings_returns_none(self):
        # This test verifies that the function returns (None, None) if the
        # mappings are found neither in the db nor in the backend
        ret_value = None, None
        self._mock_port_mapping_db_calls(ret_value)
        with mock.patch(vmware.nsx_method('query_lswitch_lports',
                                          module_name='nsxlib.mh.switch'),
                        return_value=[]):
            self._verify_get_nsx_switch_and_port_id(None, None)

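    # The port-id cases above pin down a db-first lookup with a backend
    # fallback. Reduced to a sketch (hypothetical helper names):
    #
    #     def lookup(db_mapping, backend_query):
    #         ls_uuid, lp_uuid = db_mapping()
    #         if ls_uuid and lp_uuid:
    #             return ls_uuid, lp_uuid          # standard case
    #         ports = backend_query()              # query NSX by tag
    #         if not ports:
    #             return None, None                # no mapping anywhere
    #         port = ports[0]
    #         ls = port['_relations']['LogicalSwitchConfig']['uuid']
    #         return ls, port['uuid']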
    def test_get_nsx_switch_ids_from_db_mappings(self):
        # This test is representative of the 'standard' case in which the
        # lswitch mappings were stored in the neutron db
        exp_ls_uuids = [uuidutils.generate_uuid()]
        self._mock_network_mapping_db_calls(exp_ls_uuids)
        self._verify_get_nsx_switch_ids(exp_ls_uuids)

    def test_get_nsx_switch_ids_no_db_mapping(self):
        # This test is representative of the case where db mappings were not
        # found for a given network identifier
        exp_ls_uuids = [uuidutils.generate_uuid()]
        self._mock_network_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('get_lswitches',
                                          module_name='nsxlib.mh.switch'),
                        return_value=[{'uuid': uuid}
                                      for uuid in exp_ls_uuids]):
            self._verify_get_nsx_switch_ids(exp_ls_uuids)

    def test_get_nsx_switch_ids_no_mapping_returns_None(self):
        # This test verifies that the function returns None if the mappings
        # are found neither in the db nor in the backend
        self._mock_network_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('get_lswitches',
                                          module_name='nsxlib.mh.switch'),
                        return_value=[]):
            self._verify_get_nsx_switch_ids(None)

    def test_get_nsx_router_id_from_db_mappings(self):
        # This test is representative of the 'standard' case in which the
        # router mapping was stored in the neutron db
        exp_lr_uuid = uuidutils.generate_uuid()
        self._mock_router_mapping_db_calls(exp_lr_uuid)
        self._verify_get_nsx_router_id(exp_lr_uuid)

    def test_get_nsx_router_id_no_db_mapping(self):
        # This test is representative of the case where db mappings were not
        # found for a given router identifier
        exp_lr_uuid = uuidutils.generate_uuid()
        self._mock_router_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('query_lrouters',
                                          module_name='nsxlib.mh.router'),
                        return_value=[{'uuid': exp_lr_uuid}]):
            self._verify_get_nsx_router_id(exp_lr_uuid)

    def test_get_nsx_router_id_no_mapping_returns_None(self):
        # This test verifies that the function returns None if the mapping
        # is found neither in the db nor in the backend
        self._mock_router_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('query_lrouters',
                                          module_name='nsxlib.mh.router'),
                        return_value=[]):
            self._verify_get_nsx_router_id(None)

    def test_check_and_truncate_name_with_none(self):
        name = None
        result = utils.check_and_truncate(name)
        self.assertEqual('', result)

    def test_check_and_truncate_name_with_short_name(self):
        name = 'foo_port_name'
        result = utils.check_and_truncate(name)
        self.assertEqual(name, result)

    def test_check_and_truncate_name_long_name(self):
        name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
        result = utils.check_and_truncate(name)
        self.assertEqual(len(result), utils.MAX_DISPLAY_NAME_LEN)

    def test_build_uri_path_plain(self):
        result = nsxlib._build_uri_path('RESOURCE')
        self.assertEqual("%s/%s" % (nsxlib.URI_PREFIX, 'RESOURCE'), result)

    def test_build_uri_path_with_field(self):
        result = nsxlib._build_uri_path('RESOURCE', fields='uuid')
        expected = "%s/%s?fields=uuid" % (nsxlib.URI_PREFIX, 'RESOURCE')
        self.assertEqual(expected, result)

    def test_build_uri_path_with_filters(self):
        filters = {"tag": 'foo', "tag_scope": "scope_foo"}
        result = nsxlib._build_uri_path('RESOURCE', filters=filters)
        expected = (
            "%s/%s?tag=foo&tag_scope=scope_foo" %
            (nsxlib.URI_PREFIX, 'RESOURCE'))
        self.assertEqual(expected, result)

    def test_build_uri_path_with_resource_id(self):
        res = 'RESOURCE'
        res_id = 'resource_id'
        result = nsxlib._build_uri_path(res, resource_id=res_id)
        expected = "%s/%s/%s" % (nsxlib.URI_PREFIX, res, res_id)
        self.assertEqual(expected, result)

    def test_build_uri_path_with_parent_and_resource_id(self):
        parent_res = 'RESOURCE_PARENT'
        child_res = 'RESOURCE_CHILD'
        res = '%s/%s' % (child_res, parent_res)
        par_id = 'parent_resource_id'
        res_id = 'resource_id'
        result = nsxlib._build_uri_path(
            res, parent_resource_id=par_id, resource_id=res_id)
        expected = ("%s/%s/%s/%s/%s" %
                    (nsxlib.URI_PREFIX, parent_res, par_id, child_res, res_id))
        self.assertEqual(expected, result)

    def test_build_uri_path_with_attachment(self):
        parent_res = 'RESOURCE_PARENT'
        child_res = 'RESOURCE_CHILD'
        res = '%s/%s' % (child_res, parent_res)
        par_id = 'parent_resource_id'
        res_id = 'resource_id'
        result = nsxlib._build_uri_path(res, parent_resource_id=par_id,
                                        resource_id=res_id, is_attachment=True)
        expected = ("%s/%s/%s/%s/%s/%s" %
                    (nsxlib.URI_PREFIX, parent_res,
                     par_id, child_res, res_id, 'attachment'))
        self.assertEqual(expected, result)

    def test_build_uri_path_with_extra_action(self):
        parent_res = 'RESOURCE_PARENT'
        child_res = 'RESOURCE_CHILD'
        res = '%s/%s' % (child_res, parent_res)
        par_id = 'parent_resource_id'
        res_id = 'resource_id'
        result = nsxlib._build_uri_path(res, parent_resource_id=par_id,
                                        resource_id=res_id, extra_action='doh')
        expected = ("%s/%s/%s/%s/%s/%s" %
                    (nsxlib.URI_PREFIX, parent_res,
                     par_id, child_res, res_id, 'doh'))
        self.assertEqual(expected, result)

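    # Taken together, the assertions above fix the URI grammar produced by
    # _build_uri_path (paths shown assuming URI_PREFIX is '/ws.v1', as the
    # request URLs elsewhere in these tests suggest):
    #
    #     RESOURCE                      -> /ws.v1/RESOURCE
    #     fields='uuid'                 -> /ws.v1/RESOURCE?fields=uuid
    #     filters={...}                 -> query parameters appended
    #     CHILD/PARENT + parent/res ids -> /ws.v1/PARENT/<pid>/CHILD/<rid>
    #     is_attachment=True            -> .../attachment appended
    #     extra_action='doh'            -> .../doh appended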
    def _mock_sec_group_mapping_db_calls(self, ret_value):
        mock.patch(vmware.nsx_method('get_nsx_security_group_id',
                                     module_name='db.db'),
                   return_value=ret_value).start()
        mock.patch(vmware.nsx_method('add_neutron_nsx_security_group_mapping',
                                     module_name='db.db')).start()

    def _verify_get_nsx_sec_profile_id(self, exp_sec_prof_uuid):
        # The nsxlib and db calls are mocked, therefore the cluster
        # and the neutron_id parameters can be set to None
        sec_prof_uuid = nsx_utils.get_nsx_security_group_id(
            db_api.get_reader_session(), None, None)
        self.assertEqual(exp_sec_prof_uuid, sec_prof_uuid)

    def test_get_nsx_sec_profile_id_from_db_mappings(self):
        # This test is representative of the 'standard' case in which the
        # security group mapping was stored in the neutron db
        exp_sec_prof_uuid = uuidutils.generate_uuid()
        self._mock_sec_group_mapping_db_calls(exp_sec_prof_uuid)
        self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid)

    def test_get_nsx_sec_profile_id_no_db_mapping(self):
        # This test is representative of the case where db mappings were not
        # found for a given security profile identifier
        exp_sec_prof_uuid = uuidutils.generate_uuid()
        self._mock_sec_group_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('query_security_profiles',
                                          module_name='nsxlib.mh.secgroup'),
                        return_value=[{'uuid': exp_sec_prof_uuid}]):
            self._verify_get_nsx_sec_profile_id(exp_sec_prof_uuid)

    def test_get_nsx_sec_profile_id_no_mapping_returns_None(self):
        # This test verifies that the function returns None if the mapping
        # is found neither in the db nor in the backend
        self._mock_sec_group_mapping_db_calls(None)
        with mock.patch(vmware.nsx_method('query_security_profiles',
                                          module_name='nsxlib.mh.secgroup'),
                        return_value=[]):
            self._verify_get_nsx_sec_profile_id(None)

    def test_convert_to_nsx_transport_zones_no_multiprovider(self):
        test_net = {'id': 'whatever'}
        results = nsx_utils.convert_to_nsx_transport_zones(
            'meh_zone_uuid', test_net,
            default_transport_type='meh_transport_type')
        self.assertEqual(1, len(results))
        result = results[0]
        self.assertEqual('meh_zone_uuid', result['zone_uuid'])
        self.assertEqual('meh_transport_type', result['transport_type'])

    def _verify_nsx_transport_zones(self, results):
        self.assertEqual(2, len(results))
        result_1 = results[0]
        self.assertEqual(utils.NetworkTypes.BRIDGE,
                         result_1['transport_type'])
        self.assertEqual([{'transport': 66}],
                         result_1['binding_config']['vlan_translation'])
        self.assertEqual('whatever_tz_1', result_1['zone_uuid'])
        result_2 = results[1]
        self.assertEqual(utils.NetworkTypes.STT,
                         result_2['transport_type'])
        self.assertNotIn('binding_config', result_2)
        self.assertEqual('whatever_tz_2', result_2['zone_uuid'])

    def test_convert_to_nsx_transport_zones_with_bindings(self):
        binding_1 = nsx_models.TzNetworkBinding(
            'whatever',
            utils.NetworkTypes.VLAN,
            'whatever_tz_1',
            66)
        binding_2 = nsx_models.TzNetworkBinding(
            'whatever',
            utils.NetworkTypes.STT,
            'whatever_tz_2',
            None)
        results = nsx_utils.convert_to_nsx_transport_zones(
            'meh_zone_uuid', None, bindings=[binding_1, binding_2])
        self._verify_nsx_transport_zones(results)

    def test_convert_to_nsx_transport_zones_with_multiprovider(self):
        segments = [
            {pnet.NETWORK_TYPE: utils.NetworkTypes.VLAN,
             pnet.PHYSICAL_NETWORK: 'whatever_tz_1',
             pnet.SEGMENTATION_ID: 66},
            {pnet.NETWORK_TYPE: utils.NetworkTypes.STT,
             pnet.PHYSICAL_NETWORK: 'whatever_tz_2'},
        ]
        results = nsx_utils.convert_to_nsx_transport_zones(
            'meh_zone_uuid',
            {'id': 'whatever_net', mpnet_apidef.SEGMENTS: segments})
        self._verify_nsx_transport_zones(results)


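# The conversion rules exercised above, in outline: multiprovider segments
# on the network and explicit TzNetworkBinding rows each map one-to-one onto
# NSX transport zones, while a network with neither yields a single zone
# built from the default zone uuid and default transport type. Vlan entries
# additionally carry a binding_config translating the segmentation id, e.g.
# (values from these tests):
#
#     {pnet.NETWORK_TYPE: 'vlan', pnet.PHYSICAL_NETWORK: tz,
#      pnet.SEGMENTATION_ID: 66}
#         -> {'zone_uuid': tz, 'transport_type': 'bridge',
#             'binding_config': {'vlan_translation': [{'transport': 66}]}}

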
class ClusterManagementTestCase(nsx_base.NsxlibTestCase):

    def test_cluster_in_readonly_mode(self):
        with mock.patch.object(self.fake_cluster.api_client,
                               'request',
                               side_effect=api_exc.ReadOnlyMode):
            self.assertRaises(nsx_exc.MaintenanceInProgress,
                              nsxlib.do_request, cluster=self.fake_cluster)

    def test_cluster_method_not_implemented(self):
        self.assertRaises(api_exc.NsxApiException,
                          nsxlib.do_request,
                          nsxlib.HTTP_GET,
                          nsxlib._build_uri_path('MY_FAKE_RESOURCE',
                                                 resource_id='foo'),
                          cluster=self.fake_cluster)
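
# The translation pinned down by ClusterManagementTestCase: ReadOnlyMode
# raised by the NSX API client surfaces to callers as MaintenanceInProgress,
# so writes fail cleanly while the control cluster is in maintenance, while
# a request for an unknown resource bubbles up as a generic NsxApiException.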
@ -24,7 +24,7 @@ from vmware_nsx.api_client import version
from vmware_nsx.common import config # noqa
from vmware_nsx import nsx_cluster as cluster
from vmware_nsx.tests import unit as vmware
-from vmware_nsx.tests.unit.nsx_mh.apiclient import fake
+from vmware_nsx.tests.unit.nsxlib import fake

_uuid = test_base._uuid

@ -1,310 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import mock
from neutron.tests.unit.api.v2 import test_base
from oslo_serialization import jsonutils

from vmware_nsx.api_client import exception
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import l2gateway as l2gwlib
from vmware_nsx.nsxlib.mh import switch as switchlib
from vmware_nsx.tests.unit.nsxlib.mh import base

_uuid = test_base._uuid


class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase):

    def test_create_l2_gw_service_on_failure(self):
        self.assertRaises(exception.NsxApiException,
                          l2gwlib.create_l2_gw_service,
                          self.fake_cluster,
                          'fake-tenant',
                          'fake-gateway',
                          [{'id': _uuid(),
                            'interface_name': 'xxx'}])

    def test_delete_l2_gw_service_on_failure(self):
        self.assertRaises(exception.NsxApiException,
                          l2gwlib.delete_l2_gw_service,
                          self.fake_cluster,
                          'fake-gateway')

    def test_get_l2_gw_service_on_failure(self):
        self.assertRaises(exception.NsxApiException,
                          l2gwlib.get_l2_gw_service,
                          self.fake_cluster,
                          'fake-gateway')

    def test_update_l2_gw_service_on_failure(self):
        self.assertRaises(exception.NsxApiException,
                          l2gwlib.update_l2_gw_service,
                          self.fake_cluster,
                          'fake-gateway',
                          'pluto')


class L2GatewayTestCase(base.NsxlibTestCase):

    def _create_gw_service(self, node_uuid, display_name,
                           tenant_id='fake_tenant'):
        return l2gwlib.create_l2_gw_service(self.fake_cluster,
                                            tenant_id,
                                            display_name,
                                            [{'id': node_uuid,
                                              'interface_name': 'xxx'}])

    def test_create_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        response = self._create_gw_service(node_uuid, display_name)
        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
        self.assertEqual(response.get('display_name'), display_name)
        gateways = response.get('gateways', [])
        self.assertEqual(len(gateways), 1)
        self.assertEqual(gateways[0]['type'], 'L2Gateway')
        self.assertEqual(gateways[0]['device_id'], 'xxx')
        self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)

    def test_update_l2_gw_service(self):
        display_name = 'fake-gateway'
        new_display_name = 'still-fake-gateway'
        node_uuid = _uuid()
        res1 = self._create_gw_service(node_uuid, display_name)
        gw_id = res1['uuid']
        res2 = l2gwlib.update_l2_gw_service(
            self.fake_cluster, gw_id, new_display_name)
        self.assertEqual(res2['display_name'], new_display_name)

    def test_get_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
        response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id)
        self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
        self.assertEqual(response.get('display_name'), display_name)
        self.assertEqual(response.get('uuid'), gw_id)

    def test_list_l2_gw_service(self):
        gw_ids = []
        for name in ('fake-1', 'fake-2'):
            gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
        results = l2gwlib.get_l2_gw_services(self.fake_cluster)
        self.assertEqual(len(results), 2)
        self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))

    def test_list_l2_gw_service_by_tenant(self):
        gw_ids = [self._create_gw_service(
            _uuid(), name, tenant_id=name)['uuid']
            for name in ('fake-1', 'fake-2')]
        results = l2gwlib.get_l2_gw_services(self.fake_cluster,
                                             tenant_id='fake-1')
        self.assertEqual(len(results), 1)
        self.assertEqual(results[0]['uuid'], gw_ids[0])

    def test_delete_l2_gw_service(self):
        display_name = 'fake-gateway'
        node_uuid = _uuid()
        gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
        l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id)
        results = l2gwlib.get_l2_gw_services(self.fake_cluster)
        self.assertEqual(len(results), 0)

    def test_plug_l2_gw_port_attachment(self):
        tenant_id = 'pippo'
        node_uuid = _uuid()
        transport_zones_config = [{'zone_uuid': _uuid(),
                                   'transport_type': 'stt'}]
        lswitch = switchlib.create_lswitch(
            self.fake_cluster, _uuid(), tenant_id,
            'fake-switch', transport_zones_config)
        gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
        lport = switchlib.create_lport(
            self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(),
            'fake-gw-port', gw_id, True)
        l2gwlib.plug_l2_gw_service(
            self.fake_cluster, lswitch['uuid'],
            lport['uuid'], gw_id)
        uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE,
                                     lport['uuid'],
                                     lswitch['uuid'],
                                     is_attachment=True)
        resp_obj = nsxlib.do_request("GET", uri,
                                     cluster=self.fake_cluster)
        self.assertIn('LogicalPortAttachment', resp_obj)
        self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
                         'L2GatewayAttachment')

    def _create_expected_req_body(self, display_name, neutron_id,
                                  connector_type, connector_ip,
                                  client_certificate):
        body = {
            "display_name": display_name,
            "tags": [{"tag": neutron_id, "scope": "q_gw_dev_id"},
                     {"tag": 'fake_tenant', "scope": "os_tid"},
                     {"tag": nsx_utils.NEUTRON_VERSION,
                      "scope": "quantum"}],
            "transport_connectors": [
                {"transport_zone_uuid": 'fake_tz_uuid',
                 "ip_address": connector_ip,
                 "type": '%sConnector' % connector_type}],
            "admin_status_enabled": True
        }
        body.get("tags").sort(key=lambda x: x['tag'])
        if client_certificate:
            body["credential"] = {
                "client_certificate": {
                    "pem_encoded": client_certificate},
                "type": "SecurityCertificateCredential"}
        return body

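    # For reference, a representative document produced by the helper above
    # and POSTed to /ws.v1/transport-node (values are this test suite's
    # fakes, shown illustratively):
    #
    #     {"admin_status_enabled": true,
    #      "display_name": "fake-device",
    #      "tags": [{"tag": "fake_tenant", "scope": "os_tid"}, ...],
    #      "transport_connectors": [
    #          {"transport_zone_uuid": "fake_tz_uuid",
    #           "ip_address": "1.1.1.1",
    #           "type": "STTConnector"}]}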
    def test_create_gw_device(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        display_name = 'fake-device'
        neutron_id = 'whatever'
        connector_type = 'stt'
        connector_ip = '1.1.1.1'
        client_certificate = 'this_should_be_a_certificate'
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            expected_req_body = self._create_expected_req_body(
                display_name, neutron_id, connector_type.upper(),
                connector_ip, client_certificate)
            l2gwlib.create_gateway_device(
                self.fake_cluster, 'fake_tenant', display_name, neutron_id,
                'fake_tz_uuid', connector_type, connector_ip,
                client_certificate)
            request_mock.assert_called_once_with(
                "POST",
                "/ws.v1/transport-node",
                jsonutils.dumps(expected_req_body, sort_keys=True),
                cluster=self.fake_cluster)

    def test_create_gw_device_with_invalid_transport_type_raises(self):
        display_name = 'fake-device'
        neutron_id = 'whatever'
        connector_type = 'foo'
        connector_ip = '1.1.1.1'
        client_certificate = 'this_should_be_a_certificate'
        self.assertRaises(nsx_exc.InvalidTransportType,
                          l2gwlib.create_gateway_device,
                          self.fake_cluster, 'fake_tenant', display_name,
                          neutron_id, 'fake_tz_uuid', connector_type,
                          connector_ip, client_certificate)

    def test_update_gw_device(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        display_name = 'fake-device'
        neutron_id = 'whatever'
        connector_type = 'stt'
        connector_ip = '1.1.1.1'
        client_certificate = 'this_should_be_a_certificate'
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            expected_req_body = self._create_expected_req_body(
                display_name, neutron_id, connector_type.upper(),
                connector_ip, client_certificate)
            l2gwlib.update_gateway_device(
                self.fake_cluster, 'whatever', 'fake_tenant',
                display_name, neutron_id,
                'fake_tz_uuid', connector_type, connector_ip,
                client_certificate)

            request_mock.assert_called_once_with(
                "PUT",
                "/ws.v1/transport-node/whatever",
                jsonutils.dumps(expected_req_body, sort_keys=True),
                cluster=self.fake_cluster)

    def test_update_gw_device_without_certificate(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        display_name = 'fake-device'
        neutron_id = 'whatever'
        connector_type = 'stt'
        connector_ip = '1.1.1.1'
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            expected_req_body = self._create_expected_req_body(
                display_name, neutron_id, connector_type.upper(),
                connector_ip, None)
            l2gwlib.update_gateway_device(
                self.fake_cluster, 'whatever', 'fake_tenant',
                display_name, neutron_id,
                'fake_tz_uuid', connector_type, connector_ip,
                client_certificate=None)

            request_mock.assert_called_once_with(
                "PUT",
                "/ws.v1/transport-node/whatever",
                jsonutils.dumps(expected_req_body, sort_keys=True),
                cluster=self.fake_cluster)

    def test_get_gw_device_status(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever')
            request_mock.assert_called_once_with(
                "GET",
                "/ws.v1/transport-node/whatever/status",
                cluster=self.fake_cluster)

    def test_get_gw_devices_status(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            request_mock.return_value = {
                'results': [],
                'page_cursor': None,
                'result_count': 0}
            l2gwlib.get_gateway_devices_status(self.fake_cluster)
            request_mock.assert_called_once_with(
                "GET",
                ("/ws.v1/transport-node?fields=uuid,tags&"
                 "relations=TransportNodeStatus&"
                 "_page_length=1000&tag_scope=quantum"),
                cluster=self.fake_cluster)

    def test_get_gw_devices_status_filter_by_tenant(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            request_mock.return_value = {
                'results': [],
                'page_cursor': None,
                'result_count': 0}
            l2gwlib.get_gateway_devices_status(self.fake_cluster,
                                               tenant_id='ssc_napoli')
            request_mock.assert_called_once_with(
                "GET",
                ("/ws.v1/transport-node?fields=uuid,tags&"
                 "relations=TransportNodeStatus&"
                 "tag=ssc_napoli&tag_scope=os_tid&"
                 "_page_length=1000&tag_scope=quantum"),
                cluster=self.fake_cluster)

    def test_delete_gw_device(self):
        # NOTE(salv-orlando): This unit test mocks backend calls rather than
        # leveraging the fake NSX API client
        with mock.patch.object(nsxlib, 'do_request') as request_mock:
            l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever')
            request_mock.assert_called_once_with(
                "DELETE",
                "/ws.v1/transport-node/whatever",
                cluster=self.fake_cluster)
@ -1,69 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import mock
from neutron_lib import exceptions

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import queue as queuelib
from vmware_nsx.tests.unit.nsxlib.mh import base


class TestLogicalQueueLib(base.NsxlibTestCase):

    def setUp(self):
        super(TestLogicalQueueLib, self).setUp()
        self.fake_queue = {
            'name': 'fake_queue',
            'min': 0, 'max': 256,
            'dscp': 0, 'qos_marking': False
        }

    def test_create_and_get_lqueue(self):
        queue_id = queuelib.create_lqueue(
            self.fake_cluster, self.fake_queue)
        queue_res = nsxlib.do_request(
            'GET',
            nsxlib._build_uri_path('lqueue', resource_id=queue_id),
            cluster=self.fake_cluster)
        self.assertEqual(queue_id, queue_res['uuid'])
        self.assertEqual('fake_queue', queue_res['display_name'])

    def test_create_lqueue_nsx_error_raises(self):
        def raise_nsx_exc(*args, **kwargs):
            raise api_exc.NsxApiException()

        with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
            self.assertRaises(
                exceptions.NeutronException, queuelib.create_lqueue,
                self.fake_cluster, self.fake_queue)

    def test_delete_lqueue(self):
        queue_id = queuelib.create_lqueue(
            self.fake_cluster, self.fake_queue)
        queuelib.delete_lqueue(self.fake_cluster, queue_id)
        self.assertRaises(exceptions.NotFound,
                          nsxlib.do_request,
                          'GET',
                          nsxlib._build_uri_path(
                              'lqueue', resource_id=queue_id),
                          cluster=self.fake_cluster)

    def test_delete_non_existing_lqueue_raises(self):
        self.assertRaises(exceptions.NeutronException,
                          queuelib.delete_lqueue,
                          self.fake_cluster, 'whatever')
@ -1,947 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import mock
from neutron.tests.unit.api.v2 import test_base
from neutron_lib import exceptions
from oslo_config import cfg
from oslo_utils import uuidutils

from vmware_nsx.api_client import exception as api_exc
from vmware_nsx.api_client import version as ver_module
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import utils
from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import router as routerlib
from vmware_nsx.nsxlib.mh import switch as switchlib
from vmware_nsx.tests.unit.nsxlib.mh import base

_uuid = test_base._uuid


class TestNatRules(base.NsxlibTestCase):

    def _test_create_lrouter_dnat_rule(self, version):
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: ver_module.Version(version)):
            tenant_id = 'pippo'
            lrouter = routerlib.create_lrouter(self.fake_cluster,
                                               uuidutils.generate_uuid(),
                                               tenant_id,
                                               'fake_router',
                                               '192.168.0.1')
            nat_rule = routerlib.create_lrouter_dnat_rule(
                self.fake_cluster, lrouter['uuid'], '10.0.0.99',
                match_criteria={'destination_ip_addresses':
                                '192.168.0.5'})
            uri = nsxlib._build_uri_path(routerlib.LROUTERNAT_RESOURCE,
                                         nat_rule['uuid'],
                                         lrouter['uuid'])
            resp_obj = nsxlib.do_request("GET", uri, cluster=self.fake_cluster)
            self.assertEqual('DestinationNatRule', resp_obj['type'])
            self.assertEqual('192.168.0.5',
                             resp_obj['match']['destination_ip_addresses'])

    def test_create_lrouter_dnat_rule_v2(self):
        self._test_create_lrouter_dnat_rule('2.9')

    def test_create_lrouter_dnat_rule_v31(self):
        self._test_create_lrouter_dnat_rule('3.1')


class TestExplicitLRouters(base.NsxlibTestCase):

    def setUp(self):
        self.fake_version = '3.2'
        super(TestExplicitLRouters, self).setUp()

    def _get_lrouter(self, tenant_id, router_name, router_id, relations=None):
        schema = '/ws.v1/schema/RoutingTableRoutingConfig'

        router = {'display_name': router_name,
                  'uuid': router_id,
                  'tags': utils.get_tags(os_tid=tenant_id),
                  'distributed': False,
                  'routing_config': {'type': 'RoutingTableRoutingConfig',
                                     '_schema': schema},
                  '_schema': schema,
                  'nat_synchronization_enabled': True,
                  'replication_mode': 'service',
                  'type': 'LogicalRouterConfig',
                  '_href': '/ws.v1/lrouter/%s' % router_id, }
        if relations:
            router['_relations'] = relations
        return router

    def _get_single_route(self, router_id, route_id='fake_route_id_0',
                          prefix='0.0.0.0/0', next_hop_ip='1.1.1.1'):
        return {'protocol': 'static',
                '_href': '/ws.v1/lrouter/%s/rib/%s' % (router_id, route_id),
                'prefix': prefix,
                '_schema': '/ws.v1/schema/RoutingTableEntry',
                'next_hop_ip': next_hop_ip,
                'action': 'accept',
                'uuid': route_id}

    def test_prepare_body_with_implicit_routing_config(self):
        router_name = 'fake_router_name'
        tenant_id = 'fake_tenant_id'
        neutron_router_id = 'pipita_higuain'
        router_type = 'SingleDefaultRouteImplicitRoutingConfig'
        route_config = {
            'default_route_next_hop': {'gateway_ip_address': 'fake_address',
                                       'type': 'RouterNextHop'}, }
        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
                                               tenant_id, router_type,
                                               **route_config)
        expected = {'display_name': 'fake_router_name',
                    'routing_config': {
                        'default_route_next_hop':
                        {'gateway_ip_address': 'fake_address',
                         'type': 'RouterNextHop'},
                        'type': 'SingleDefaultRouteImplicitRoutingConfig'},
                    'tags': utils.get_tags(os_tid='fake_tenant_id',
                                           q_router_id='pipita_higuain'),
                    'type': 'LogicalRouterConfig',
                    'replication_mode': cfg.CONF.NSX.replication_mode}
        self.assertEqual(expected, body)

    def test_prepare_body_without_routing_config(self):
        router_name = 'fake_router_name'
        tenant_id = 'fake_tenant_id'
        neutron_router_id = 'marekiaro_hamsik'
        router_type = 'RoutingTableRoutingConfig'
        body = routerlib._prepare_lrouter_body(router_name, neutron_router_id,
                                               tenant_id, router_type)
        expected = {'display_name': 'fake_router_name',
                    'routing_config': {'type': 'RoutingTableRoutingConfig'},
                    'tags': utils.get_tags(os_tid='fake_tenant_id',
                                           q_router_id='marekiaro_hamsik'),
                    'type': 'LogicalRouterConfig',
                    'replication_mode': cfg.CONF.NSX.replication_mode}
        self.assertEqual(expected, body)

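    # The two cases above isolate the only difference between the prepared
    # request bodies: the routing_config type and, for the implicit flavor,
    # a default_route_next_hop entry. In outline:
    #
    #     RoutingTableRoutingConfig               -> explicit routes only
    #     SingleDefaultRouteImplicitRoutingConfig -> additionally carries
    #         {'gateway_ip_address': ..., 'type': 'RouterNextHop'}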
    def test_get_lrouter(self):
        tenant_id = 'fake_tenant_id'
        router_name = 'fake_router_name'
        router_id = 'fake_router_id'
        relations = {
            'LogicalRouterStatus':
            {'_href': '/ws.v1/lrouter/%s/status' % router_id,
             'lport_admin_up_count': 1,
             '_schema': '/ws.v1/schema/LogicalRouterStatus',
             'lport_count': 1,
             'fabric_status': True,
             'type': 'LogicalRouterStatus',
             'lport_link_up_count': 0, }, }

        with mock.patch.object(nsxlib, 'do_request',
                               return_value=self._get_lrouter(tenant_id,
                                                              router_name,
                                                              router_id,
                                                              relations)):
            lrouter = routerlib.get_lrouter(self.fake_cluster, router_id)
            self.assertTrue(
                lrouter['_relations']['LogicalRouterStatus']['fabric_status'])

    def test_create_lrouter(self):
        tenant_id = 'fake_tenant_id'
        router_name = 'fake_router_name'
        router_id = 'fake_router_id'
        nexthop_ip = '10.0.0.1'
        with mock.patch.object(
                nsxlib, 'do_request',
                return_value=self._get_lrouter(tenant_id,
                                               router_name,
                                               router_id)):
            lrouter = routerlib.create_lrouter(self.fake_cluster,
                                               uuidutils.generate_uuid(),
                                               tenant_id,
                                               router_name, nexthop_ip)
            self.assertEqual(lrouter['routing_config']['type'],
                             'RoutingTableRoutingConfig')
            self.assertNotIn('default_route_next_hop',
                             lrouter['routing_config'])

    def test_update_lrouter_with_no_routes(self):
        router_id = 'fake_router_id'
        new_routes = [{"nexthop": "10.0.0.2",
                       "destination": "169.254.169.0/30"}, ]

        nsx_routes = [self._get_single_route(router_id)]
        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
                               return_value=nsx_routes):
            with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
                                   return_value='fake_uuid'):
                old_routes = routerlib.update_explicit_routes_lrouter(
                    self.fake_cluster, router_id, new_routes)
        self.assertEqual(old_routes, nsx_routes)

    def test_update_lrouter_with_no_routes_raise_nsx_exception(self):
        router_id = 'fake_router_id'
        new_routes = [{"nexthop": "10.0.0.2",
                       "destination": "169.254.169.0/30"}, ]

        nsx_routes = [self._get_single_route(router_id)]
        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
                               return_value=nsx_routes):
            with mock.patch.object(routerlib, 'create_explicit_route_lrouter',
                                   side_effect=api_exc.NsxApiException):
                self.assertRaises(api_exc.NsxApiException,
                                  routerlib.update_explicit_routes_lrouter,
                                  self.fake_cluster, router_id, new_routes)

    def test_update_lrouter_with_routes(self):
        router_id = 'fake_router_id'
        new_routes = [{"next_hop_ip": "10.0.0.2",
                       "prefix": "169.254.169.0/30"}, ]

        nsx_routes = [self._get_single_route(router_id),
                      self._get_single_route(router_id, 'fake_route_id_1',
                                             '0.0.0.1/24', '10.0.0.3'),
                      self._get_single_route(router_id, 'fake_route_id_2',
                                             '0.0.0.2/24', '10.0.0.4'), ]

        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
                               return_value=nsx_routes):
            with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
                                   return_value=None):
                with mock.patch.object(routerlib,
                                       'create_explicit_route_lrouter',
                                       return_value='fake_uuid'):
                    old_routes = routerlib.update_explicit_routes_lrouter(
                        self.fake_cluster, router_id, new_routes)
        self.assertEqual(old_routes, nsx_routes)

    def test_update_lrouter_with_routes_raises_nsx_exception(self):
        router_id = 'fake_router_id'
        new_routes = [{"nexthop": "10.0.0.2",
                       "destination": "169.254.169.0/30"}, ]

        nsx_routes = [self._get_single_route(router_id),
                      self._get_single_route(router_id, 'fake_route_id_1',
                                             '0.0.0.1/24', '10.0.0.3'),
                      self._get_single_route(router_id, 'fake_route_id_2',
                                             '0.0.0.2/24', '10.0.0.4'), ]

        with mock.patch.object(routerlib, 'get_explicit_routes_lrouter',
                               return_value=nsx_routes):
            with mock.patch.object(routerlib, 'delete_explicit_route_lrouter',
                                   side_effect=api_exc.NsxApiException):
                with mock.patch.object(
                        routerlib, 'create_explicit_route_lrouter',
                        return_value='fake_uuid'):
                    self.assertRaises(
                        api_exc.NsxApiException,
                        routerlib.update_explicit_routes_lrouter,
                        self.fake_cluster, router_id, new_routes)


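# A compressed view of the update semantics the explicit-route tests above
# rely on (a minimal sketch with hypothetical names, not the real helper,
# which also takes care of the default route): replace the explicit routes
# and hand back the old set so the caller can restore it if a backend call
# fails part-way through.
#
#     def update_routes(get_all, delete_one, create_one, new_routes):
#         old_routes = get_all()
#         for route in old_routes:
#             delete_one(route['uuid'])
#         for route in new_routes:
#             create_one(route)
#         return old_routes

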
class RouterNegativeTestCase(base.NsxlibNegativeBaseTestCase):

    def test_create_lrouter_on_failure(self):
        self.assertRaises(api_exc.NsxApiException,
                          routerlib.create_lrouter,
                          self.fake_cluster,
                          uuidutils.generate_uuid(),
                          'pluto',
                          'fake_router',
                          'my_hop')

    def test_delete_lrouter_on_failure(self):
        self.assertRaises(api_exc.NsxApiException,
                          routerlib.delete_lrouter,
                          self.fake_cluster,
                          'fake_router')

    def test_get_lrouter_on_failure(self):
        self.assertRaises(api_exc.NsxApiException,
                          routerlib.get_lrouter,
                          self.fake_cluster,
                          'fake_router')

    def test_update_lrouter_on_failure(self):
        self.assertRaises(api_exc.NsxApiException,
                          routerlib.update_lrouter,
                          self.fake_cluster,
                          'fake_router',
                          'pluto',
                          'new_hop')


class TestLogicalRouters(base.NsxlibTestCase):

    def _verify_lrouter(self, res_lrouter,
                        expected_uuid,
                        expected_display_name,
                        expected_nexthop,
                        expected_tenant_id,
                        expected_neutron_id=None,
                        expected_distributed=None):
        self.assertEqual(res_lrouter['uuid'], expected_uuid)
        nexthop = (res_lrouter['routing_config']
                   ['default_route_next_hop']['gateway_ip_address'])
        self.assertEqual(nexthop, expected_nexthop)
        router_tags = self._build_tag_dict(res_lrouter['tags'])
        self.assertIn('os_tid', router_tags)
        self.assertEqual(res_lrouter['display_name'], expected_display_name)
        self.assertEqual(expected_tenant_id, router_tags['os_tid'])
        if expected_distributed is not None:
            self.assertEqual(expected_distributed,
                             res_lrouter['distributed'])
        if expected_neutron_id:
            self.assertIn('q_router_id', router_tags)
            self.assertEqual(expected_neutron_id, router_tags['q_router_id'])

    def test_get_lrouters(self):
        lrouter_uuids = [routerlib.create_lrouter(
            self.fake_cluster, 'whatever', 'pippo', 'fake-lrouter-%s' % k,
            '10.0.0.1')['uuid'] for k in range(3)]
        routers = routerlib.get_lrouters(self.fake_cluster, 'pippo')
        for router in routers:
            self.assertIn(router['uuid'], lrouter_uuids)

    def _create_lrouter(self, version, neutron_id=None, distributed=None):
        with mock.patch.object(
                self.fake_cluster.api_client, 'get_version',
                return_value=ver_module.Version(version)):
            if not neutron_id:
                neutron_id = uuidutils.generate_uuid()
            lrouter = routerlib.create_lrouter(
                self.fake_cluster, neutron_id, 'pippo',
                'fake-lrouter', '10.0.0.1', distributed=distributed)
            return routerlib.get_lrouter(self.fake_cluster,
                                         lrouter['uuid'])

    def test_create_and_get_lrouter_v30(self):
        neutron_id = uuidutils.generate_uuid()
        res_lrouter = self._create_lrouter('3.0', neutron_id=neutron_id)
        self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
                             'fake-lrouter', '10.0.0.1', 'pippo',
                             expected_neutron_id=neutron_id)

    def test_create_and_get_lrouter_v31_centralized(self):
        neutron_id = uuidutils.generate_uuid()
        res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
                                           distributed=False)
        self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
                             'fake-lrouter', '10.0.0.1', 'pippo',
                             expected_neutron_id=neutron_id,
                             expected_distributed=False)

    def test_create_and_get_lrouter_v31_distributed(self):
|
|
||||||
neutron_id = uuidutils.generate_uuid()
|
|
||||||
res_lrouter = self._create_lrouter('3.1', neutron_id=neutron_id,
|
|
||||||
distributed=True)
|
|
||||||
self._verify_lrouter(res_lrouter, res_lrouter['uuid'],
|
|
||||||
'fake-lrouter', '10.0.0.1', 'pippo',
|
|
||||||
expected_neutron_id=neutron_id,
|
|
||||||
expected_distributed=True)
|
|
||||||
|
|
||||||
def test_create_and_get_lrouter_name_exceeds_40chars(self):
|
|
||||||
neutron_id = uuidutils.generate_uuid()
|
|
||||||
display_name = '*' * 50
|
|
||||||
lrouter = routerlib.create_lrouter(self.fake_cluster,
|
|
||||||
neutron_id,
|
|
||||||
'pippo',
|
|
||||||
display_name,
|
|
||||||
'10.0.0.1')
|
|
||||||
res_lrouter = routerlib.get_lrouter(self.fake_cluster,
|
|
||||||
lrouter['uuid'])
|
|
||||||
self._verify_lrouter(res_lrouter, lrouter['uuid'],
|
|
||||||
'*' * 40, '10.0.0.1', 'pippo',
|
|
||||||
expected_neutron_id=neutron_id)
|
|
||||||
|
|
||||||
def _test_version_dependent_update_lrouter(self, version):
|
|
||||||
def foo(*args, **kwargs):
|
|
||||||
return version
|
|
||||||
|
|
||||||
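        # ROUTER_FUNC_DICT maps an operation name to a major-version dict
        # whose values map minor versions to handlers; a minor key of -1
        # apparently acts as the fallback for unlisted minor versions.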
        foo_func_dict = {
            'update_lrouter': {
                2: {-1: foo},
                3: {-1: foo, 2: foo}
            }
        }

        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               return_value=ver_module.Version(version)):
            with mock.patch.dict(routerlib.ROUTER_FUNC_DICT,
                                 foo_func_dict, clear=True):
                return routerlib.update_lrouter(
                    self.fake_cluster, 'foo_router_id', 'foo_router_name',
                    'foo_nexthop', routes={'foo_destination': 'foo_address'})

    def test_version_dependent_update_lrouter_old_versions(self):
        self.assertRaises(nsx_exc.InvalidVersion,
                          self._test_version_dependent_update_lrouter,
                          "2.9")
        self.assertRaises(nsx_exc.InvalidVersion,
                          self._test_version_dependent_update_lrouter,
                          "3.0")
        self.assertRaises(nsx_exc.InvalidVersion,
                          self._test_version_dependent_update_lrouter,
                          "3.1")

    def test_version_dependent_update_lrouter_new_versions(self):
        self.assertEqual("3.2",
                         self._test_version_dependent_update_lrouter("3.2"))
        self.assertEqual("4.0",
                         self._test_version_dependent_update_lrouter("4.0"))
        self.assertEqual("4.1",
                         self._test_version_dependent_update_lrouter("4.1"))

    def test_update_lrouter_no_nexthop(self):
        neutron_id = uuidutils.generate_uuid()
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           neutron_id,
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter = routerlib.update_lrouter(self.fake_cluster,
                                           lrouter['uuid'],
                                           'new_name',
                                           None)
        res_lrouter = routerlib.get_lrouter(self.fake_cluster,
                                            lrouter['uuid'])
        self._verify_lrouter(res_lrouter, lrouter['uuid'],
                             'new_name', '10.0.0.1', 'pippo',
                             expected_neutron_id=neutron_id)

    def test_update_lrouter(self):
        neutron_id = uuidutils.generate_uuid()
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           neutron_id,
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter = routerlib.update_lrouter(self.fake_cluster,
                                           lrouter['uuid'],
                                           'new_name',
                                           '192.168.0.1')
        res_lrouter = routerlib.get_lrouter(self.fake_cluster,
                                            lrouter['uuid'])
        self._verify_lrouter(res_lrouter, lrouter['uuid'],
                             'new_name', '192.168.0.1', 'pippo',
                             expected_neutron_id=neutron_id)

    def test_update_nonexistent_lrouter_raises(self):
        self.assertRaises(exceptions.NotFound,
                          routerlib.update_lrouter,
                          self.fake_cluster,
                          'whatever',
                          'foo', '9.9.9.9')

    def test_delete_lrouter(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        routerlib.delete_lrouter(self.fake_cluster, lrouter['uuid'])
        self.assertRaises(exceptions.NotFound,
                          routerlib.get_lrouter,
                          self.fake_cluster,
                          lrouter['uuid'])

    def test_query_lrouter_ports(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        router_port_uuids = [routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo',
            'qp_id_%s' % k, 'port-%s' % k, True,
            ['192.168.0.%s' % k], '00:11:22:33:44:55')['uuid']
            for k in range(3)]
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 3)
        for res_port in ports:
            self.assertIn(res_port['uuid'], router_port_uuids)

    def test_query_lrouter_lports_nonexistent_lrouter_raises(self):
        self.assertRaises(
            exceptions.NotFound, routerlib.create_router_lport,
            self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')

    def test_create_and_get_lrouter_port(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        res_port = ports[0]
        port_tags = self._build_tag_dict(res_port['tags'])
        self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])
        self.assertIn('os_tid', port_tags)
        self.assertIn('q_port_id', port_tags)
        self.assertEqual('pippo', port_tags['os_tid'])
        self.assertEqual('neutron_port_id', port_tags['q_port_id'])

    def test_create_lrouter_port_nonexistent_router_raises(self):
        self.assertRaises(
            exceptions.NotFound, routerlib.create_router_lport,
            self.fake_cluster, 'booo', 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')

    def test_update_lrouter_port(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        routerlib.update_router_lport(
            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
            'pippo', 'another_port_id', 'name', False,
            ['192.168.0.1', '10.10.10.254'])

        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        res_port = ports[0]
        port_tags = self._build_tag_dict(res_port['tags'])
        self.assertEqual(['192.168.0.1', '10.10.10.254'],
                         res_port['ip_addresses'])
        self.assertEqual('False', res_port['admin_status_enabled'])
        self.assertIn('os_tid', port_tags)
        self.assertIn('q_port_id', port_tags)
        self.assertEqual('pippo', port_tags['os_tid'])
        self.assertEqual('another_port_id', port_tags['q_port_id'])

    def test_update_lrouter_port_nonexistent_router_raises(self):
        self.assertRaises(
            exceptions.NotFound, routerlib.update_router_lport,
            self.fake_cluster, 'boo-router', 'boo-port', 'pippo',
            'neutron_port_id', 'name', True, ['192.168.0.1'])

    def test_update_lrouter_port_nonexistent_port_raises(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        self.assertRaises(
            exceptions.NotFound, routerlib.update_router_lport,
            self.fake_cluster, lrouter['uuid'], 'boo-port', 'pippo',
            'neutron_port_id', 'name', True, ['192.168.0.1'])

    def test_delete_lrouter_port(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
            '00:11:22:33:44:55')
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        routerlib.delete_router_lport(self.fake_cluster, lrouter['uuid'],
                                      lrouter_port['uuid'])
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertFalse(len(ports))

    def test_delete_lrouter_port_nonexistent_router_raises(self):
        self.assertRaises(exceptions.NotFound,
                          routerlib.delete_router_lport,
                          self.fake_cluster, 'xyz', 'abc')

    def test_delete_lrouter_port_nonexistent_port_raises(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        self.assertRaises(exceptions.NotFound,
                          routerlib.delete_router_lport,
                          self.fake_cluster, lrouter['uuid'], 'abc')

    def test_delete_peer_lrouter_port(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'x', 'y', True, [],
            '00:11:22:33:44:55')

        def fakegetport(*args, **kwargs):
            return {'_relations': {'LogicalPortAttachment':
                                   {'peer_port_uuid': lrouter_port['uuid']}}}
        # mock get_port
        with mock.patch.object(switchlib, 'get_port', new=fakegetport):
            routerlib.delete_peer_router_lport(self.fake_cluster,
                                               lrouter_port['uuid'],
                                               'whatever', 'whatever')

    def test_update_lrouter_port_ips_add_only(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        routerlib.update_lrouter_port_ips(
            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
            ['10.10.10.254'], [])
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        res_port = ports[0]
        self.assertEqual(sorted(['10.10.10.254', '192.168.0.1']),
                         sorted(res_port['ip_addresses']))

    def test_update_lrouter_port_ips_remove_only(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1', '10.10.10.254'],
            '00:11:22:33:44:55')
        routerlib.update_lrouter_port_ips(
            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
            [], ['10.10.10.254'])
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        res_port = ports[0]
        self.assertEqual(['192.168.0.1'], res_port['ip_addresses'])

    def test_update_lrouter_port_ips_add_and_remove(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        routerlib.update_lrouter_port_ips(
            self.fake_cluster, lrouter['uuid'], lrouter_port['uuid'],
            ['10.10.10.254'], ['192.168.0.1'])
        ports = routerlib.query_lrouter_lports(
            self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(ports), 1)
        res_port = ports[0]
        self.assertEqual(['10.10.10.254'], res_port['ip_addresses'])

    def test_update_lrouter_port_ips_nonexistent_router_raises(self):
        self.assertRaises(
            nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
            self.fake_cluster, 'boo-router', 'boo-port', [], [])

    def test_update_lrouter_port_ips_nsx_exception_raises(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')

        def raise_nsx_exc(*args, **kwargs):
            raise api_exc.NsxApiException()

        with mock.patch.object(nsxlib, 'do_request', new=raise_nsx_exc):
            self.assertRaises(
                nsx_exc.NsxPluginException, routerlib.update_lrouter_port_ips,
                self.fake_cluster, lrouter['uuid'],
                lrouter_port['uuid'], [], [])

    def test_plug_lrouter_port_patch_attachment(self):
        tenant_id = 'pippo'
        transport_zones_config = [{'zone_uuid': _uuid(),
                                   'transport_type': 'stt'}]
        lswitch = switchlib.create_lswitch(self.fake_cluster,
                                           _uuid(),
                                           tenant_id, 'fake-switch',
                                           transport_zones_config)
        lport = switchlib.create_lport(self.fake_cluster, lswitch['uuid'],
                                       tenant_id, 'xyz',
                                       'name', 'device_id', True)
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           tenant_id,
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
        result = routerlib.plug_router_port_attachment(
            self.fake_cluster, lrouter['uuid'],
            lrouter_port['uuid'],
            lport['uuid'], 'PatchAttachment')
        self.assertEqual(lport['uuid'],
                         result['LogicalPortAttachment']['peer_port_uuid'])

    def test_plug_lrouter_port_l3_gw_attachment(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55:66')
        result = routerlib.plug_router_port_attachment(
            self.fake_cluster, lrouter['uuid'],
            lrouter_port['uuid'],
            'gw_att', 'L3GatewayAttachment')
        self.assertEqual(
            'gw_att',
            result['LogicalPortAttachment']['l3_gateway_service_uuid'])

    def test_plug_lrouter_port_l3_gw_attachment_with_vlan(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        result = routerlib.plug_router_port_attachment(
            self.fake_cluster, lrouter['uuid'],
            lrouter_port['uuid'],
            'gw_att', 'L3GatewayAttachment', 123)
        self.assertEqual(
            'gw_att',
            result['LogicalPortAttachment']['l3_gateway_service_uuid'])
        self.assertEqual(
            '123',
            result['LogicalPortAttachment']['vlan_id'])

    def test_plug_lrouter_port_invalid_attachment_type_raises(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        lrouter_port = routerlib.create_router_lport(
            self.fake_cluster, lrouter['uuid'], 'pippo', 'neutron_port_id',
            'name', True, ['192.168.0.1'], '00:11:22:33:44:55')
        self.assertRaises(nsx_exc.InvalidAttachmentType,
                          routerlib.plug_router_port_attachment,
                          self.fake_cluster, lrouter['uuid'],
                          lrouter_port['uuid'], 'gw_att', 'BadType')

    def _test_create_router_snat_rule(self, version):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: ver_module.Version(version)):
            routerlib.create_lrouter_snat_rule(
                self.fake_cluster, lrouter['uuid'],
                '10.0.0.2', '10.0.0.2', order=200,
                match_criteria={'source_ip_addresses': '192.168.0.24'})
            rules = routerlib.query_nat_rules(
                self.fake_cluster, lrouter['uuid'])
            self.assertEqual(len(rules), 1)

    def test_create_router_snat_rule_v3(self):
        self._test_create_router_snat_rule('3.0')

    def test_create_router_snat_rule_v2(self):
        self._test_create_router_snat_rule('2.0')

    def _test_create_router_dnat_rule(self, version, dest_port=None):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               return_value=ver_module.Version(version)):
            routerlib.create_lrouter_dnat_rule(
                self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
                dest_port=dest_port,
                match_criteria={'destination_ip_addresses': '10.0.0.3'})
            rules = routerlib.query_nat_rules(
                self.fake_cluster, lrouter['uuid'])
            self.assertEqual(len(rules), 1)

    def test_create_router_dnat_rule_v3(self):
        self._test_create_router_dnat_rule('3.0')

    def test_create_router_dnat_rule_v2(self):
        self._test_create_router_dnat_rule('2.0')

    def test_create_router_dnat_rule_v2_with_destination_port(self):
        self._test_create_router_dnat_rule('2.0', 8080)

    def test_create_router_dnat_rule_v3_with_destination_port(self):
        self._test_create_router_dnat_rule('3.0', 8080)

    def test_create_router_snat_rule_invalid_match_keys_raises(self):
        # In this case the version does not make a difference
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')

        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: '2.0'):
            self.assertRaises(AttributeError,
                              routerlib.create_lrouter_snat_rule,
                              self.fake_cluster, lrouter['uuid'],
                              '10.0.0.2', '10.0.0.2', order=200,
                              match_criteria={'foo': 'bar'})

    def _test_create_router_nosnat_rule(self, version, expected=1):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: ver_module.Version(version)):
            routerlib.create_lrouter_nosnat_rule(
                self.fake_cluster, lrouter['uuid'],
                order=100,
                match_criteria={'destination_ip_addresses': '192.168.0.0/24'})
            rules = routerlib.query_nat_rules(
                self.fake_cluster, lrouter['uuid'])
            # NoSNAT rules do not exist in V2
            self.assertEqual(len(rules), expected)

    def test_create_router_nosnat_rule_v2(self):
        self._test_create_router_nosnat_rule('2.0', expected=0)

    def test_create_router_nosnat_rule_v3(self):
        self._test_create_router_nosnat_rule('3.0')

    def _prepare_nat_rules_for_delete_tests(self):
        lrouter = routerlib.create_lrouter(self.fake_cluster,
                                           uuidutils.generate_uuid(),
                                           'pippo',
                                           'fake-lrouter',
                                           '10.0.0.1')
        # v2 or v3 makes no difference for this test
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: ver_module.Version('2.0')):
            routerlib.create_lrouter_snat_rule(
                self.fake_cluster, lrouter['uuid'],
                '10.0.0.2', '10.0.0.2', order=220,
                match_criteria={'source_ip_addresses': '192.168.0.0/24'})
            routerlib.create_lrouter_snat_rule(
                self.fake_cluster, lrouter['uuid'],
                '10.0.0.3', '10.0.0.3', order=200,
                match_criteria={'source_ip_addresses': '192.168.0.2/32'})
            routerlib.create_lrouter_dnat_rule(
                self.fake_cluster, lrouter['uuid'], '192.168.0.2', order=200,
                match_criteria={'destination_ip_addresses': '10.0.0.3'})
        return lrouter

    def test_delete_router_nat_rules_by_match_on_destination_ip(self):
        lrouter = self._prepare_nat_rules_for_delete_tests()
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 1, 1,
            destination_ip_addresses='10.0.0.3')
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 2)

    def test_delete_router_nat_rules_by_match_on_source_ip(self):
        lrouter = self._prepare_nat_rules_for_delete_tests()
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'], 'SourceNatRule', 1, 1,
            source_ip_addresses='192.168.0.2/32')
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 2)

    def test_delete_router_nat_rules_by_match_no_match_expected(self):
        lrouter = self._prepare_nat_rules_for_delete_tests()
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'], 'SomeWeirdType', 0)
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'], 'DestinationNatRule', 0,
            destination_ip_addresses='99.99.99.99')
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)

    def test_delete_router_nat_rules_by_match_no_match_raises(self):
        lrouter = self._prepare_nat_rules_for_delete_tests()
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        self.assertRaises(
            nsx_exc.NatRuleMismatch,
            routerlib.delete_nat_rules_by_match,
            self.fake_cluster, lrouter['uuid'],
            'SomeWeirdType', 1, 1)

    def test_delete_nat_rules_by_match_len_mismatch_does_not_raise(self):
        lrouter = self._prepare_nat_rules_for_delete_tests()
        rules = routerlib.query_nat_rules(self.fake_cluster, lrouter['uuid'])
        self.assertEqual(len(rules), 3)
        deleted_rules = routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'],
            'DestinationNatRule',
            max_num_expected=1, min_num_expected=1,
            raise_on_len_mismatch=False,
            destination_ip_addresses='99.99.99.99')
        self.assertEqual(0, deleted_rules)
        # add an extra rule to emulate a duplicate one
        with mock.patch.object(self.fake_cluster.api_client,
                               'get_version',
                               new=lambda: ver_module.Version('2.0')):
            routerlib.create_lrouter_snat_rule(
                self.fake_cluster, lrouter['uuid'],
                '10.0.0.2', '10.0.0.2', order=220,
                match_criteria={'source_ip_addresses': '192.168.0.0/24'})
        deleted_rules_2 = routerlib.delete_nat_rules_by_match(
            self.fake_cluster, lrouter['uuid'], 'SourceNatRule',
            min_num_expected=1, max_num_expected=1,
            raise_on_len_mismatch=False,
            source_ip_addresses='192.168.0.0/24')
        self.assertEqual(2, deleted_rules_2)

@ -1,231 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from neutron.tests.unit.api.v2 import test_base
from neutron_lib import constants
from neutron_lib import exceptions

from vmware_nsx.nsxlib import mh as nsxlib
from vmware_nsx.nsxlib.mh import secgroup as secgrouplib
from vmware_nsx.tests.unit.nsxlib.mh import base

_uuid = test_base._uuid


class SecurityProfileTestCase(base.NsxlibTestCase):

    def test_create_and_get_security_profile(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        sec_prof_res = nsxlib.do_request(
            secgrouplib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 1)
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2)

    def test_create_and_get_default_security_profile(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'default'})
        sec_prof_res = nsxlib.do_request(
            secgrouplib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 3)
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 2)

    def test_update_security_profile_raise_not_found(self):
        self.assertRaises(exceptions.NotFound,
                          secgrouplib.update_security_profile,
                          self.fake_cluster,
                          _uuid(), 'tatore_magno(the great)')

    def test_update_security_profile(self):
        tenant_id = 'foo_tenant_uuid'
        secgroup_id = 'foo_secgroup_uuid'
        old_sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, tenant_id, secgroup_id,
            {'name': 'tatore_magno'})
        new_sec_prof = secgrouplib.update_security_profile(
            self.fake_cluster, old_sec_prof['uuid'], 'aaron_magno')
        self.assertEqual('aaron_magno', new_sec_prof['display_name'])

    def test_update_security_profile_rules(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        ingress_rule = {'ethertype': 'IPv4'}
        egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'}
        new_rules = {'logical_port_egress_rules': [egress_rule],
                     'logical_port_ingress_rules': [ingress_rule]}
        secgrouplib.update_security_group_rules(
            self.fake_cluster, sec_prof['uuid'], new_rules)
        sec_prof_res = nsxlib.do_request(
            nsxlib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2)
        self.assertIn(egress_rule,
                      sec_prof_res['logical_port_egress_rules'])
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
        self.assertIn(ingress_rule,
                      sec_prof_res['logical_port_ingress_rules'])

    def test_update_security_profile_rules_noingress(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
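        # The backend presumably retains a hidden loopback ingress rule
        # when no ingress rules are supplied; the assertions below check
        # exactly that.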
        hidden_ingress_rule = {'ethertype': 'IPv4',
                               'ip_prefix': '127.0.0.1/32'}
        egress_rule = {'ethertype': 'IPv4', 'profile_uuid': 'xyz'}
        new_rules = {'logical_port_egress_rules': [egress_rule],
                     'logical_port_ingress_rules': []}
        secgrouplib.update_security_group_rules(
            self.fake_cluster, sec_prof['uuid'], new_rules)
        sec_prof_res = nsxlib.do_request(
            nsxlib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])
        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_egress_rules']), 2)
        self.assertIn(egress_rule,
                      sec_prof_res['logical_port_egress_rules'])
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
        self.assertIn(hidden_ingress_rule,
                      sec_prof_res['logical_port_ingress_rules'])

    def test_update_security_profile_rules_summarize_port_range(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        ingress_rule = [{'ethertype': 'IPv4'}]
        egress_rules = [
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
             'port_range_min': 1, 'port_range_max': 65535}]
        new_rules = {'logical_port_egress_rules': egress_rules,
                     'logical_port_ingress_rules': [ingress_rule]}
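        # A rule covering the full port range (1-65535) is expected to be
        # summarized into a protocol-only rule with no port attributes.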
        egress_rules_summarized = [{'ethertype': 'IPv4',
                                    'protocol': constants.PROTO_NUM_UDP}]
        secgrouplib.update_security_group_rules(
            self.fake_cluster, sec_prof['uuid'], new_rules)
        sec_prof_res = nsxlib.do_request(
            nsxlib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])

        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
        self.assertEqual(sec_prof_res['logical_port_egress_rules'],
                         egress_rules_summarized)
        self.assertIn(ingress_rule,
                      sec_prof_res['logical_port_ingress_rules'])

    def test_update_security_profile_rules_summarize_ip_prefix(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        ingress_rule = [{'ethertype': 'IPv4'}]
        egress_rules = [
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
             'ip_prefix': '0.0.0.0/0'},
            {'ethertype': 'IPv6', 'protocol': constants.PROTO_NUM_UDP,
             'ip_prefix': '::/0'}]
        new_rules = {'logical_port_egress_rules': egress_rules,
                     'logical_port_ingress_rules': [ingress_rule]}
        egress_rules_summarized = [
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP},
            {'ethertype': 'IPv6', 'protocol': constants.PROTO_NUM_UDP}]
        secgrouplib.update_security_group_rules(
            self.fake_cluster, sec_prof['uuid'], new_rules)
        sec_prof_res = nsxlib.do_request(
            nsxlib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])

        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
        self.assertEqual(sec_prof_res['logical_port_egress_rules'],
                         egress_rules_summarized)
        self.assertIn(ingress_rule,
                      sec_prof_res['logical_port_ingress_rules'])

    def test_update_security_profile_rules_summarize_subset(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        ingress_rule = [{'ethertype': 'IPv4'}]
        egress_rules = [
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
             'port_range_min': 1, 'port_range_max': 1,
             'remote_ip_prefix': '1.1.1.1/20'},
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP,
             'port_range_min': 2, 'port_range_max': 2,
             'profile_uuid': 'xyz'},
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}]
        new_rules = {'logical_port_egress_rules': egress_rules,
                     'logical_port_ingress_rules': [ingress_rule]}
        egress_rules_summarized = [
            {'ethertype': 'IPv4', 'protocol': constants.PROTO_NUM_UDP}]
        secgrouplib.update_security_group_rules(
            self.fake_cluster, sec_prof['uuid'], new_rules)
        sec_prof_res = nsxlib.do_request(
            nsxlib.HTTP_GET,
            nsxlib._build_uri_path('security-profile',
                                   resource_id=sec_prof['uuid']),
            cluster=self.fake_cluster)
        self.assertEqual(sec_prof['uuid'], sec_prof_res['uuid'])

        # Check for builtin rules
        self.assertEqual(len(sec_prof_res['logical_port_ingress_rules']), 1)
        self.assertEqual(sec_prof_res['logical_port_egress_rules'],
                         egress_rules_summarized)
        self.assertIn(ingress_rule,
                      sec_prof_res['logical_port_ingress_rules'])

    def test_update_non_existing_securityprofile_raises(self):
        self.assertRaises(exceptions.NeutronException,
                          secgrouplib.update_security_group_rules,
                          self.fake_cluster, 'whatever',
                          {'logical_port_egress_rules': [],
                           'logical_port_ingress_rules': []})

    def test_delete_security_profile(self):
        sec_prof = secgrouplib.create_security_profile(
            self.fake_cluster, _uuid(), 'pippo', {'name': 'test'})
        secgrouplib.delete_security_profile(
            self.fake_cluster, sec_prof['uuid'])
        self.assertRaises(exceptions.NotFound,
                          nsxlib.do_request,
                          nsxlib.HTTP_GET,
                          nsxlib._build_uri_path(
                              'security-profile',
                              resource_id=sec_prof['uuid']),
                          cluster=self.fake_cluster)

    def test_delete_non_existing_securityprofile_raises(self):
        self.assertRaises(exceptions.NeutronException,
                          secgrouplib.delete_security_profile,
                          self.fake_cluster, 'whatever')

@ -1,60 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from neutron.tests import base

from vmware_nsx.api_client import (
    version as version_module)
from vmware_nsx.api_client import exception
from vmware_nsx.nsxlib.mh import router as routerlib
from vmware_nsx.nsxlib.mh import versioning


class TestVersioning(base.BaseTestCase):

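    # create_lrouter appears to be dispatched by API version: implicit
    # routing handlers for older releases and explicit routing from 3.2
    # onwards, as the assertions below suggest.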
    def test_function_handling_missing_minor(self):
        version = version_module.Version('2.0')
        function = versioning.get_function_by_version(
            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
        self.assertEqual(routerlib.create_implicit_routing_lrouter,
                         function)

    def test_function_handling_with_both_major_and_minor(self):
        version = version_module.Version('3.2')
        function = versioning.get_function_by_version(
            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
        self.assertEqual(routerlib.create_explicit_routing_lrouter,
                         function)

    def test_function_handling_with_newer_major(self):
        version = version_module.Version('5.2')
        function = versioning.get_function_by_version(
            routerlib.ROUTER_FUNC_DICT, 'create_lrouter', version)
        self.assertEqual(routerlib.create_explicit_routing_lrouter,
                         function)

    def test_function_handling_with_obsolete_major(self):
        version = version_module.Version('1.2')
        self.assertRaises(NotImplementedError,
                          versioning.get_function_by_version,
                          routerlib.ROUTER_FUNC_DICT,
                          'create_lrouter', version)

    def test_function_handling_with_unknown_version(self):
        self.assertRaises(exception.ServiceUnavailable,
                          versioning.get_function_by_version,
                          routerlib.ROUTER_FUNC_DICT,
                          'create_lrouter', None)