Remove vmware_nsx_tempest
This now existsin the vmware_nsx_tempest_plugin Change-Id: I76e5856deeeb06b87675314635d06aa0291143eb Depends-On: I804c3fd1e0c9cbeb454677e7951072ad74391fec
This commit is contained in:
parent
02c28ee5ea
commit
508c6262a2
@ -21,8 +21,6 @@ classifier =
|
||||
[files]
|
||||
packages =
|
||||
vmware_nsx
|
||||
vmware_nsx_tempest
|
||||
|
||||
[entry_points]
|
||||
console_scripts =
|
||||
neutron-check-nsx-config = vmware_nsx.check_nsx_config:main
|
||||
@ -53,8 +51,6 @@ vmware_nsx.neutron.nsxv.router_type_drivers =
|
||||
shared = vmware_nsx.plugins.nsx_v.drivers.shared_router_driver:RouterSharedDriver
|
||||
distributed = vmware_nsx.plugins.nsx_v.drivers.distributed_router_driver:RouterDistributedDriver
|
||||
exclusive = vmware_nsx.plugins.nsx_v.drivers.exclusive_router_driver:RouterExclusiveDriver
|
||||
tempest.test_plugins =
|
||||
vmware-nsx-tempest-plugin = vmware_nsx_tempest.plugin:VMwareNsxTempestPlugin
|
||||
oslo.config.opts =
|
||||
nsx = vmware_nsx.opts:list_opts
|
||||
networking_sfc.flowclassifier.drivers =
|
||||
|
@ -59,23 +59,10 @@ check_identical_policy_files () {
|
||||
fi
|
||||
}
|
||||
|
||||
check_no_duplicate_api_test_idempotent_ids() {
|
||||
# For API tests, an idempotent ID is assigned to each single API test,
|
||||
# those IDs should be unique
|
||||
output=$(check-uuid --package vmware_nsx_tempest)
|
||||
if [ "$?" -ne 0 ]; then
|
||||
echo "There are duplicate idempotent ids in the API tests" >>$FAILURES
|
||||
echo "please, assign unique uuids to each API test:" >>$FAILURES
|
||||
echo "$output" >>$FAILURES
|
||||
fi
|
||||
}
|
||||
|
||||
# Add your checks here...
|
||||
check_no_symlinks_allowed
|
||||
check_pot_files_errors
|
||||
#check_identical_policy_files
|
||||
# Remove the comment below when we figure out how to consume pyshark
|
||||
#check_no_duplicate_api_test_idempotent_ids
|
||||
|
||||
# Fail, if there are emitted failures
|
||||
if [ -f $FAILURES ]; then
|
||||
|
1
tox.ini
1
tox.ini
@ -106,7 +106,6 @@ import-order-style = pep8
|
||||
|
||||
[hacking]
|
||||
import_exceptions = vmware_nsx._i18n,
|
||||
vmware_nsx_tempest._i18n
|
||||
local-check-factory = neutron_lib.hacking.checks.factory
|
||||
|
||||
[testenv:genconfig]
|
||||
|
@ -1,92 +0,0 @@
|
||||
Welcome!
|
||||
========
|
||||
vmware_nsx_tempest is a plugin module to openstack tempest project.
|
||||
|
||||
If you are not familiar with tempest, please refer to::
|
||||
|
||||
https://docs.openstack.org/developer/tempest
|
||||
|
||||
It is implemented with tempest external plugin.
|
||||
The official design sepcification is at::
|
||||
|
||||
https://review.openstack.org/#/c/184992/
|
||||
|
||||
Overview
|
||||
========
|
||||
|
||||
vmware_nsx_tempest hosts vmware_nsx's functional api and scenario tests.
|
||||
|
||||
All vmware_nsx_tempest tests are in "master" branch. For this reason,
|
||||
it is recommended to have your own developer version of vmware-nsx repo
|
||||
installed outside the devstack folder, /opt/stack/.
|
||||
|
||||
For example at /opt/devtest folder. In doing so, you can install
|
||||
editable vmware-nsx repo under tempest VENV environemnt.
|
||||
|
||||
Installation:
|
||||
-------------
|
||||
|
||||
#. On your own development folder, for example /opt/devtest/,
|
||||
install your own tempest development env at /opt/devtest/os-tempest/::
|
||||
|
||||
$ cd /opt/devtest
|
||||
$ git clone https://github.com/openstack/tempest os-tempest
|
||||
|
||||
#. Install virtualenv with the following command::
|
||||
|
||||
$ cd /opt/devtest/os-tempest
|
||||
$ ./run_tempest.sh -u not_exist_tests
|
||||
|
||||
#. Install vmware-nsx master branch at /opt/devtest/vmware-nsx::
|
||||
|
||||
$ cd /opt/devtest
|
||||
$ git clone https://github.com/openstack/vmware-nsx
|
||||
|
||||
#. Install vmware_nsx_tempest in your tempest development environment::
|
||||
|
||||
$ cd /opt/devtest/os-tempest
|
||||
$ source .venv/bin/activate
|
||||
$ pip install -e /opt/devtest/vmware-nsx/
|
||||
|
||||
Run command::
|
||||
|
||||
$ pip show vmware-nsx
|
||||
|
||||
You should observe the following statements::
|
||||
|
||||
Location: /opt/devtest/vmware-nsx
|
||||
|
||||
and under section of Entry-points::
|
||||
|
||||
[tempest.test_plugins]
|
||||
vmware-nsx-tempest-plugin = vmware_nsx_tempest.plugin:VMwareNsxTempestPlugin
|
||||
|
||||
#. Validate installed vmware_nsx_tempest successfully do::
|
||||
|
||||
$ cd /opt/devtest/os-tempest
|
||||
$ tools/with_venv.sh testr list-tests vmware_nsx_tempest.*l2_gateway
|
||||
|
||||
Your installation failed, if no tests are shown.
|
||||
|
||||
Execution:
|
||||
----------
|
||||
|
||||
vmware_nsx_tempest tests are tempest tests, you need to
|
||||
run from tempest directory. For example, to run only l2-gateway tests::
|
||||
|
||||
$ cd /opt/devtest/os-tempest
|
||||
$ ./run_tempest.sh -t vmware_nsx_tempest.*test_l2_gateway
|
||||
$ ./run_tempest.sh -d vmware_nsx_tempest.tests.nsxv.api.test_l2_gateway_connection.L2GatewayConnectionTest.test_csuld_single_device_interface_vlan
|
||||
|
||||
TechNote on vmware_nsx_tempest:
|
||||
-------------------------------
|
||||
|
||||
vmware_nsx_tempest is a plugin to tempest, not neutron, nor vmware_nsx.
|
||||
|
||||
It is defined by tempest.test_plugins.
|
||||
|
||||
Modules within vmware_nsx_tempest can not see resources defined
|
||||
by vmware_nsx. Commands like following are not acceptable, unless
|
||||
vmware_nsx is installed in your tempest environment::
|
||||
|
||||
import vmware_nsx.shell.admin.plugins.common.utils as admin_utils
|
@ -1,32 +0,0 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
DOMAIN = "vmware-nsx-tempest"
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
|
||||
|
||||
# The primary translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
||||
|
||||
# The contextual translation function using the name "_C"
|
||||
_C = _translators.contextual_form
|
||||
|
||||
# The plural translation function using the name "_P"
|
||||
_P = _translators.plural_form
|
||||
|
||||
|
||||
def get_available_languages():
|
||||
return oslo_i18n.get_available_languages(DOMAIN)
|
@ -1,61 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# General constants.
|
||||
ONE_SEC = 1
|
||||
|
||||
# L2GW constants.
|
||||
L2GW = "l2_gateway"
|
||||
L2GWS = L2GW + "s"
|
||||
L2_GWS_BASE_URI = "/l2-gateways"
|
||||
EXPECTED_HTTP_RESPONSE_200 = "200"
|
||||
EXPECTED_HTTP_RESPONSE_201 = "201"
|
||||
EXPECTED_HTTP_RESPONSE_204 = "204"
|
||||
L2GWC = "l2_gateway_connection"
|
||||
|
||||
# MAC Learning constants
|
||||
MAC_SW_PROFILE = "MacManagementSwitchingProfile"
|
||||
PORT_SEC_PROFILE = "SpoofGuardSwitchingProfile"
|
||||
SEC_GRPS_PROFILE = "SwitchSecuritySwitchingProfile"
|
||||
|
||||
# NSXV3 MDProxy constants.
|
||||
MD_ERROR_CODE_WHEN_LS_BOUNDED = "10026"
|
||||
INTERVAL_BETWEEN_EXEC_RETRY_ON_SSH = 5
|
||||
MAX_NO_OF_TIMES_EXECUTION_OVER_SSH = 30
|
||||
MD_BASE_URL = "http://169.254.169.254/"
|
||||
|
||||
# NSXV3 Port Security constants.
|
||||
NSX_BACKEND_TIME_INTERVAL = 30
|
||||
NSX_BACKEND_SMALL_TIME_INTERVAL = 10
|
||||
NSX_BACKEND_VERY_SMALL_TIME_INTERVAL = 5
|
||||
|
||||
# DFW
|
||||
NSX_FIREWALL_REALIZED_TIMEOUT = 120
|
||||
|
||||
# FWaaS
|
||||
NO_OF_ENTRIES = 20
|
||||
EXCLUSIVE_ROUTER = 'exclusive'
|
||||
DISTRIBUTED_ROUTER = 'distributed'
|
||||
TCP_PROTOCOL = 'tcp'
|
||||
ICMP_PROTOCOL = 'icmp'
|
||||
|
||||
# NSXV3 Firewall
|
||||
NSX_FIREWALL_REALIZED_DELAY = 2
|
||||
|
||||
APPLIANCE_NAME_STARTS_WITH = "vmw_"
|
@ -1,199 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_config import cfg
|
||||
from tempest import config
|
||||
|
||||
service_option = cfg.BoolOpt("vmware_nsx",
|
||||
default=True,
|
||||
help="Whether or not vmware_nsx is expected to be"
|
||||
" available")
|
||||
|
||||
scenario_group = config.scenario_group
|
||||
ScenarioGroup = [
|
||||
cfg.FloatOpt('waitfor_disassoc',
|
||||
default=15.0,
|
||||
help="Wait for seconds after disassociation."),
|
||||
cfg.FloatOpt('waitfor_assoc',
|
||||
default=5.0,
|
||||
help="Waitfor seconds after association."),
|
||||
cfg.FloatOpt('waitfor_connectivity',
|
||||
default=120.0,
|
||||
help="Wait for seconds to become connected."),
|
||||
cfg.ListOpt('outside_world_servers',
|
||||
default=["8.8.8.8", "8.8.4.4"],
|
||||
help="List of servers reside outside of openstack env."
|
||||
" which is used to test default gateway behavior"
|
||||
" when VMs are under logical routers,"
|
||||
" & DNS are local to provider's settings."),
|
||||
cfg.DictOpt('flat_alloc_pool_dict',
|
||||
default={},
|
||||
help="Define flat network ip range."
|
||||
" required attributes are gateway, start, end"
|
||||
" and cidr. Example value: gateway:10.1.1.253,"
|
||||
" start:10.1.1.30,end:10.1.1.49,cidr=10.1.1.0/24"),
|
||||
cfg.DictOpt('xnet_multiple_subnets_dict',
|
||||
default={},
|
||||
help="External network with multiple subnets."
|
||||
" The primary subnet ip-range will be shrinked,"
|
||||
" This is for the 2nd subnet, required attrs:"
|
||||
" start:10.1.1.31,end:10.1.1.33,cidr=10.1.2.0/24"
|
||||
" AND limit to only 3 ip addresses defined."),
|
||||
]
|
||||
|
||||
network_group = config.network_group
|
||||
NetworkGroup = [
|
||||
cfg.StrOpt('l2gw_switch',
|
||||
default='',
|
||||
help="Distributed Virtual Portgroup to create VLAN port."),
|
||||
cfg.DictOpt('l2gw_switch_dict',
|
||||
default={},
|
||||
help="dict version of l2gw_switch:"
|
||||
"device_name:,interfaces:,segmentation_id:,"),
|
||||
cfg.StrOpt('dns_search_domain',
|
||||
default='vmware.com',
|
||||
help="a valid domain that contains host defined at"
|
||||
" attribute host_in_search_domain"),
|
||||
cfg.StrOpt('host_in_search_domain',
|
||||
default='mail',
|
||||
help="host exists in dns_search_domain"),
|
||||
cfg.StrOpt('public_network_cidr',
|
||||
default='',
|
||||
help="Public network cidr which provides external network"
|
||||
" connectivity"),
|
||||
cfg.StrOpt('backend',
|
||||
default='nsxv',
|
||||
help="NSX backend, valid values are nsxv|nsxv3"),
|
||||
]
|
||||
|
||||
nsxv_group = cfg.OptGroup(name='nsxv',
|
||||
title="NSX-v Configuration Options")
|
||||
NSXvGroup = [
|
||||
cfg.StrOpt('manager_uri',
|
||||
default='https://10.0.0.10',
|
||||
help="NSX-v manager ip address"),
|
||||
cfg.StrOpt('user',
|
||||
default='admin',
|
||||
help="NSX-v manager username"),
|
||||
cfg.StrOpt('password',
|
||||
default='default',
|
||||
help="NSX-v manager password"),
|
||||
cfg.StrOpt('vdn_scope_id',
|
||||
default='vdnscope-1',
|
||||
help="NSX-v vdn scope id"),
|
||||
cfg.IntOpt('max_mtz',
|
||||
default=3,
|
||||
help="Max Multiple Transport Zones used for testing."),
|
||||
cfg.DictOpt('flat_alloc_pool_dict',
|
||||
default={},
|
||||
help=" Define flat network ip range."
|
||||
" required attributes are gateway, start, end"
|
||||
" and cidr. Example value: gateway:10.1.1.253,"
|
||||
" start:10.1.1.30,end:10.1.1.49,cidr=10.1.1.0/24"),
|
||||
cfg.StrOpt('vlan_physical_network',
|
||||
default='',
|
||||
help="physval_network to create vlan."),
|
||||
cfg.IntOpt('provider_vlan_id',
|
||||
default=888,
|
||||
help="The default vlan_id for admin vlan."),
|
||||
cfg.IntOpt('create_router_http_timeout',
|
||||
default=900,
|
||||
help="Specific for router_size tests. This value defines"
|
||||
" how long http.request should retry."),
|
||||
cfg.BoolOpt('no_router_type',
|
||||
default=False,
|
||||
help="router_type is NSXv extension."
|
||||
"Set it to True allow tests to remove this attribute"
|
||||
" when creating router."),
|
||||
cfg.ListOpt('bugs_to_resolve',
|
||||
default=[],
|
||||
help="Bugs to be resolved. Define this at tempest.conf and"
|
||||
" test case testtools.skipIf(condition, reasons) to"
|
||||
" skip test cannot be run at specific plugin env."),
|
||||
cfg.StrOpt('default_policy_id',
|
||||
default='',
|
||||
help="NSX security-policy ID used to create all tenants"
|
||||
" default security-group-policy."
|
||||
" This must be the same as the one at vmware/nsx.ini"),
|
||||
cfg.StrOpt('alt_policy_id',
|
||||
default='',
|
||||
help="NSX security-policy ID used to create the 2nd"
|
||||
" security-group-policy, and != default_policy_id."),
|
||||
cfg.BoolOpt('allow_tenant_rules_with_policy',
|
||||
default=False,
|
||||
help="Default=False; a tenant cannot create security-group."
|
||||
" If True, tenant can create non-policy security-group."
|
||||
" Sync this value with nsx.ini file."),
|
||||
]
|
||||
|
||||
|
||||
l2gw_group = cfg.OptGroup(name='l2gw',
|
||||
title="l2-gateway Configuration Options")
|
||||
L2gwGroup = [
|
||||
cfg.DictOpt('vlan_subnet_ipv4_dict',
|
||||
default={},
|
||||
help="Tenant's VLAN subnet cdir to connect to l2gw/VXLAN."
|
||||
" Example: cidr=192.168.99.0/24,start:192.168.99.41"
|
||||
" ,end:192.168.99.50,gateway=192.168.99.253"),
|
||||
cfg.StrOpt('device_one_vlan',
|
||||
default="",
|
||||
help="l2g2 device with one VLAN"
|
||||
" l2gw-1::dvportgroup-14420|3845"),
|
||||
cfg.StrOpt('device_multiple_vlans',
|
||||
default="",
|
||||
help="l2gw device with multiple VLANs"
|
||||
" l2gw-x::dvportgroup-14429|3880#3381#3382"),
|
||||
cfg.StrOpt('multiple_interfaces_multiple_vlans',
|
||||
default="",
|
||||
help="l2gw multiple devices, interface has multiple VLANs"
|
||||
" m-ifs::dvportgroup-144|138#246;dvportgroup-155|339"),
|
||||
cfg.StrOpt('vlan_1',
|
||||
default="16",
|
||||
help="VLAN id"),
|
||||
cfg.StrOpt('vlan_2',
|
||||
default="17",
|
||||
help="VLAN id"),
|
||||
cfg.StrOpt("subnet_1_cidr",
|
||||
default="192.168.1.0/24",
|
||||
help="Subnet 1 network cidr."
|
||||
"Example: 1.1.1.0/24"),
|
||||
cfg.StrOpt('vm_on_vds_tz1_vlan16_ip',
|
||||
default="192.168.1.203",
|
||||
help="IPv4 IP address of VM3"),
|
||||
cfg.StrOpt('vm_on_switch_vlan16',
|
||||
default="192.168.1.204",
|
||||
help="IPv4 IP address of VM4"),
|
||||
cfg.StrOpt('vm_on_vds_tz2_vlan16_ip',
|
||||
default="192.168.1.205",
|
||||
help="IPv4 IP address of VM5"),
|
||||
cfg.StrOpt('vm_on_vds_tz2_vlan17_ip',
|
||||
default="192.168.1.206",
|
||||
help="IPv4 IP address of VM6"),
|
||||
]
|
||||
|
||||
nsxv3_group = cfg.OptGroup(name='nsxv3',
|
||||
title="NSXv3 Configuration Options")
|
||||
|
||||
NSXv3Group = [
|
||||
cfg.StrOpt('nsx_manager',
|
||||
default='',
|
||||
help="NSX manager IP address"),
|
||||
cfg.StrOpt('nsx_user',
|
||||
default='admin',
|
||||
help="NSX manager username"),
|
||||
cfg.StrOpt('nsx_password',
|
||||
default='default',
|
||||
help="NSX manager password"),
|
||||
cfg.BoolOpt('native_dhcp_metadata',
|
||||
default=False,
|
||||
help="Enable or disable Native DHCP and MDProxy for nsxv3"),
|
||||
]
|
@ -1,90 +0,0 @@
|
||||
Admin Policy
|
||||
============
|
||||
|
||||
Admin policy, neutron extension secuirty-group-policy provides organization
|
||||
to enforce traffic forwarding utilizing NSX security policy.
|
||||
|
||||
The "Admin Policy" feature is admin priviledge, normal project/tenant is not
|
||||
able to create security-group-policy.
|
||||
|
||||
This feature can be enabled from devstack or manually.
|
||||
|
||||
Enable security-group-policy extention at bring up devstack
|
||||
===========================================================
|
||||
|
||||
You can enable security-group-policy when starting up devstack.
|
||||
However, if the policy-id does not exist, starting will fail.
|
||||
|
||||
To enable it, add the following tokens to local.conf:
|
||||
|
||||
NSXV_USE_NSX_POLICIES=True
|
||||
NSXV_DEFAULT_POLICY_ID=policy-11
|
||||
NSXV_ALLOW_TENANT_RULES_WITH_POLICY=True
|
||||
|
||||
Change values according to your needs though.
|
||||
|
||||
Enable security-group-policy extention manually
|
||||
===============================================
|
||||
|
||||
Instruction is from the view of devstack
|
||||
|
||||
#. Add following items to /etc/neutron/policy.json::
|
||||
|
||||
"create_security_group:logging": "rule:admin_only",
|
||||
"update_security_group:logging": "rule:admin_only",
|
||||
"get_security_group:logging": "rule:admin_only",
|
||||
"create_security_group:provider": "rule:admin_only",
|
||||
"create_port:provider_security_groups": "rule:admin_only",
|
||||
"create_security_group:policy": "rule:admin_only",
|
||||
"update_security_group:policy": "rule:admin_only",
|
||||
|
||||
#. Add following key=value pair to session [nsxv] of /etc/neutron/plugin/vmware/nsx.ini::
|
||||
|
||||
use_nsx_policies = True
|
||||
default_policy_id = policy-11
|
||||
allow_tenant_rules_with_policy = False
|
||||
|
||||
# NOTE: For automation, set allow_tenant_rules_with_policy to True
|
||||
|
||||
tempest.conf
|
||||
============
|
||||
|
||||
At session [nsxv] add the following 3 key=value pair:
|
||||
|
||||
default_policy_id = policy-11
|
||||
alt_policy_id = policy-22
|
||||
allow_tenant_rules_with_policy = False
|
||||
|
||||
# NOTE: default_policy_id and allow_tenant_rules_with_policy need to match nsx.ini
|
||||
|
||||
default_policy_id and alt_policy_id:
|
||||
|
||||
For API tests, both must exist at NSX.
|
||||
|
||||
For scenario tests, please refer to nsxv/scenario/test_admin_policy_basic_ops.py
|
||||
|
||||
In short::
|
||||
|
||||
policy-11 (policy-AA at script & test-plan) firewall rules::
|
||||
action-1: dhcp-in/any/policy-security-groups/dhcp/Allow
|
||||
action-2: dhcp-out/policy-security-groups/dhcp/Allow
|
||||
action-3: ping-in/any/policy-security-groups/ICMP/Allow
|
||||
action-4: ping-out/policy-security-groups/any/ICMP/Allow/
|
||||
action-5: ssh-in/any/policy-security-groups/SSH/Allow/
|
||||
action-6: ssh-in/any/policy-security-groups/SSH/Allow/
|
||||
action-7: http-ok/any/policy-security-groups/HTTP,HTTPS/Allow/
|
||||
action-8: sorry-nothing-allowed/any/policy-security-groups/Any/Reject
|
||||
|
||||
You can import policy-AA to NSX using the admin-policy-AA.blueprint
|
||||
|
||||
policy-22 (policy-BB at script & test-plan) firewall rules::
|
||||
action-1: dhcp-in/any/policy-security-groups/dhcp/Allow
|
||||
action-2: dhcp-out/policy-security-groups/dhcp/Allow
|
||||
action-3: group-ping/policy-security-groups/policy-security-groups/ICMP/Allow/
|
||||
action-4: ssh-in/any/policy-security-groups/SSH/Allow/
|
||||
action-5: ssh-in/any/policy-security-groups/SSH/Allow/
|
||||
action-6: http-ok/any/policy-security-groups/HTTP,HTTPS/Allow/
|
||||
pction-7: sorry-nothing-allowed/any/policy-security-groups/Any/Reject
|
||||
|
||||
NOTE on ping: same as policy-11 but only allowed from policy-security-groups
|
||||
You can import policy-BB to NSX using the admin-policy-BB.blueprint
|
@ -1,74 +0,0 @@
|
||||
Overview
|
||||
========
|
||||
|
||||
This document describes what LBaaS tests are not supported at different
|
||||
NSX plugin's and backends.
|
||||
|
||||
NOTE::
|
||||
|
||||
All LBaaS API & Scenario tests should PASS with exceptions
|
||||
due to NSX plugins and features supported by backend.
|
||||
|
||||
For how tests can be skipped for specific plugin and backend,
|
||||
please refer to paragraph "Config for Test Execution".
|
||||
|
||||
NOTE::
|
||||
|
||||
We no longer support LBaaS v1. So this document and LBaaS tests
|
||||
only applys to releases from Mitaka/Marvin or later.
|
||||
|
||||
Limitation:
|
||||
-----------
|
||||
|
||||
NSX-v with VMware LBaaS driver::
|
||||
|
||||
#. LBaaS networks need to attach to exclusive router
|
||||
#. One tenant per subnet
|
||||
#. L7 switching not supported
|
||||
|
||||
NSX-v3 with Octavia driver::
|
||||
|
||||
#. upstream implemenation - all tests should PASS.
|
||||
#. scenario tests take long time, it might fail with fixture timeout.
|
||||
|
||||
Config for Test execution:
|
||||
--------------------------
|
||||
|
||||
Following configuration attributes used to controll test execution::
|
||||
|
||||
#. no_router_type at group/session nsxv
|
||||
|
||||
Default is False, and is used to run LBaaS tests in NSX-v environment.
|
||||
To run in NSX-t environment, set it to True
|
||||
|
||||
#. bugs_to_resolve at group/session nsxv
|
||||
|
||||
For test to skip if bug-ID presented in this attribute.
|
||||
The test will use testtools.skipIf(condition, reason) to skip if its ID in the bugs_to_resolve list.
|
||||
|
||||
local.conf:
|
||||
----------
|
||||
NSX-v::
|
||||
[nsxv]
|
||||
no_router_type=False
|
||||
bugs_to_resolve=1641902,1715126,1703396,1739510
|
||||
|
||||
NSX-v3::
|
||||
[compute]
|
||||
build_timeout=900
|
||||
build_interval=2
|
||||
|
||||
[nsxv]
|
||||
no_router_type=True
|
||||
|
||||
Execution:
|
||||
----------
|
||||
|
||||
#. Use testr list-tests command to generate test suite for run API and Scenario tests::
|
||||
|
||||
tools/with_venv.sh testr list-tests nsxv.api.lbaas
|
||||
tools/with_venv.sh testr list-tests nsxv.scenarion.test_lbaas
|
||||
|
||||
#. l7 switching tests take long time to complete. If got fixture timeout, do::
|
||||
|
||||
OS_TEST_TIMEOUT=2400 ./run_tempest.sh -t test_lbaas_l7_switching_ops
|
@ -1,341 +0,0 @@
|
||||
<securityPolicyHierarchy>
|
||||
<name>admin-policy-AA</name>
|
||||
<description>8 firewall rules - ping, ssh from anywhere are OK</description>
|
||||
<securityPolicy>
|
||||
<revision>0</revision>
|
||||
<name>security-policy-AA</name>
|
||||
<description>Security Policy AA</description>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>false</inheritanceAllowed>
|
||||
<precedence>5500</precedence>
|
||||
<actionsByCategory>
|
||||
<category>firewall</category>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>dhcp-in</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>1</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Client</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>68</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Server</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>67</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>dhcp-out</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>2</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Client</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>68</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Server</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>67</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>outbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ping-in</name>
|
||||
<description>Everyone can ping me</description>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>3</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-request</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Redirect</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>redirect</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo Reply</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-reply</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ping-out</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>4</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-request</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Redirect</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>redirect</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo Reply</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-reply</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>outbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ssh-in-ok</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>5</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>SSH</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>22</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ssh-out-ok</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>6</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>SSH</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>22</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>outbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>HTTP-ok</name>
|
||||
<description>All can http(s) me</description>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>7</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>HTTP</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>80</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>HTTPS</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>443</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>sorry-nothing-allowed</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>8</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>reject</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
</actionsByCategory>
|
||||
<statusesByCategory>
|
||||
<category>firewall</category>
|
||||
<status>in_sync</status>
|
||||
</statusesByCategory>
|
||||
</securityPolicy>
|
||||
</securityPolicyHierarchy>
|
@ -1,285 +0,0 @@
|
||||
<securityPolicyHierarchy>
|
||||
<name>admin-policy-BB</name>
|
||||
<description>policy-BB, ssh from anywhere are OK, but ping limited to same security-group</description>
|
||||
<securityPolicy>
|
||||
<revision>0</revision>
|
||||
<name>security-policy-BB</name>
|
||||
<description>Security Policy BB</description>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>false</inheritanceAllowed>
|
||||
<precedence>5600</precedence>
|
||||
<actionsByCategory>
|
||||
<category>firewall</category>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>dhcp-in</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>1</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Client</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>68</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Server</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>67</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>dhcp-out</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>2</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Client</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>68</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>DHCP-Server</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>UDP</applicationProtocol>
|
||||
<value>67</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>outbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>group-ping-ok</name>
|
||||
<description>icmp only allowed from VM with same security-policy</description>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>3</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-request</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Redirect</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>redirect</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>ICMP Echo Reply</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>ICMP</applicationProtocol>
|
||||
<value>echo-reply</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>intra</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ssh-in-ok</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>4</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>SSH</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>22</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>ssh-out-ok</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>5</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>SSH</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>22</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>outbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>group-HTTP</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>6</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<applications>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>HTTP</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>80</value>
|
||||
</element>
|
||||
</application>
|
||||
<application>
|
||||
<revision>0</revision>
|
||||
<name>HTTPS</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<inheritanceAllowed>true</inheritanceAllowed>
|
||||
<element>
|
||||
<applicationProtocol>TCP</applicationProtocol>
|
||||
<value>443</value>
|
||||
</element>
|
||||
</application>
|
||||
</applications>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>allow</action>
|
||||
<direction>intra</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
<action class="firewallSecurityAction">
|
||||
<revision>0</revision>
|
||||
<name>sorry-nothing-allowed</name>
|
||||
<clientHandle></clientHandle>
|
||||
<isUniversal>false</isUniversal>
|
||||
<universalRevision>0</universalRevision>
|
||||
<category>firewall</category>
|
||||
<executionOrder>7</executionOrder>
|
||||
<isEnabled>true</isEnabled>
|
||||
<isActionEnforced>false</isActionEnforced>
|
||||
<invalidSecondaryContainers>false</invalidSecondaryContainers>
|
||||
<invalidApplications>false</invalidApplications>
|
||||
<logged>false</logged>
|
||||
<action>reject</action>
|
||||
<direction>inbound</direction>
|
||||
<outsideSecondaryContainer>false</outsideSecondaryContainer>
|
||||
</action>
|
||||
</actionsByCategory>
|
||||
<statusesByCategory>
|
||||
<category>firewall</category>
|
||||
<status>in_sync</status>
|
||||
</statusesByCategory>
|
||||
</securityPolicy>
|
||||
</securityPolicyHierarchy>
|
@ -1,234 +0,0 @@
|
||||
# Copyright 2017 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import collections
|
||||
|
||||
import netaddr
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
from vmware_nsx_tempest.tests.scenario import manager
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ApplianceManager(manager.NetworkScenarioTest):
|
||||
server_details = collections.namedtuple('server_details',
|
||||
['server', 'floating_ip',
|
||||
'networks'])
|
||||
|
||||
def setUp(self):
|
||||
super(ApplianceManager, self).setUp()
|
||||
self.topology_routers = {}
|
||||
self.topology_networks = {}
|
||||
self.topology_subnets = {}
|
||||
self.topology_servers = {}
|
||||
self.topology_servers_floating_ip = []
|
||||
self.topology_public_network_id = CONF.network.public_network_id
|
||||
self.topology_config_drive = CONF.compute_feature_enabled.config_drive
|
||||
self.topology_keypairs = {}
|
||||
self.servers_details = {}
|
||||
|
||||
def get_internal_ips(self, server, network, device="network"):
|
||||
internal_ips = [p['fixed_ips'][0]['ip_address'] for p in
|
||||
self.os_admin.ports_client.list_ports(
|
||||
tenant_id=server['tenant_id'],
|
||||
network_id=network['id'])['ports'] if
|
||||
p['device_owner'].startswith(device)]
|
||||
return internal_ips
|
||||
|
||||
def _verify_empty_security_group_status(self, security_group):
|
||||
ip_protocols = ["IPV6", "IPV4"]
|
||||
nsx_fw_section, nsx_fw_section_rules = \
|
||||
self.nsx_client.get_firewall_section_and_rules(
|
||||
security_group['name'], security_group['id'])
|
||||
msg = "Newly created empty security group does not meet criteria !!!"
|
||||
self.assertEqual(nsx_fw_section["rule_count"], 2, msg)
|
||||
self.assertEqual(nsx_fw_section_rules[0]["action"], "ALLOW", msg)
|
||||
self.assertEqual(nsx_fw_section_rules[1]["action"], "ALLOW", msg)
|
||||
self.assertEqual(nsx_fw_section_rules[0]["direction"], "OUT", msg)
|
||||
self.assertEqual(nsx_fw_section_rules[1]["direction"], "OUT", msg)
|
||||
self.assertIn(nsx_fw_section_rules[0]["ip_protocol"], ip_protocols,
|
||||
msg)
|
||||
self.assertIn(nsx_fw_section_rules[1]["ip_protocol"], ip_protocols,
|
||||
msg)
|
||||
|
||||
def create_topology_empty_security_group(self, namestart="vmw_"):
|
||||
security_group = self._create_empty_security_group(namestart=namestart)
|
||||
self._verify_empty_security_group_status(security_group)
|
||||
return security_group
|
||||
|
||||
def add_security_group_rule(self, security_group, rule):
|
||||
return self._create_security_group_rule(secgroup=security_group,
|
||||
**rule)
|
||||
|
||||
def get_server_key(self, server):
|
||||
return self.topology_keypairs[server['key_name']]['private_key']
|
||||
|
||||
def create_topology_router(self, router_name, routers_client=None,
|
||||
**kwargs):
|
||||
if not routers_client:
|
||||
routers_client = self.routers_client
|
||||
router_name_ = constants.APPLIANCE_NAME_STARTS_WITH + router_name
|
||||
router = self._create_router(namestart=router_name_, **kwargs)
|
||||
public_network_info = {"external_gateway_info": dict(
|
||||
network_id=self.topology_public_network_id)}
|
||||
routers_client.update_router(router['id'], **public_network_info)
|
||||
self.topology_routers[router_name] = router
|
||||
return router
|
||||
|
||||
def create_topology_network(self, network_name, networks_client=None,
|
||||
tenant_id=None, port_security_enabled=True, **kwargs):
|
||||
if not networks_client:
|
||||
networks_client = self.networks_client
|
||||
if not tenant_id:
|
||||
tenant_id = networks_client.tenant_id
|
||||
network_name_ = constants.APPLIANCE_NAME_STARTS_WITH + network_name
|
||||
name = data_utils.rand_name(network_name_)
|
||||
# Neutron disables port security by default so we have to check the
|
||||
# config before trying to create the network with port_security_enabled
|
||||
if CONF.network_feature_enabled.port_security:
|
||||
port_security_enabled = True
|
||||
result = networks_client.create_network(
|
||||
name=name, tenant_id=tenant_id,
|
||||
port_security_enabled=port_security_enabled, **kwargs)
|
||||
network = result['network']
|
||||
self.assertEqual(network['name'], name)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network, network['id'])
|
||||
self.topology_networks[network_name] = network
|
||||
return network
|
||||
|
||||
def create_topology_subnet(
|
||||
self, subnet_name, network, routers_client=None,
|
||||
subnets_client=None, router_id=None, ip_version=4, cidr=None,
|
||||
mask_bits=None, **kwargs):
|
||||
subnet_name_ = constants.APPLIANCE_NAME_STARTS_WITH + subnet_name
|
||||
if not subnets_client:
|
||||
subnets_client = self.subnets_client
|
||||
if not routers_client:
|
||||
routers_client = self.routers_client
|
||||
|
||||
def cidr_in_use(cidr, tenant_id):
|
||||
"""Check cidr existence
|
||||
:returns: True if subnet with cidr already exist in tenant
|
||||
False else
|
||||
"""
|
||||
cidr_in_use = \
|
||||
self.os_admin.subnets_client.list_subnets(tenant_id=tenant_id,
|
||||
cidr=cidr)['subnets']
|
||||
return len(cidr_in_use) != 0
|
||||
|
||||
if ip_version == 6:
|
||||
tenant_cidr = (cidr or netaddr.IPNetwork(
|
||||
CONF.network.project_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
|
||||
else:
|
||||
tenant_cidr = cidr or netaddr.IPNetwork(
|
||||
CONF.network.project_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.project_network_mask_bits
|
||||
str_cidr = str(tenant_cidr)
|
||||
if not cidr:
|
||||
# Repeatedly attempt subnet creation with sequential cidr
|
||||
# blocks until an unallocated block is found.
|
||||
for subnet_cidr in tenant_cidr.subnet(mask_bits):
|
||||
str_cidr = str(subnet_cidr)
|
||||
if not cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
|
||||
break
|
||||
else:
|
||||
if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
|
||||
LOG.error("Specified subnet %r is in use" % str_cidr)
|
||||
raise
|
||||
subnet = dict(name=data_utils.rand_name(subnet_name_),
|
||||
network_id=network['id'], tenant_id=network['tenant_id'],
|
||||
cidr=str_cidr, ip_version=ip_version, **kwargs)
|
||||
try:
|
||||
result = None
|
||||
result = subnets_client.create_subnet(**subnet)
|
||||
except lib_exc.Conflict as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
self.assertIsNotNone(result, 'Unable to allocate tenant network')
|
||||
subnet = result['subnet']
|
||||
self.assertEqual(subnet['cidr'], str_cidr)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
subnets_client.delete_subnet, subnet['id'])
|
||||
self.topology_subnets[subnet_name] = subnet
|
||||
if router_id:
|
||||
if not routers_client:
|
||||
routers_client = self.routers_client
|
||||
routers_client.add_router_interface(
|
||||
router_id, subnet_id=subnet["id"])
|
||||
self.addCleanup(
|
||||
test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.remove_router_interface, router_id,
|
||||
subnet_id=subnet["id"])
|
||||
return subnet
|
||||
|
||||
def create_topology_security_group(self, **kwargs):
|
||||
return self._create_security_group(**kwargs)
|
||||
|
||||
def create_topology_instance(
|
||||
self, server_name, networks, security_groups=None,
|
||||
config_drive=None, keypair=None, image_id=None,
|
||||
clients=None, create_floating_ip=True, **kwargs):
|
||||
# Define security group for server.
|
||||
if security_groups:
|
||||
kwargs["security_groups"] = security_groups
|
||||
else:
|
||||
_sg = self.create_topology_security_group()
|
||||
_security_groups = [{'name': _sg['name']}]
|
||||
kwargs["security_groups"] = _security_groups
|
||||
# Define config drive for server.
|
||||
if not config_drive:
|
||||
kwargs["config_drive"] = self.topology_config_drive
|
||||
else:
|
||||
kwargs["config_drive"] = config_drive
|
||||
if not keypair:
|
||||
keypair = self.create_keypair()
|
||||
self.topology_keypairs[keypair['name']] = keypair
|
||||
kwargs["key_name"] = keypair['name']
|
||||
else:
|
||||
kwargs["key_name"] = keypair['name']
|
||||
# Define image id for server.
|
||||
if image_id:
|
||||
kwargs["image_id"] = image_id
|
||||
server_name_ = constants.APPLIANCE_NAME_STARTS_WITH + server_name
|
||||
# Collect all the networks for server.
|
||||
networks_ = []
|
||||
for net in networks:
|
||||
net_ = {"uuid": net["id"]}
|
||||
networks_.append(net_)
|
||||
# Deploy server with all teh args.
|
||||
server = self.create_server(
|
||||
name=server_name_, networks=networks_, clients=clients, **kwargs)
|
||||
if create_floating_ip:
|
||||
floating_ip = self.create_floating_ip(server)
|
||||
server["floating_ip"] = floating_ip
|
||||
self.topology_servers_floating_ip.append(floating_ip)
|
||||
else:
|
||||
floating_ip = None
|
||||
server_details = self.server_details(server=server,
|
||||
floating_ip=floating_ip,
|
||||
networks=networks)
|
||||
self.servers_details[server_name] = server_details
|
||||
self.topology_servers[server_name] = server
|
||||
return server
|
@ -1,172 +0,0 @@
|
||||
# Copyright 2017 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import test_utils
|
||||
|
||||
from vmware_nsx_tempest._i18n import _
|
||||
from vmware_nsx_tempest.common import constants
|
||||
from vmware_nsx_tempest.lib import traffic_manager
|
||||
from vmware_nsx_tempest.services import nsx_client
|
||||
from vmware_nsx_tempest.services import openstack_network_clients
|
||||
|
||||
LOG = constants.log.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
# It includes feature related function such CRUD Mdproxy, L2GW or QoS
|
||||
class FeatureManager(traffic_manager.TrafficManager):
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
"""
|
||||
Create various client connections. Such as NSXv3 and L2 Gateway.
|
||||
"""
|
||||
super(FeatureManager, cls).setup_clients()
|
||||
try:
|
||||
manager = getattr(cls.os_admin, "manager", cls.os_admin)
|
||||
net_client = getattr(manager, "networks_client")
|
||||
_params = manager.default_params_withy_timeout_values.copy()
|
||||
except AttributeError as attribute_err:
|
||||
LOG.warning(
|
||||
"Failed to locate the attribute, Error: %(err_msg)s",
|
||||
{"err_msg": attribute_err.__str__()})
|
||||
_params = {}
|
||||
cls.l2gw_client = openstack_network_clients.L2GatewayClient(
|
||||
net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
cls.nsx_client = nsx_client.NSXClient(
|
||||
CONF.network.backend,
|
||||
CONF.nsxv3.nsx_manager,
|
||||
CONF.nsxv3.nsx_user,
|
||||
CONF.nsxv3.nsx_password)
|
||||
cls.l2gwc_client = openstack_network_clients.L2GatewayConnectionClient(
|
||||
net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
|
||||
#
|
||||
# L2Gateway base class. To get basics of L2GW.
|
||||
#
|
||||
def create_l2gw(self, l2gw_name, l2gw_param):
|
||||
"""
|
||||
Creates L2GW and returns the response.
|
||||
|
||||
:param l2gw_name: name of the L2GW
|
||||
:param l2gw_param: L2GW parameters
|
||||
|
||||
:return: response of L2GW create API
|
||||
"""
|
||||
LOG.info("l2gw name: %(name)s, l2gw_param: %(devices)s ",
|
||||
{"name": l2gw_name, "devices": l2gw_param})
|
||||
devices = []
|
||||
for device_dict in l2gw_param:
|
||||
interface = [{"name": device_dict["iname"],
|
||||
"segmentation_id": device_dict[
|
||||
"vlans"]}] if "vlans" in device_dict else [
|
||||
{"name": device_dict["iname"]}]
|
||||
device = {"device_name": device_dict["dname"],
|
||||
"interfaces": interface}
|
||||
devices.append(device)
|
||||
l2gw_request_body = {"devices": devices}
|
||||
LOG.info(" l2gw_request_body: %s", l2gw_request_body)
|
||||
rsp = self.l2gw_client.create_l2_gateway(
|
||||
name=l2gw_name, **l2gw_request_body)
|
||||
LOG.info(" l2gw response: %s", rsp)
|
||||
self.addCleanup(
|
||||
test_utils.call_and_ignore_notfound_exc,
|
||||
self.l2gw_client.delete_l2_gateway, rsp[constants.L2GW]["id"])
|
||||
return rsp, devices
|
||||
|
||||
def delete_l2gw(self, l2gw_id):
|
||||
"""
|
||||
Delete L2gw.
|
||||
|
||||
:param l2gw_id: L2GW id to delete l2gw.
|
||||
|
||||
:return: response of the l2gw delete API.
|
||||
"""
|
||||
LOG.info("L2GW id: %(id)s to be deleted.", {"id": l2gw_id})
|
||||
rsp = self.l2gw_client.delete_l2_gateway(l2gw_id)
|
||||
LOG.info("response : %(rsp)s", {"rsp": rsp})
|
||||
return rsp
|
||||
|
||||
def update_l2gw(self, l2gw_id, l2gw_new_name, devices):
|
||||
"""
|
||||
Update existing L2GW.
|
||||
|
||||
:param l2gw_id: L2GW id to update its parameters.
|
||||
:param l2gw_new_name: name of the L2GW.
|
||||
:param devices: L2GW parameters.
|
||||
|
||||
:return: Response of the L2GW update API.
|
||||
"""
|
||||
rsp = self.l2gw_client.update_l2_gateway(l2gw_id,
|
||||
name=l2gw_new_name, **devices)
|
||||
return rsp
|
||||
|
||||
def nsx_bridge_cluster_info(self):
|
||||
"""
|
||||
Collect the device and interface name of the nsx brdige cluster.
|
||||
|
||||
:return: nsx bridge id and display name.
|
||||
"""
|
||||
response = self.nsx_client.get_bridge_cluster_info()
|
||||
if len(response) == 0:
|
||||
raise RuntimeError(_("NSX bridge cluster information is null"))
|
||||
return [(x.get("id"), x.get("display_name")) for x in response]
|
||||
|
||||
def create_l2gw_connection(self, l2gwc_param):
|
||||
"""
|
||||
Creates L2GWC and return the response.
|
||||
|
||||
:param l2gwc_param: L2GWC parameters.
|
||||
|
||||
:return: response of L2GWC create API.
|
||||
"""
|
||||
LOG.info("l2gwc param: %(param)s ", {"param": l2gwc_param})
|
||||
l2gwc_request_body = {"l2_gateway_id": l2gwc_param["l2_gateway_id"],
|
||||
"network_id": l2gwc_param["network_id"]}
|
||||
if "segmentation_id" in l2gwc_param:
|
||||
l2gwc_request_body["segmentation_id"] = l2gwc_param[
|
||||
"segmentation_id"]
|
||||
LOG.info("l2gwc_request_body: %s", l2gwc_request_body)
|
||||
rsp = self.l2gwc_client.create_l2_gateway_connection(
|
||||
**l2gwc_request_body)
|
||||
LOG.info("l2gwc response: %s", rsp)
|
||||
self.addCleanup(
|
||||
test_utils.call_and_ignore_notfound_exc,
|
||||
self.l2gwc_client.delete_l2_gateway_connection,
|
||||
rsp[constants.L2GWC]["id"])
|
||||
return rsp
|
||||
|
||||
def delete_l2gw_connection(self, l2gwc_id):
|
||||
"""
|
||||
Delete L2GWC and returns the response.
|
||||
|
||||
:param l2gwc_id: L2GWC id to delete L2GWC.
|
||||
|
||||
:return: response of the l2gwc delete API.
|
||||
"""
|
||||
LOG.info("L2GW connection id: %(id)s to be deleted",
|
||||
{"id": l2gwc_id})
|
||||
rsp = self.l2gwc_client.delete_l2_gateway_connection(l2gwc_id)
|
||||
LOG.info("response : %(rsp)s", {"rsp": rsp})
|
||||
return rsp
|
@ -1,65 +0,0 @@
|
||||
# Copyright 2017 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from vmware_nsx_tempest.lib import appliance_manager
|
||||
|
||||
|
||||
class TrafficManager(appliance_manager.ApplianceManager):
|
||||
def check_server_internal_ips_using_floating_ip(self, floating_ip, server,
|
||||
address_list, should_connect=True):
|
||||
ip_address = floating_ip['floating_ip_address']
|
||||
private_key = self.get_server_key(server)
|
||||
ssh_source = self.get_remote_client(ip_address,
|
||||
private_key=private_key)
|
||||
for remote_ip in address_list:
|
||||
self.check_remote_connectivity(ssh_source, remote_ip,
|
||||
should_succeed=should_connect)
|
||||
|
||||
def check_network_internal_connectivity(self, network, floating_ip, server,
|
||||
should_connect=True):
|
||||
"""via ssh check VM internal connectivity:
|
||||
- ping internal gateway and DHCP port, implying in-tenant connectivity
|
||||
pinging both, because L3 and DHCP agents might be on different nodes
|
||||
"""
|
||||
# get internal ports' ips:
|
||||
# get all network ports in the new network
|
||||
internal_ips = self.get_internal_ips(server, network, device="network")
|
||||
self.check_server_internal_ips_using_floating_ip(floating_ip, server,
|
||||
internal_ips, should_connect)
|
||||
|
||||
def check_vm_internal_connectivity(self, network, floating_ip, server,
|
||||
should_connect=True):
|
||||
# test internal connectivity to the other VM on the same network
|
||||
compute_ips = self.get_internal_ips(server, network, device="compute")
|
||||
self.check_server_internal_ips_using_floating_ip(floating_ip, server,
|
||||
compute_ips, should_connect)
|
||||
|
||||
def using_floating_ip_check_server_and_project_network_connectivity(self,
|
||||
server_details, network=None):
|
||||
if not network:
|
||||
network = server_details.networks[0]
|
||||
floating_ip = server_details.floating_ip
|
||||
server = server_details.server
|
||||
self.check_network_internal_connectivity(network, floating_ip, server)
|
||||
self.check_vm_internal_connectivity(network, floating_ip, server)
|
||||
|
||||
def check_cross_network_connectivity(self, network1,
|
||||
floating_ip_on_network2, server_on_network2, should_connect=False):
|
||||
# test internal connectivity to the other VM on the same network
|
||||
remote_ips = self.get_internal_ips(server_on_network2, network1,
|
||||
device="compute")
|
||||
self.check_server_internal_ips_using_floating_ip(
|
||||
floating_ip_on_network2, server_on_network2, remote_ips,
|
||||
should_connect)
|
@ -1,60 +0,0 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
|
||||
from tempest import config
|
||||
from tempest.test_discover import plugins
|
||||
|
||||
from vmware_nsx_tempest import config as config_nsx
|
||||
|
||||
|
||||
_opts = [
|
||||
(config_nsx.scenario_group, config_nsx.ScenarioGroup),
|
||||
(config_nsx.network_group, config_nsx.NetworkGroup),
|
||||
(config_nsx.nsxv_group, config_nsx.NSXvGroup),
|
||||
(config_nsx.l2gw_group, config_nsx.L2gwGroup),
|
||||
(config_nsx.nsxv3_group, config_nsx.NSXv3Group)
|
||||
]
|
||||
|
||||
|
||||
class VMwareNsxTempestPlugin(plugins.TempestPlugin):
|
||||
|
||||
"""Our addon configuration is defined at vmware_nsx_tempest/config.py
|
||||
|
||||
1. register_opts() to register group/opts to Tempest
|
||||
2. get_opt_lists() to pass config to Tempest
|
||||
|
||||
The official plugin is defined at
|
||||
https://docs.openstack.org/developer/tempest/plugin.html
|
||||
"""
|
||||
|
||||
def load_tests(self):
|
||||
mydir = os.path.dirname(os.path.abspath(__file__))
|
||||
base_path = os.path.split(mydir)[0]
|
||||
test_dir = "vmware_nsx_tempest/tests"
|
||||
test_fullpath = os.path.join(base_path, test_dir)
|
||||
return test_fullpath, base_path
|
||||
|
||||
def register_opts(self, conf):
|
||||
conf.register_opt(config_nsx.service_option,
|
||||
group='service_available')
|
||||
for group, option in _opts:
|
||||
config.register_opt_group(conf, group, option)
|
||||
|
||||
def get_opt_lists(self):
|
||||
return [(group.name, option) for group, option in _opts
|
||||
].append(('service_available', [config_nsx.service_option]))
|
@ -1,42 +0,0 @@
|
||||
This folder contains services for managing NSX-v, NSX-v3.
|
||||
|
||||
Services provided:
|
||||
|
||||
# OpenStack tempest service clients
|
||||
l2_gateway_client.py
|
||||
based on tempest BaseNetworkClient implements client APIs to manage
|
||||
neutron l2-gateway resources
|
||||
|
||||
l2_gateway_connection_client.py
|
||||
based on tempest BaseNetworkClient implements client APIs to manage
|
||||
neutron l2-gateway-connection resources
|
||||
|
||||
lbaas v2 clients: ported from neutron_lbaas to comply with tempest services.
|
||||
lbaas/load_balancers_client.py
|
||||
lbaas/listeners_client.py
|
||||
lbaas/pools_client.py
|
||||
lbaas/health_monitorys_client.py
|
||||
lbaas/members_client.py
|
||||
|
||||
lbv1_client.py
|
||||
based on tempest BaseNetworkClient implements client APIs to manage
|
||||
neutron v1 load-balancer resources
|
||||
|
||||
network_client_base.py
|
||||
due to tempest network services are in the process of migrating to
|
||||
tempest-lib, some features to be used by tests are not in
|
||||
BaseNetworkClient. Inherent here and used by all vmware-nsx-tempest
|
||||
client for now.
|
||||
|
||||
# NSX speific services
|
||||
nsxv_client.py implements API to manage NSX-v components
|
||||
- Logical switch (Tenant network)
|
||||
- Edge (Service edge, DHCP edge, and VDR edge)
|
||||
- DFW firewall rules (Security group)
|
||||
- SpoofGuard
|
||||
|
||||
nsxv3_client.py implements API to manage NSX backend resources:
|
||||
- logical switch
|
||||
- firewall section
|
||||
- nsgroup
|
||||
- logical router
|
@ -1,103 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Copyright 2015 Hewlett-Packard Development Company, L.P.
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
|
||||
LOG = constants.log.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
SEGMENTATION_ID_DELIMITER = "#"
|
||||
INTERFACE_SEG_ID_DELIMITER = "|"
|
||||
DEVICE_INTERFACE_DELIMITER = "::"
|
||||
DEVICE_DELIMITER = ","
|
||||
INTERFACE_DELIMITER = ";"
|
||||
"""
|
||||
Sample for providing input for gateway creation in config is noted below
|
||||
Options provide flexibility to user to create l2gateway
|
||||
For single device ,single interface with single vlan
|
||||
l2gw_switch = device_name1::int_name1|vlan1
|
||||
For single device multiple interfaces with single or multiple vlans
|
||||
l2gw_switch = device_name1::int_name1|vlan1#vlan2;int_name2|vlan3
|
||||
For multiple devices with mutiple interfaces having single or mutiple vlan
|
||||
l2gw_switch = device_n1::int_n1|vlan1,device_n2::int_n2|vlan2#vlan3
|
||||
"""
|
||||
|
||||
|
||||
def get_interface(interfaces):
|
||||
interface_dict = []
|
||||
for interface in interfaces:
|
||||
if INTERFACE_SEG_ID_DELIMITER in interface:
|
||||
int_name = interface.split(INTERFACE_SEG_ID_DELIMITER)[0]
|
||||
segid = interface.split(INTERFACE_SEG_ID_DELIMITER)[1]
|
||||
if SEGMENTATION_ID_DELIMITER in segid:
|
||||
segid = segid.split(SEGMENTATION_ID_DELIMITER)
|
||||
else:
|
||||
segid = [segid]
|
||||
interface_detail = {'name': int_name, 'segmentation_id': segid}
|
||||
else:
|
||||
interface_detail = {'name': interface}
|
||||
interface_dict.append(interface_detail)
|
||||
return interface_dict
|
||||
|
||||
|
||||
def get_device_interface(device_name, interface):
|
||||
if INTERFACE_DELIMITER in interface:
|
||||
interface_dict = interface.split(INTERFACE_DELIMITER)
|
||||
interfaces = get_interface(interface_dict)
|
||||
else:
|
||||
interfaces = get_interface([interface])
|
||||
device = {'device_name': device_name,
|
||||
'interfaces': interfaces}
|
||||
return device
|
||||
|
||||
|
||||
def get_l2gw_body(l2gw_conf):
|
||||
device_dict = []
|
||||
devices = l2gw_conf.split(DEVICE_DELIMITER)
|
||||
for device in devices:
|
||||
if DEVICE_INTERFACE_DELIMITER in device:
|
||||
device_name = device.split(DEVICE_INTERFACE_DELIMITER)[0]
|
||||
interface = device.split(DEVICE_INTERFACE_DELIMITER)[1]
|
||||
device = get_device_interface(device_name, interface)
|
||||
device_dict.append(device)
|
||||
body = {'devices': device_dict}
|
||||
return body
|
||||
|
||||
|
||||
def form_dict_devices(devices):
|
||||
seg_ids = []
|
||||
devices1 = dict()
|
||||
int_seg = []
|
||||
for device in devices:
|
||||
device_name = device['device_name']
|
||||
interfaces = device['interfaces']
|
||||
for interface in interfaces:
|
||||
interface_name = interface['name']
|
||||
int_seg.append(interface_name)
|
||||
seg_id = interface['segmentation_id']
|
||||
if type(seg_id) is list:
|
||||
for segid in seg_id:
|
||||
seg_ids.append(segid)
|
||||
else:
|
||||
seg_ids.append(seg_id)
|
||||
int_seg.append(seg_id)
|
||||
devices1.setdefault(device_name, []).append(int_seg)
|
||||
int_seg = []
|
||||
return devices1
|
@ -1,131 +0,0 @@
|
||||
# Copyright (c) 2015 Midokura SARL
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FWaaSV1Client(base.BaseNetworkClient):
|
||||
|
||||
def create_firewall(self, **kwargs):
|
||||
uri = '/fw/firewalls'
|
||||
post_data = {'firewall': kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_firewall(self, firewall_id, **kwargs):
|
||||
uri = '/fw/firewalls/%s' % firewall_id
|
||||
post_data = {'firewall': kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_firewall(self, firewall_id, **fields):
|
||||
uri = '/fw/firewalls/%s' % firewall_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_firewall(self, firewall_id):
|
||||
uri = '/fw/firewalls/%s' % firewall_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_firewalls(self, **filters):
|
||||
uri = '/fw/firewalls'
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
def create_firewall_rule(self, **kwargs):
|
||||
uri = '/fw/firewall_rules'
|
||||
post_data = {'firewall_rule': kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_firewall_rule(self, firewall_rule_id, **kwargs):
|
||||
uri = '/fw/firewall_rules/%s' % firewall_rule_id
|
||||
post_data = {'firewall_rule': kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_firewall_rule(self, firewall_rule_id, **fields):
|
||||
uri = '/fw/firewall_rules/%s' % firewall_rule_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_firewall_rule(self, firewall_rule_id):
|
||||
uri = '/fw/firewall_rules/%s' % firewall_rule_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_firewall_rules(self, **filters):
|
||||
uri = '/fw/firewall_rules'
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
def is_resource_deleted(self, id):
|
||||
try:
|
||||
self.show_firewall(id)
|
||||
except lib_exc.NotFound:
|
||||
return True
|
||||
return False
|
||||
|
||||
def create_firewall_policy(self, **kwargs):
|
||||
uri = '/fw/firewall_policies'
|
||||
post_data = {'firewall_policy': kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_firewall_policy(self, firewall_policy_id, **kwargs):
|
||||
uri = '/fw/firewall_policies/%s' % firewall_policy_id
|
||||
post_data = {'firewall_policy': kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_firewall_policy(self, firewall_policy_id, **fields):
|
||||
uri = '/fw/firewall_policies/%s' % firewall_policy_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_firewall_policy(self, firewall_policy_id):
|
||||
uri = '/fw/firewall_policies/%s' % firewall_policy_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_firewall_policies(self, **filters):
|
||||
uri = '/fw/firewall_policies'
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
def insert_firewall_rule_in_policy(self, firewall_policy_id,
|
||||
firewall_rule_id, insert_after='',
|
||||
insert_before=''):
|
||||
uri = '/fw/firewall_policies/%s/insert_rule' % firewall_policy_id
|
||||
data = {
|
||||
'firewall_rule_id': firewall_rule_id,
|
||||
'insert_after': insert_after,
|
||||
'insert_before': insert_before,
|
||||
}
|
||||
return self.update_resource(uri, data)
|
||||
|
||||
def remove_firewall_rule_from_policy(self, firewall_policy_id,
|
||||
firewall_rule_id):
|
||||
uri = '/fw/firewall_policies/%s/remove_rule' % firewall_policy_id
|
||||
data = {
|
||||
'firewall_rule_id': firewall_rule_id,
|
||||
}
|
||||
return self.update_resource(uri, data)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = FWaaSV1Client(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,82 +0,0 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from oslo_log import log
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class L2GatewayClient(base.BaseNetworkClient):
|
||||
"""
|
||||
Request resources via API for L2GatewayClient
|
||||
l2 gateway create request
|
||||
l2 gateway update request
|
||||
l2 gateway show request
|
||||
l2 gateway delete request
|
||||
l2 gateway list all request
|
||||
"""
|
||||
|
||||
def create_l2_gateway(self, **kwargs):
|
||||
uri = constants.L2_GWS_BASE_URI
|
||||
post_data = {constants.L2GW: kwargs}
|
||||
LOG.info("URI : %(uri)s, posting data : %(post_data)s",
|
||||
{"uri": uri, "post_data": post_data})
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l2_gateway(self, l2_gateway_id, **kwargs):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
post_data = {constants.L2GW: kwargs}
|
||||
constants.LOG.info(
|
||||
"URI : %(uri)s, posting data : %(post_data)s",
|
||||
{"uri": uri, "post_data": post_data})
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l2_gateway(self, l2_gateway_id, **fields):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l2_gateway(self, l2_gateway_id):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l2_gateways(self, **filters):
|
||||
uri = constants.L2_GWS_BASE_URI
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""
|
||||
Create a l2-gateway client from manager or networks_client
|
||||
"""
|
||||
try:
|
||||
manager = getattr(client_mgr, "manager", client_mgr)
|
||||
net_client = getattr(manager, "networks_client")
|
||||
_params = manager.default_params_withy_timeout_values.copy()
|
||||
except AttributeError as attribute_err:
|
||||
LOG.warning("Failed to locate the attribute, Error: %(err_msg)s",
|
||||
{"err_msg": attribute_err.__str__()})
|
||||
_params = {}
|
||||
client = L2GatewayClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,69 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class L2GatewayConnectionClient(base.BaseNetworkClient):
|
||||
resource = 'l2_gateway_connection'
|
||||
resource_plural = 'l2_gateway_connections'
|
||||
path = 'l2-gateway-connections'
|
||||
resource_base_path = '/%s' % path
|
||||
resource_object_path = '/%s/%%s' % path
|
||||
|
||||
def create_l2_gateway_connection(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l2_gateway_connection(self, l2_gateway_id, **kwargs):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l2_gateway_connection(self, l2_gateway_id, **fields):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l2_gateway_connection(self, l2_gateway_id):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l2_gateway_connections(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""create a l2-gateway client from manager or networks_client
|
||||
|
||||
For itempest user:
|
||||
from itempest import load_our_solar_system as osn
|
||||
from vmware_nsx_tempest.services import l2_gateway_connection_client
|
||||
l2gwc_client = l2_gateway_connection_client.get_client(osn.adm.manager)
|
||||
For tempest user:
|
||||
l2gwc_client = l2_gateway_connection_client.get_client(cls.os_adm)
|
||||
"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = L2GatewayConnectionClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,72 +0,0 @@
|
||||
# Copyright 2014 Rackspace US Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class HealthMonitorsClient(base.BaseNetworkClient):
|
||||
resource = 'healthmonitor'
|
||||
resource_plural = 'healthmonitors'
|
||||
path = 'lbaas/healthmonitors'
|
||||
resource_base_path = '/%s' % path
|
||||
resource_object_path = '/%s/%%s' % path
|
||||
|
||||
def create_health_monitor(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_health_monitor(self, health_monitor_id, **kwargs):
|
||||
uri = self.resource_object_path % health_monitor_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_health_monitor(self, health_monitor_id, **fields):
|
||||
uri = self.resource_object_path % health_monitor_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_health_monitor(self, health_monitor_id):
|
||||
uri = self.resource_object_path % health_monitor_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_health_monitors(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""create a lbaas health_monitors client from manager or networks_client
|
||||
|
||||
For itempest user:
|
||||
from itempest import load_our_solar_system as osn
|
||||
from vmware_nsx_tempest.services.lbaas import health_monitors_client
|
||||
healthmonitors_client = health_monitors_client.get_client(
|
||||
osn.adm.manager)
|
||||
For tempest user:
|
||||
healthmonitors_client = health_monitors_client.get_client(osn.adm)
|
||||
"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = HealthMonitorsClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,59 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class L7PoliciesClient(base.BaseNetworkClient):
|
||||
resource = 'l7policy'
|
||||
resource_plural = 'l7policies'
|
||||
resource_base_path = '/lbaas/l7policies'
|
||||
resource_object_path = '/lbaas/l7policies/%s'
|
||||
|
||||
def create_l7policy(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l7policy(self, policy_id, **kwargs):
|
||||
uri = self.resource_object_path % (policy_id)
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l7policy(self, policy_id, **fields):
|
||||
uri = self.resource_object_path % (policy_id)
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l7policy(self, policy_id):
|
||||
uri = self.resource_object_path % (policy_id)
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l7policies(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""create a lbaas l7policies client from manager or networks_client"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = L7PoliciesClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,60 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class L7RulesClient(base.BaseNetworkClient):
|
||||
resource = 'rule'
|
||||
resource_plural = 'rules'
|
||||
resource_base_path = '/lbaas/l7policies/%s/rules'
|
||||
resource_object_path = '/lbaas/l7policies/%s/rules/%s'
|
||||
|
||||
def create_l7rule(self, policy_id, **kwargs):
|
||||
uri = self.resource_base_path % policy_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l7rule(self, policy_id, rule_id, **kwargs):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l7rule(self, policy_id, rule_id, **fields):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l7rule(self, policy_id, rule_id):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l7rules(self, policy_id, **filters):
|
||||
uri = self.resource_base_path % policy_id
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""create a lbaas l7rules client from manager or networks_client"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = L7RulesClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,71 +0,0 @@
|
||||
# Copyright 2014 Rackspace US Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class ListenersClient(base.BaseNetworkClient):
|
||||
resource = 'listener'
|
||||
resource_plural = 'listeners'
|
||||
path = 'lbaas/listeners'
|
||||
resource_base_path = '/%s' % path
|
||||
resource_object_path = '/%s/%%s' % path
|
||||
|
||||
def create_listener(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_listener(self, listener_id, **kwargs):
|
||||
uri = self.resource_object_path % listener_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_listener(self, listener_id, **fields):
|
||||
uri = self.resource_object_path % listener_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_listener(self, listener_id):
|
||||
uri = self.resource_object_path % listener_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_listeners(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
|
||||
"""create a lbaas listener client from manager or networks_client
|
||||
|
||||
For itempest user:
|
||||
from itempest import load_our_solar_system as osn
|
||||
from vmware_nsx_tempest.services.lbaas import pools_client
|
||||
lbaas_client = pools_client.get_client(osn.adm.manager)
|
||||
For tempest user:
|
||||
lbaas_client = pools_client.get_client(osn.adm)
|
||||
"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = ListenersClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
return client
|
@ -1,141 +0,0 @@
|
||||
# Copyright 2014 Rackspace US Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import time
|
||||
|
||||
from tempest.lib import exceptions
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest._i18n import _
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
LB_NOTFOUND = "loadbalancer {lb_id} not found"
|
||||
|
||||
|
||||
class LoadBalancersClient(base.BaseNetworkClient):
|
||||
resource = 'loadbalancer'
|
||||
resource_plural = 'loadbalancers'
|
||||
path = 'lbaas/loadbalancers'
|
||||
resource_base_path = '/%s' % path
|
||||
resource_object_path = '/%s/%%s' % path
|
||||
resource_object_status_path = '/%s/%%s/statuses' % path
|
||||
resource_object_stats_path = '/%s/%%s/stats' % path
|
||||
|
||||
def create_load_balancer(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_load_balancer(self, load_balancer_id, **kwargs):
|
||||
uri = self.resource_object_path % load_balancer_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_load_balancer(self, load_balancer_id, **fields):
|
||||
uri = self.resource_object_path % load_balancer_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def show_load_balancer_status_tree(self, load_balancer_id, **fields):
|
||||
uri = self.resource_object_status_path % load_balancer_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def show_load_balancer_stats(self, load_balancer_id, **fields):
|
||||
uri = self.resource_object_stats_path % load_balancer_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_load_balancer(self, load_balancer_id):
|
||||
uri = self.resource_object_path % load_balancer_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_load_balancers(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
def wait_for_load_balancer_status(self, load_balancer_id,
|
||||
provisioning_status='ACTIVE',
|
||||
operating_status='ONLINE',
|
||||
is_delete_op=False):
|
||||
"""Must have utility method for load-balancer CRUD operation.
|
||||
|
||||
This is the method you must call to make sure load_balancer_id is
|
||||
in provisioning_status=ACTIVE and opration_status=ONLINE status
|
||||
before manipulating any lbaas resource under load_balancer_id.
|
||||
"""
|
||||
|
||||
interval_time = self.build_interval
|
||||
timeout = self.build_timeout
|
||||
end_time = time.time() + timeout
|
||||
lb = None
|
||||
while time.time() < end_time:
|
||||
try:
|
||||
lb = self.show_load_balancer(load_balancer_id)
|
||||
if not lb:
|
||||
if is_delete_op:
|
||||
break
|
||||
else:
|
||||
raise Exception(
|
||||
LB_NOTFOUND.format(lb_id=load_balancer_id))
|
||||
lb = lb.get(self.resource, lb)
|
||||
if (lb.get('provisioning_status') == provisioning_status and
|
||||
lb.get('operating_status') == operating_status):
|
||||
break
|
||||
time.sleep(interval_time)
|
||||
except exceptions.NotFound as e:
|
||||
if is_delete_op:
|
||||
break
|
||||
else:
|
||||
raise e
|
||||
else:
|
||||
if is_delete_op:
|
||||
raise exceptions.TimeoutException(
|
||||
_("Waited for load balancer {lb_id} to be deleted for "
|
||||
"{timeout} seconds but can still observe that it "
|
||||
"exists.").format(
|
||||
lb_id=load_balancer_id,
|
||||
timeout=timeout))
|
||||
else:
|
||||
raise exceptions.TimeoutException(
|
||||
_("Wait for load balancer ran for {timeout} seconds and "
|
||||
"did not observe {lb_id} reach {provisioning_status} "
|
||||
"provisioning status and {operating_status} "
|
||||
"operating status.").format(
|
||||
timeout=timeout,
|
||||
lb_id=load_balancer_id,
|
||||
provisioning_status=provisioning_status,
|
||||
operating_status=operating_status))
|
||||
return lb
|
||||
|
||||
|
||||
def get_client(client_mgr):
    """create a lbaas load-balancers client from manager or networks_client

    For itempest user:
        from itempest import load_our_solar_system as osn
        from vmware_nsx_tempest.services.lbaas import load_balancers_client
        lbaas_client = load_balancers_client.get_client(osn.adm.manager)
    For tempest user:
        lbaas_client = load_balancers_client.get_client(osn.adm)
    """
    # Accept either a credentials manager or an object wrapping one.
    mgr = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        params = {}
    return LoadBalancersClient(net_client.auth_provider,
                               net_client.service,
                               net_client.region,
                               net_client.endpoint_type,
                               **params)
|
@ -1,70 +0,0 @@
|
||||
# Copyright 2014 Rackspace US Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class MembersClient(base.BaseNetworkClient):
    """REST client for lbaas v2 pool members.

    Members are nested under a pool, so every call needs the owning
    pool_id to build the request URI.
    """

    resource = 'member'
    resource_plural = 'members'
    path = 'lbaas/members'
    resource_base_path = '/lbaas/pools/%s/members'
    resource_object_path = '/lbaas/pools/%s/members/%s'

    def create_member(self, pool_id, **kwargs):
        """Create a member under the given pool."""
        return self.create_resource(self.resource_base_path % pool_id,
                                    {self.resource: kwargs})

    def update_member(self, pool_id, member_id, **kwargs):
        """Update attributes of an existing member."""
        return self.update_resource(
            self.resource_object_path % (pool_id, member_id),
            {self.resource: kwargs})

    def show_member(self, pool_id, member_id, **fields):
        """Show a single member, optionally restricted to given fields."""
        return self.show_resource(
            self.resource_object_path % (pool_id, member_id), **fields)

    def delete_member(self, pool_id, member_id):
        """Delete a member from its pool."""
        return self.delete_resource(
            self.resource_object_path % (pool_id, member_id))

    def list_members(self, pool_id, **filters):
        """List all members of the given pool."""
        return self.list_resources(self.resource_base_path % pool_id,
                                   **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
    """create a lbaas members client from manager or networks_client

    For itempest user:
        from itempest import load_our_solar_system as osn
        from vmware_nsx_tempest.services.lbaas import members_client
        members_client = members_client.get_client(osn.adm.manager)
    For tempest user:
        members_client = members_client.get_client(osn.adm)
    """
    # Accept either a credentials manager or an object wrapping one.
    mgr = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        params = {}
    return MembersClient(net_client.auth_provider,
                         net_client.service,
                         net_client.region,
                         net_client.endpoint_type,
                         **params)
|
@ -1,70 +0,0 @@
|
||||
# Copyright 2014 Rackspace US Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class PoolsClient(base.BaseNetworkClient):
    """REST client for lbaas v2 pools (/lbaas/pools)."""

    resource = 'pool'
    resource_plural = 'pools'
    path = 'lbaas/pools'
    # Same values as '/%s' % path and '/%s/%%s' % path, spelled out.
    resource_base_path = '/lbaas/pools'
    resource_object_path = '/lbaas/pools/%s'

    def create_pool(self, **kwargs):
        """Create a pool from the given attributes."""
        return self.create_resource(self.resource_base_path,
                                    {self.resource: kwargs})

    def update_pool(self, pool_id, **kwargs):
        """Update attributes of an existing pool."""
        return self.update_resource(self.resource_object_path % pool_id,
                                    {self.resource: kwargs})

    def show_pool(self, pool_id, **fields):
        """Show a single pool, optionally restricted to given fields."""
        return self.show_resource(self.resource_object_path % pool_id,
                                  **fields)

    def delete_pool(self, pool_id):
        """Delete the given pool."""
        return self.delete_resource(self.resource_object_path % pool_id)

    def list_pools(self, **filters):
        """List all pools, optionally narrowed by query filters."""
        return self.list_resources(self.resource_base_path, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr):
    """create a lbaas pools client from manager or networks_client

    For itempest user:
        from itempest import load_our_solar_system as osn
        from vmware_nsx_tempest.services.lbaas import pools_client
        pools_client = pools_client.get_client(osn.adm.manager)
    For tempest user:
        pools_client = pools_client.get_client(osn.adm)
    """
    # Accept either a credentials manager or an object wrapping one.
    mgr = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        params = {}
    return PoolsClient(net_client.auth_provider,
                       net_client.service,
                       net_client.region,
                       net_client.endpoint_type,
                       **params)
|
@ -1,320 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import time
|
||||
|
||||
from tempest.lib.common.utils import misc as misc_utils
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from tempest import exceptions
|
||||
from vmware_nsx_tempest._i18n import _
|
||||
from vmware_nsx_tempest.services import network_client_base as base
|
||||
|
||||
POOL_RID = 'pools'
|
||||
VIP_RID = 'vips'
|
||||
HEALTHMONITOR_RID = 'health_monitors'
|
||||
MEMBER_RID = 'members'
|
||||
|
||||
|
||||
class LoadBalancerV1Client(base.BaseNetworkClient):
    """Client for the deprecated neutron-lbaas v1 API.

    All v1 resources (pools, vips, members, health_monitors) live under
    the /lb/ URI prefix; the generic _*_lb helpers below derive request
    paths from the resource name via _g_resource_namelist().
    """

    def _list_lb(self, lb_resource, **filters):
        """List objects of a /lb/<resource> collection."""
        resource_name_s, resource_name_p = _g_resource_namelist(lb_resource)
        req_uri = '/lb/%s' % (resource_name_p)
        return self.list_resources(req_uri, **filters)

    def _show_lb(self, lb_resource, resource_id, **fields):
        """Show one /lb/<resource> object by id."""
        resource_name_s, resource_name_p = _g_resource_namelist(lb_resource)
        req_uri = '/lb/%s/%s' % (resource_name_p, resource_id)
        return self.show_resource(req_uri, **fields)

    def _delete_lb(self, lb_resource, resource_id):
        """Delete one /lb/<resource> object by id."""
        resource_name_s, resource_name_p = _g_resource_namelist(lb_resource)
        req_uri = '/lb/%s/%s' % (resource_name_p, resource_id)
        return self.delete_resource(req_uri)

    def _create_lb(self, lb_resource, **kwargs):
        """Create a /lb/<resource> object; kwargs become its attributes."""
        resource_name_s, resource_name_p = _g_resource_namelist(lb_resource)
        req_uri = '/lb/%s' % (resource_name_p)
        post_body = {resource_name_s: kwargs}
        return self.create_resource(req_uri, post_body)

    def _update_lb(self, lb_resource, resource_id, **kwargs):
        """Update a /lb/<resource> object; kwargs become its attributes."""
        resource_name_s, resource_name_p = _g_resource_namelist(lb_resource)
        req_uri = '/lb/%s/%s' % (resource_name_p, resource_id)
        post_body = {resource_name_s: kwargs}
        return self.update_resource(req_uri, post_body)

    def show_agent_hosting_pool(self, pool_id):
        """Get loadbalancer agent hosting a pool."""
        req_uri = "/lb/pools/%s/loadbalancer-agent" % (pool_id)
        return self.show_resource(req_uri)

    def associate_health_monitor_with_pool(self, health_monitor_id, pool_id):
        """Create a mapping between a health monitor and a pool."""
        post_body = {'health_monitor': {'id': health_monitor_id}}
        req_uri = '/lb/pools/%s/%s' % (pool_id, HEALTHMONITOR_RID)
        return self.create_resource(req_uri, post_body)

    def create_health_monitor(self, **kwargs):
        """Create a health monitor.

        Defaults: type=TCP, max_retries=3, timeout=1, delay=4.
        """
        create_kwargs = dict(
            type=kwargs.pop('type', 'TCP'),
            # BUG FIX: this previously popped the misspelled key
            # 'nax_retries', so an explicit max_retries only took effect
            # through the update() below and a caller's 'nax_retries'
            # was silently consumed.
            max_retries=kwargs.pop('max_retries', 3),
            timeout=kwargs.pop('timeout', 1),
            delay=kwargs.pop('delay', 4),
        )
        create_kwargs.update(**kwargs)
        return self._create_lb(HEALTHMONITOR_RID, **create_kwargs)

    def delete_health_monitor(self, health_monitor_id):
        """Delete a given health monitor."""
        return self._delete_lb(HEALTHMONITOR_RID, health_monitor_id)

    def disassociate_health_monitor_with_pool(self, health_monitor_id,
                                              pool_id):
        """Remove a mapping from a health monitor to a pool."""
        req_uri = ('/lb/pools/%s/%s/%s'
                   % (pool_id, HEALTHMONITOR_RID, health_monitor_id))
        return self.delete_resource(req_uri)

    def list_health_monitors(self, **filters):
        """List health monitors that belong to a given tenant."""
        return self._list_lb(HEALTHMONITOR_RID, **filters)

    def show_health_monitor(self, health_monitor_id):
        """Show information of a given health monitor."""
        return self._show_lb(HEALTHMONITOR_RID, health_monitor_id)

    def update_health_monitor(self, health_monitor_id,
                              show_then_update=False, **kwargs):
        """Update a given health monitor.

        With show_then_update=True the current attributes are fetched
        first and kwargs are merged over them.
        """
        body = (self.show_health_monitor(health_monitor_id)['health_monitor']
                if show_then_update else {})
        body.update(**kwargs)
        return self._update_lb(HEALTHMONITOR_RID,
                               health_monitor_id, **body)

    # tempest create_member(self,protocol_port, pool, ip_version)
    # we use pool_id
    def create_member(self, protocol_port, pool_id,
                      ip_version=4, **kwargs):
        """Create a member; a default address is supplied per ip_version."""
        create_kwargs = dict(
            protocol_port=protocol_port,
            pool_id=pool_id,
            address=("fd00:abcd" if ip_version == 6 else "10.0.9.46"),
        )
        create_kwargs.update(**kwargs)
        return self._create_lb(MEMBER_RID, **create_kwargs)

    def delete_member(self, member_id):
        """Delete a given member."""
        return self._delete_lb(MEMBER_RID, member_id)

    def list_members(self, **filters):
        """List members that belong to a given tenant."""
        return self._list_lb(MEMBER_RID, **filters)

    def show_member(self, member_id):
        """Show information of a given member."""
        return self._show_lb(MEMBER_RID, member_id)

    def update_member(self, member_id,
                      show_then_update=False, **kwargs):
        """Update a given member (optionally merging over current state)."""
        body = (self.show_member(member_id)['member']
                if show_then_update else {})
        body.update(**kwargs)
        return self._update_lb(MEMBER_RID, member_id, **body)

    def create_pool(self, name, lb_method, protocol, subnet_id,
                    **kwargs):
        """Create a pool; lb_method/protocol fall back to sane defaults."""
        lb_method = lb_method or 'ROUND_ROBIN'
        protocol = protocol or 'HTTP'
        create_kwargs = dict(
            name=name, lb_method=lb_method,
            protocol=protocol, subnet_id=subnet_id,
        )
        create_kwargs.update(kwargs)
        return self._create_lb(POOL_RID, **create_kwargs)

    def delete_pool(self, pool_id):
        """Delete a given pool."""
        return self._delete_lb(POOL_RID, pool_id)

    def list_pools(self, **filters):
        """List pools that belong to a given tenant."""
        return self._list_lb(POOL_RID, **filters)

    def list_lb_pool_stats(self, pool_id, **filters):
        """Retrieve stats for a given pool."""
        req_uri = '/lb/pools/%s/stats' % (pool_id)
        return self.list_resources(req_uri, **filters)

    def list_pool_on_agents(self, **filters):
        """List the pools on a loadbalancer agent (not implemented)."""
        pass

    def show_pool(self, pool_id):
        """Show information of a given pool."""
        return self._show_lb(POOL_RID, pool_id)

    def update_pool(self, pool_id, show_then_update=False, **kwargs):
        """Update a given pool (optionally merging over current state)."""
        body = (self.show_pool(pool_id)['pool']
                if show_then_update else {})
        body.update(**kwargs)
        return self._update_lb(POOL_RID, pool_id, **body)

    def create_vip(self, pool_id, **kwargs):
        """Create a vip; unset optional attributes are dropped."""
        create_kwargs = dict(
            pool_id=pool_id,
            protocol=kwargs.pop('protocol', 'HTTP'),
            protocol_port=kwargs.pop('protocol_port', 80),
            name=kwargs.pop('name', None),
            address=kwargs.pop('address', None),
        )
        # BUG FIX: previously this popped keys while iterating
        # create_kwargs.keys(), which raises RuntimeError on Python 3
        # ("dictionary changed size during iteration").
        create_kwargs = {k: v for k, v in create_kwargs.items()
                         if v is not None}
        create_kwargs.update(**kwargs)
        # subnet_id needed to create vip
        return self._create_lb(VIP_RID, **create_kwargs)

    def delete_vip(self, vip_id):
        """Delete a given vip."""
        return self._delete_lb(VIP_RID, vip_id)

    def list_vips(self, **filters):
        """List vips that belong to a given tenant."""
        return self._list_lb(VIP_RID, **filters)

    def show_vip(self, vip_id):
        """Show information of a given vip."""
        return self._show_lb(VIP_RID, vip_id)

    def update_vip(self, vip_id, show_then_update=False, **kwargs):
        """Update a given vip (optionally merging over current state)."""
        body = (self.show_vip(vip_id)['vip']
                if show_then_update else {})
        body.update(**kwargs)
        return self._update_lb(VIP_RID, vip_id, **body)

    # Following 3 methods are specifically to load-balancer V1 client.
    # They are being implemented by the pareant tempest.lib.common.rest_client
    # with different calling signatures, only id, no resoure_type. Because,
    # starting in Liberty release, each resource should have its own client.
    # Since V1 is deprecated, we are not going to change it, and
    # copy following 2 methods for V1 LB client only.
    def wait_for_resource_deletion(self, resource_type, id, client=None):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(resource_type, id, client=client):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, resource_type, id, client=None):
        """Return True when show_<resource_type>(id) reports NotFound."""
        if client is None:
            client = self
        method = 'show_' + resource_type
        try:
            getattr(client, method)(id)
        except AttributeError:
            raise Exception(_("Unknown resource type %s ") % resource_type)
        except lib_exc.NotFound:
            return True
        return False

    def wait_for_resource_status(self, fetch, status, interval=None,
                                 timeout=None):
        """This has different calling signature then rest_client.

        @summary: Waits for a network resource to reach a status
        @param fetch: the callable to be used to query the resource status
        @type fecth: callable that takes no parameters and returns the resource
        @param status: the status that the resource has to reach
        @type status: String
        @param interval: the number of seconds to wait between each status
          query
        @type interval: Integer
        @param timeout: the maximum number of seconds to wait for the resource
          to reach the desired status
        @type timeout: Integer
        """
        if not interval:
            interval = self.build_interval
        if not timeout:
            timeout = self.build_timeout
        start_time = time.time()

        while time.time() - start_time <= timeout:
            resource = fetch()
            if resource['status'] == status:
                return
            time.sleep(interval)

        # At this point, the wait has timed out
        message = 'Resource %s' % (str(resource))
        message += ' failed to reach status %s' % status
        message += ' (current: %s)' % resource['status']
        message += ' within the required time %s' % timeout
        caller = misc_utils.find_test_caller()
        if caller:
            message = '(%s) %s' % (caller, message)
        raise exceptions.TimeoutException(message)
|
||||
|
||||
|
||||
def _g_resource_namelist(lb_resource):
|
||||
if lb_resource[-1] == 's':
|
||||
return (lb_resource[:-1], lb_resource)
|
||||
return (lb_resource, lb_resource + "s")
|
||||
|
||||
|
||||
def destroy_tenant_lb(lbv1_client):
|
||||
for o in lbv1_client.list_members():
|
||||
lbv1_client.delete_member(o['id'])
|
||||
for o in lbv1_client.list_health_monitors():
|
||||
lbv1_client.delete_health_monitor(o['id'])
|
||||
for o in lbv1_client.list_vips():
|
||||
lbv1_client.delete_vip(o['id'])
|
||||
for o in lbv1_client.list_pools():
|
||||
lbv1_client.delete_pool(o['id'])
|
||||
|
||||
|
||||
def get_client(client_mgr):
    """create a v1 load balancer client

    For itempest user:
        from itempest import load_our_solar_system as osn
        from vmware_nsx_tempest.services import load_balancer_v1_client
        lbv1 = load_balancer_v1_client.get_client(osn.adm.manager)
    For tempest user:
        lbv1 = load_balancer_v1_client.get_client(cls.os_adm)
    """
    # Accept either a credentials manager or an object wrapping one.
    mgr = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(mgr, 'networks_client')
    try:
        params = base.default_params_with_timeout_values.copy()
    except Exception:
        params = {}
    return LoadBalancerV1Client(net_client.auth_provider,
                                net_client.service,
                                net_client.region,
                                net_client.endpoint_type,
                                **params)
|
@ -1,52 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from tempest import config
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
# network/json/base.py does not include those methods in network_client
|
||||
class BaseNetworkClient(base.BaseNetworkClient):
    """Thin subclass of tempest's network BaseNetworkClient.

    Exists so the constructor can accept and forward the SSL/trace
    keyword arguments explicitly (plus swallow extra **kwargs).
    """

    def __init__(self, auth_provider, service, region,
                 endpoint_type=None, build_interval=None, build_timeout=None,
                 disable_ssl_certificate_validation=None, ca_certs=None,
                 trace_requests=None, **kwargs):
        # Short alias keeps the super() call under the line-length limit.
        dsca = disable_ssl_certificate_validation
        # NOTE(review): super() is invoked with base.BaseNetworkClient as
        # the first argument, which skips that class's own __init__ and
        # dispatches straight to its parent rest client's __init__ —
        # presumably intentional; confirm before changing.
        super(base.BaseNetworkClient, self).__init__(
            auth_provider, service, region,
            endpoint_type=endpoint_type,
            build_interval=build_interval,
            build_timeout=build_timeout,
            disable_ssl_certificate_validation=dsca,
            ca_certs=ca_certs,
            trace_requests=trace_requests)
|
||||
|
||||
|
||||
# SSL/tracing defaults shared by the service clients in this package.
default_params = {
    'disable_ssl_certificate_validation': True,
    'ca_certs': None,
    'trace_requests': ''}
# Catalog/endpoint and wait-loop defaults (legacy; superseded where
# default_params_3 below provides values from tempest's config).
default_params_2 = {
    'catalog_type': 'network',
    'region': 'nova',
    'endpoint_type': 'publicURL',
    'build_timeout': 300,
    'build_interval': 1}

# Service-client defaults as resolved by tempest's own configuration.
default_params_3 = config.service_client_config()

# Wait-loop parameters taken from tempest's [network] config section,
# overlaid with the tempest service-client defaults.
default_params_with_timeout_values = {
    'build_interval': CONF.network.build_interval,
    'build_timeout': CONF.network.build_timeout
}
default_params_with_timeout_values.update(default_params_3)
|
@ -1,46 +0,0 @@
|
||||
# Copyright 2017 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from oslo_log import log as logging
|
||||
|
||||
from vmware_nsx_tempest.services import nsxv3_client
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NSXClient(object):
    """Base NSX REST client.

    Facade that dispatches to a backend-specific client; currently only
    the "nsxv3" backend is wired up.
    """

    def __init__(self, backend, host, username, password, *args, **kwargs):
        self.backend = backend.lower()
        self.host = host
        self.username = username
        self.password = password
        if backend.lower() == "nsxv3":
            self.nsx = nsxv3_client.NSXV3Client(host, username, password)

    def get_firewall_section_and_rules(self, *args, **kwargs):
        """Return (section, rules) for the matching firewall section."""
        if self.backend != "nsxv3":
            #TODO(ddoshi) define else for nsxv
            return None
        section = self.nsx.get_firewall_section(*args, **kwargs)
        rules = self.nsx.get_firewall_section_rules(section)
        return section, rules

    def get_bridge_cluster_info(self, *args, **kwargs):
        """Return bridge cluster info (nsxv3 only; None otherwise)."""
        if self.backend == "nsxv3":
            return self.nsx.get_bridge_cluster_info(*args, **kwargs)
|
@ -1,560 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base64
|
||||
from copy import deepcopy
|
||||
import time
|
||||
|
||||
import requests
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
|
||||
requests.packages.urllib3.disable_warnings()
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NSXV3Client(object):
|
||||
"""Base NSXv3 REST client"""
|
||||
API_VERSION = "v1"
|
||||
|
||||
def __init__(self, host, username, password, *args, **kwargs):
    """Set connection defaults and pre-build the auth header variants."""
    self.host = host
    self.username = username
    self.password = password
    self.version = None
    self.endpoint = None
    # JSON in/out by default.
    self.content_type = "application/json"
    self.accept_type = "application/json"
    # verify=False: backend typically serves a self-signed certificate.
    self.verify = False
    self.secure = True
    self.interface = "json"
    self.url = None
    # Two header sets: the super-admin one adds X-Allow-Overwrite.
    self.headers_non_super_admin = self.__set_headers()
    # deepcopy so later mutation of self.headers cannot leak into the
    # cached non-super-admin set.
    self.headers = deepcopy(self.headers_non_super_admin)
    self.headers_super_admin = self.__set_headers(super_admin=True)
    self.api_version = NSXV3Client.API_VERSION
|
||||
|
||||
def __set_endpoint(self, endpoint):
|
||||
self.endpoint = endpoint
|
||||
|
||||
def get_endpoint(self):
|
||||
return self.endpoint
|
||||
|
||||
def __set_content_type(self, content_type):
|
||||
self.content_type = content_type
|
||||
|
||||
def get_content_type(self):
|
||||
return self.content_type
|
||||
|
||||
def __set_accept_type(self, accept_type):
|
||||
self.accept_type = accept_type
|
||||
|
||||
def get_accept_type(self):
|
||||
return self.accept_type
|
||||
|
||||
def __set_api_version(self, api_version):
|
||||
self.api_version = api_version
|
||||
|
||||
def get_api_version(self):
|
||||
return self.api_version
|
||||
|
||||
def __set_url(self, api=None, secure=None, host=None, endpoint=None):
|
||||
api = self.api_version if api is None else api
|
||||
secure = self.secure if secure is None else secure
|
||||
host = self.host if host is None else host
|
||||
endpoint = self.endpoint if endpoint is None else endpoint
|
||||
http_type = 'https' if secure else 'http'
|
||||
self.url = '%s://%s/api/%s%s' % (http_type, host, api, endpoint)
|
||||
|
||||
def get_url(self):
|
||||
return self.url
|
||||
|
||||
def __set_headers(self, content=None, accept=None, super_admin=False):
    """Build the HTTP header dict with Basic auth credentials.

    :param content: Content-Type override (defaults to self.content_type)
    :param accept: Accept override (defaults to self.accept_type)
    :param super_admin: add X-Allow-Overwrite for super-admin requests
    :returns: dict of request headers
    """
    content_type = self.content_type if content is None else content
    accept_type = self.accept_type if accept is None else accept
    auth_cred = self.username + ":" + self.password
    # BUG FIX: base64.b64encode() requires bytes on Python 3 (passing a
    # str raised TypeError); encode first and decode the result so the
    # header value is text rather than a bytes repr.
    auth = base64.b64encode(auth_cred.encode('utf-8')).decode('ascii')
    headers = {}
    headers['Authorization'] = "Basic %s" % auth
    headers['Content-Type'] = content_type
    headers['Accept'] = accept_type
    if super_admin:
        headers['X-Allow-Overwrite'] = 'true'
    return headers
|
||||
|
||||
def get(self, endpoint=None, params=None, cursor=None):
    """
    Basic query method for json API request

    :param endpoint: API path appended to the base URL
    :param params: optional query parameters passed to requests.get
    :param cursor: optional pagination cursor appended to the URL
    :returns: the raw requests.Response
    """
    self.__set_url(endpoint=endpoint)
    if cursor:
        # Append the cursor with '&' if the URL already has a query
        # string, otherwise start one with '?'.
        op = "&" if urlparse.urlparse(self.url).query else "?"
        self.url += op + "cursor=" + cursor
    response = requests.get(self.url, headers=self.headers,
                            verify=self.verify, params=params)
    return response
|
||||
|
||||
def put(self, endpoint=None, body=None):
|
||||
"""
|
||||
Basic put API method on endpoint
|
||||
"""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.put(self.url, headers=self.headers,
|
||||
verify=self.verify, data=jsonutils.dumps(body))
|
||||
return response
|
||||
|
||||
def ca_put_request(self, component, comp_id, body):
|
||||
"""
|
||||
NSX-T API Put request for certificate Management
|
||||
"""
|
||||
endpoint = ("/%s/%s" % (component, comp_id))
|
||||
response = self.put(endpoint=endpoint, body=body)
|
||||
return response
|
||||
|
||||
def delete(self, endpoint=None, params=None):
|
||||
"""
|
||||
Basic delete API method on endpoint
|
||||
"""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.delete(self.url, headers=self.headers,
|
||||
verify=self.verify, params=params)
|
||||
return response
|
||||
|
||||
def ca_delete_request(self, component=None, comp_id=None):
|
||||
"""
|
||||
NSX-T API delete request for certificate Management
|
||||
"""
|
||||
endpoint = ("/%s/%s" % (component, comp_id))
|
||||
response = self.delete(endpoint=endpoint)
|
||||
return response
|
||||
|
||||
def delete_super_admin(self, endpoint=None, params=None):
|
||||
"""
|
||||
Basic delete API method for NSX super admin on endpoint
|
||||
"""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.delete(self.url, headers=self.headers_super_admin,
|
||||
verify=self.verify, params=params)
|
||||
return response
|
||||
|
||||
def post(self, endpoint=None, body=None):
|
||||
"""
|
||||
Basic post API method on endpoint
|
||||
"""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.post(self.url, headers=self.headers,
|
||||
verify=self.verify,
|
||||
data=jsonutils.dumps(body))
|
||||
return response
|
||||
|
||||
def get_logical_resources(self, endpoint):
    """
    Get logical resources based on the endpoint

    Getting the logical resource based on the end point. Parse the response
    for the cursor. If cursor is present, query url for multiple pages to
    get all the logical resources.

    :param endpoint: API path of the resource collection
    :returns: list of all result dicts across every page
    """
    results = []
    response = self.get(endpoint=endpoint)
    res_json = response.json()
    cursor = res_json.get("cursor")
    if res_json.get("results"):
        results.extend(res_json["results"])
    # Follow pagination: each page returns a new cursor until the
    # final page omits it.
    while cursor:
        page = self.get(endpoint=endpoint, cursor=cursor).json()
        results.extend(page.get("results", []))
        cursor = page.get("cursor")
    return results
|
||||
|
||||
def get_transport_zones(self):
|
||||
"""
|
||||
Retrieve all transport zones
|
||||
"""
|
||||
return self.get_logical_resources("/transport-zones")
|
||||
|
||||
def get_logical_ports(self):
|
||||
"""
|
||||
Retrieve all logical ports on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/logical-ports")
|
||||
|
||||
def get_logical_port(self, os_name):
    """
    Get the logical port based on the os_name provided.

    The name of the logical port should match the os_name.
    Return the logical port if found, otherwise return None.
    """
    # Without a name there is nothing to match on the backend.
    if not os_name:
        LOG.error("Name of OS port should be present "
                  "in order to query backend logical port created")
        return None
    lports = self.get_logical_ports()
    return self.get_nsx_resource_by_name(lports, os_name)
|
||||
|
||||
def get_logical_port_info(self, lport):
|
||||
"""
|
||||
Retrieve attributes of a given logical port
|
||||
"""
|
||||
lport_uri = "/logical-ports/%s" % lport
|
||||
|
||||
response = self.get(endpoint=lport_uri)
|
||||
res_json = response.json()
|
||||
return res_json
|
||||
|
||||
def get_switching_profile(self, switch_profile):
|
||||
"""
|
||||
Retrieve attributes of a given nsx switching profile
|
||||
"""
|
||||
sw_profile_uri = "/switching-profiles/%s" % switch_profile
|
||||
response = self.get(endpoint=sw_profile_uri)
|
||||
res_json = response.json()
|
||||
return res_json
|
||||
|
||||
def get_os_logical_ports(self):
|
||||
"""
|
||||
Retrieve all logical ports created from OpenStack
|
||||
"""
|
||||
lports = self.get_logical_ports()
|
||||
return self.get_os_resources(lports)
|
||||
|
||||
def update_logical_port_attachment(self, lports):
|
||||
"""
|
||||
Update the logical port attachment
|
||||
|
||||
In order to delete logical ports, we need to detach
|
||||
the VIF attachment on the ports first.
|
||||
"""
|
||||
for p in lports:
|
||||
p['attachment'] = None
|
||||
endpoint = "/logical-ports/%s" % p['id']
|
||||
response = self.put(endpoint=endpoint, body=p)
|
||||
if response.status_code != requests.codes.ok:
|
||||
LOG.error("Failed to update lport %s", p['id'])
|
||||
|
||||
def cleanup_os_logical_ports(self):
|
||||
"""
|
||||
Delete all logical ports created by OpenStack
|
||||
"""
|
||||
lports = self.get_logical_ports()
|
||||
os_lports = self.get_os_resources(lports)
|
||||
LOG.info("Number of OS Logical Ports to be deleted: %s",
|
||||
len(os_lports))
|
||||
# logical port vif detachment
|
||||
self.update_logical_port_attachment(os_lports)
|
||||
for p in os_lports:
|
||||
endpoint = '/logical-ports/%s' % p['id']
|
||||
response = self.delete(endpoint=endpoint)
|
||||
if response.status_code == requests.codes.ok:
|
||||
LOG.info("Successfully deleted logical port %s", p['id'])
|
||||
else:
|
||||
LOG.error("Failed to delete lport %(port_id)s, response "
|
||||
"code %(code)s",
|
||||
{'port_id': p['id'], 'code': response.status_code})
|
||||
|
||||
def get_os_resources(self, resources):
|
||||
"""
|
||||
Get all logical resources created by OpenStack
|
||||
"""
|
||||
os_resources = [r for r in resources if 'tags' in r
|
||||
for tag in r['tags']
|
||||
if 'os-project-id' in tag.values()]
|
||||
return os_resources
|
||||
|
||||
def get_nsx_resource_by_name(self, nsx_resources, nsx_name):
|
||||
"""
|
||||
Get the NSX component created from OpenStack by name.
|
||||
|
||||
The name should be converted from os_name to nsx_name.
|
||||
If found exact one match return it, otherwise report error.
|
||||
"""
|
||||
nsx_resource = [n for n in nsx_resources if
|
||||
n['display_name'] == nsx_name]
|
||||
if len(nsx_resource) == 0:
|
||||
LOG.warning("Backend nsx resource %s NOT found!", nsx_name)
|
||||
return None
|
||||
if len(nsx_resource) > 1:
|
||||
LOG.error("More than 1 nsx resources found: %s!",
|
||||
nsx_resource)
|
||||
return None
|
||||
else:
|
||||
LOG.info("Found nsgroup: %s", nsx_resource[0])
|
||||
return nsx_resource[0]
|
||||
|
||||
def get_logical_switches(self):
|
||||
"""
|
||||
Retrieve all logical switches on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/logical-switches")
|
||||
|
||||
def get_logical_switch_profiles(self):
|
||||
"""
|
||||
Retrieve all switching profiles on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/switching-profiles")
|
||||
|
||||
def get_switching_profiles(self):
|
||||
"""
|
||||
Retrieve all switching profiles on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/switching-profiles")
|
||||
|
||||
def get_bridge_cluster_info(self):
|
||||
"""
|
||||
Get bridge cluster information.
|
||||
|
||||
:return: returns bridge cluster id and bridge cluster name.
|
||||
"""
|
||||
return self.get_logical_resources("/bridge-clusters")
|
||||
|
||||
def get_logical_switch(self, os_name, os_uuid):
|
||||
"""
|
||||
Get the logical switch based on the name and uuid provided.
|
||||
|
||||
The name of the logical switch should follow
|
||||
<os_network_name>_<first 5 os uuid>...<last 5 os uuid>
|
||||
Return logical switch if found, otherwise return None
|
||||
"""
|
||||
if not os_name or not os_uuid:
|
||||
LOG.error("Name and uuid of OpenStack L2 network need to be "
|
||||
"present in order to query backend logical switch!")
|
||||
return None
|
||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||
lswitches = self.get_logical_switches()
|
||||
return self.get_nsx_resource_by_name(lswitches, nsx_name)
|
||||
|
||||
def get_lswitch_ports(self, ls_id):
|
||||
"""
|
||||
Return all the logical ports that belong to this lswitch
|
||||
"""
|
||||
lports = self.get_logical_ports()
|
||||
return [p for p in lports if p['logical_switch_id'] is ls_id]
|
||||
|
||||
def get_firewall_sections(self):
|
||||
"""
|
||||
Retrieve all firewall sections
|
||||
"""
|
||||
return self.get_logical_resources("/firewall/sections")
|
||||
|
||||
def get_firewall_section(self, os_name, os_uuid):
|
||||
"""
|
||||
Get the firewall section by os_name and os_uuid
|
||||
"""
|
||||
if not os_name or not os_uuid:
|
||||
LOG.error("Name and uuid of OS security group should be "
|
||||
"present in order to query backend FW section "
|
||||
"created")
|
||||
return None
|
||||
nsx_name = os_name + " - " + os_uuid
|
||||
nsx_firewall_time_counter = 0
|
||||
nsx_dfw_section = None
|
||||
# wait till timeout or till dfw section
|
||||
while nsx_firewall_time_counter < \
|
||||
constants.NSX_FIREWALL_REALIZED_TIMEOUT and \
|
||||
not nsx_dfw_section:
|
||||
nsx_firewall_time_counter += 1
|
||||
fw_sections = self.get_firewall_sections()
|
||||
nsx_dfw_section = self.get_nsx_resource_by_name(fw_sections,
|
||||
nsx_name)
|
||||
time.sleep(constants.ONE_SEC)
|
||||
return nsx_dfw_section
|
||||
|
||||
def get_firewall_section_rules(self, fw_section):
|
||||
"""
|
||||
Retrieve all fw rules for a given fw section
|
||||
"""
|
||||
endpoint = "/firewall/sections/%s/rules" % fw_section['id']
|
||||
return self.get_logical_resources(endpoint)
|
||||
|
||||
def get_firewall_section_rule(self, fw_section, os_uuid):
|
||||
"""
|
||||
Get the firewall section rule based on the name
|
||||
"""
|
||||
fw_rules = self.get_firewall_section_rules(fw_section)
|
||||
nsx_name = os_uuid
|
||||
return self.get_nsx_resource_by_name(fw_rules, nsx_name)
|
||||
|
||||
def get_ns_groups(self):
|
||||
"""
|
||||
Retrieve all NSGroups on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/ns-groups")
|
||||
|
||||
def get_neutron_ns_group_id(self):
|
||||
"""
|
||||
Retrieve NSGroup Id
|
||||
"""
|
||||
nsx_nsgroup = self.get_ns_groups()
|
||||
for group in nsx_nsgroup:
|
||||
if group['display_name'] == 'neutron_excluded_port_nsgroup':
|
||||
nsgroup_id = group['id']
|
||||
return nsgroup_id
|
||||
|
||||
def get_ns_group_port_members(self, ns_group_id):
|
||||
"""
|
||||
Retrieve NSGroup port members
|
||||
"""
|
||||
endpoint = "/ns-groups/%s/effective-logical-port-members" % ns_group_id
|
||||
response = self.get(endpoint=endpoint)
|
||||
res_json = response.json()
|
||||
return res_json
|
||||
|
||||
def get_ns_group(self, os_name, os_uuid):
|
||||
"""
|
||||
Get the NSGroup based on the name provided.
|
||||
The name of the nsgroup should follow
|
||||
<os_sg_name> - <os_sg_uuid>
|
||||
Return nsgroup if found, otherwise return None
|
||||
"""
|
||||
if not os_name or not os_uuid:
|
||||
LOG.error("Name and uuid of OS security group should be "
|
||||
"present in order to query backend nsgroup created")
|
||||
return None
|
||||
nsx_name = os_name + " - " + os_uuid
|
||||
nsgroups = self.get_ns_groups()
|
||||
return self.get_nsx_resource_by_name(nsgroups, nsx_name)
|
||||
|
||||
def get_logical_routers(self, tier=None):
|
||||
"""
|
||||
Retrieve all the logical routers based on router type. If tier
|
||||
is None, it will return all logical routers.
|
||||
"""
|
||||
if tier:
|
||||
endpoint = "/logical-routers?router_type=%s" % tier
|
||||
else:
|
||||
endpoint = "/logical-routers"
|
||||
return self.get_logical_resources(endpoint)
|
||||
|
||||
def get_logical_router(self, os_name, os_uuid):
|
||||
"""
|
||||
Get the logical router based on the os_name and os_uuid provided.
|
||||
The name of the logical router shoud follow
|
||||
<os_router_name>_<starting_5_uuid>...<trailing_5_uuid>
|
||||
Return the logical router if found, otherwise return None.
|
||||
"""
|
||||
if not os_name or not os_uuid:
|
||||
LOG.error("Name and uuid of OS router should be present "
|
||||
"in order to query backend logical router created")
|
||||
return None
|
||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||
lrouters = self.get_logical_routers()
|
||||
return self.get_nsx_resource_by_name(lrouters, nsx_name)
|
||||
|
||||
def get_logical_router_ports(self, lrouter):
|
||||
"""
|
||||
Get all logical ports attached to lrouter
|
||||
"""
|
||||
endpoint = "/logical-router-ports?logical_router_id=%s" % lrouter['id']
|
||||
return self.get_logical_resources(endpoint)
|
||||
|
||||
def get_logical_router_nat_rules(self, lrouter):
|
||||
"""
|
||||
Get all user defined NAT rules of the specific logical router
|
||||
"""
|
||||
if not lrouter:
|
||||
LOG.error("Logical router needs to be present in order "
|
||||
"to get the NAT rules")
|
||||
return None
|
||||
endpoint = "/logical-routers/%s/nat/rules" % lrouter['id']
|
||||
return self.get_logical_resources(endpoint)
|
||||
|
||||
def get_logical_router_advertisement(self, lrouter):
|
||||
"""Get logical router advertisement"""
|
||||
if not lrouter:
|
||||
LOG.error("Logical router needs to be present in order "
|
||||
"to get router advertisement!")
|
||||
return None
|
||||
endpoint = "/logical-routers/%s/routing/advertisement" % lrouter['id']
|
||||
response = self.get(endpoint)
|
||||
return response.json()
|
||||
|
||||
def get_logical_dhcp_servers(self):
|
||||
"""
|
||||
Get all logical DHCP servers on NSX backend
|
||||
"""
|
||||
return self.get_logical_resources("/dhcp/servers")
|
||||
|
||||
def get_logical_dhcp_server(self, os_name, os_uuid):
|
||||
"""
|
||||
Get the logical dhcp server based on the name and uuid provided.
|
||||
|
||||
The name of the logical dhcp server should follow
|
||||
<os_network_name>_<first 5 os uuid>...<last 5 os uuid>
|
||||
Return logical dhcp server if found, otherwise return None
|
||||
"""
|
||||
if not os_name or not os_uuid:
|
||||
LOG.error("Name and uuid of OpenStack L2 network need to be "
|
||||
"present in order to query backend logical dhcp "
|
||||
"server!")
|
||||
return None
|
||||
nsx_name = os_name + "_" + os_uuid[:5] + "..." + os_uuid[-5:]
|
||||
dhcp_servers = self.get_logical_dhcp_servers()
|
||||
return self.get_nsx_resource_by_name(dhcp_servers, nsx_name)
|
||||
|
||||
def get_dhcp_server_static_bindings(self, dhcp_server):
|
||||
"""
|
||||
Get all DHCP static bindings of a logical DHCP server
|
||||
"""
|
||||
endpoint = "/dhcp/servers/%s/static-bindings" % dhcp_server
|
||||
return self.get_logical_resources(endpoint)
|
||||
|
||||
def get_md_proxies(self):
|
||||
"""
|
||||
Get md proxies.
|
||||
|
||||
:return: returns list of md proxies information.
|
||||
"""
|
||||
return self.get_logical_resources("/md-proxies")
|
||||
|
||||
def get_nsx_certificate(self):
|
||||
"""
|
||||
Get all certificates registered with backend
|
||||
"""
|
||||
endpoint = "/trust-management/certificates/"
|
||||
response = self.get(endpoint)
|
||||
return response.json()
|
||||
|
||||
def get_openstack_client_certificate(self):
|
||||
"""
|
||||
Get self signed openstack client certificate
|
||||
"""
|
||||
cert_response = self.get_nsx_certificate()
|
||||
for cert in cert_response['results']:
|
||||
if (cert["_create_user"] == "admin" and cert[
|
||||
"resource_type"] == "certificate_self_signed" and cert[
|
||||
"display_name"] != "NSX MP Client Certificate for Key "
|
||||
"Manager"):
|
||||
LOG.info('Client certificate created')
|
||||
return cert
|
||||
LOG.error("Client Certificate not created")
|
||||
return None
|
||||
|
||||
def delete_md_proxy(self, uuid):
|
||||
"""
|
||||
Delete md proxies.
|
||||
"""
|
||||
return self.delete_logical_resources("/md-proxies/%s" % uuid)
|
||||
|
||||
def delete_logical_resources(self, endpoint):
|
||||
"""
|
||||
Delete logical resources based on the endpoint.
|
||||
"""
|
||||
response = self.delete(endpoint=endpoint)
|
||||
return response.json()
|
@ -1,347 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base64
|
||||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_serialization import jsonutils
|
||||
import requests
|
||||
from tempest import config
|
||||
|
||||
import vmware_nsx_tempest.services.utils as utils
|
||||
|
||||
requests.packages.urllib3.disable_warnings()
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VSMClient(object):
|
||||
"""NSX-v client.
|
||||
|
||||
The client provides the API operations on its components.
|
||||
The purpose of this rest client is to query backend components after
|
||||
issuing corresponding API calls from OpenStack. This is to make sure
|
||||
the API calls has been realized on the NSX-v backend.
|
||||
"""
|
||||
API_VERSION = "2.0"
|
||||
|
||||
def __init__(self, host, username, password, *args, **kwargs):
|
||||
self.force = True if 'force' in kwargs else False
|
||||
self.host = host
|
||||
self.username = username
|
||||
self.password = password
|
||||
self.version = None
|
||||
self.endpoint = None
|
||||
self.content_type = "application/json"
|
||||
self.accept_type = "application/json"
|
||||
self.verify = False
|
||||
self.secure = True
|
||||
self.interface = "json"
|
||||
self.url = None
|
||||
self.headers = None
|
||||
self.api_version = VSMClient.API_VERSION
|
||||
self.default_scope_id = None
|
||||
|
||||
self.__set_headers()
|
||||
self._version = self.get_vsm_version()
|
||||
|
||||
def __set_endpoint(self, endpoint):
|
||||
self.endpoint = endpoint
|
||||
|
||||
def get_endpoint(self):
|
||||
return self.endpoint
|
||||
|
||||
def __set_content_type(self, content_type):
|
||||
self.content_type = content_type
|
||||
|
||||
def get_content_type(self):
|
||||
return self.content_type
|
||||
|
||||
def __set_accept_type(self, accept_type):
|
||||
self.accept_type = accept_type
|
||||
|
||||
def get_accept_type(self):
|
||||
return self.accept_type
|
||||
|
||||
def __set_api_version(self, api_version):
|
||||
self.api_version = api_version
|
||||
|
||||
def get_api_version(self):
|
||||
return self.api_version
|
||||
|
||||
def __set_url(self, version=None, secure=None, host=None, endpoint=None):
|
||||
version = self.api_version if version is None else version
|
||||
secure = self.secure if secure is None else secure
|
||||
host = self.host if host is None else host
|
||||
endpoint = self.endpoint if endpoint is None else endpoint
|
||||
http_type = 'https' if secure else 'http'
|
||||
self.url = '%s://%s/api/%s%s' % (http_type, host, version, endpoint)
|
||||
|
||||
def get_url(self):
|
||||
return self.url
|
||||
|
||||
def __set_headers(self, content=None, accept=None):
|
||||
content_type = self.content_type if content is None else content
|
||||
accept_type = self.accept_type if accept is None else accept
|
||||
auth_cred = self.username + ":" + self.password
|
||||
auth = base64.b64encode(auth_cred)
|
||||
headers = {}
|
||||
headers['Authorization'] = "Basic %s" % auth
|
||||
headers['Content-Type'] = content_type
|
||||
headers['Accept'] = accept_type
|
||||
self.headers = headers
|
||||
|
||||
def get(self, endpoint=None, params=None):
|
||||
"""Basic query GET method for json API request."""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.get(self.url, headers=self.headers,
|
||||
verify=self.verify, params=params)
|
||||
return response
|
||||
|
||||
def delete(self, endpoint=None, params=None):
|
||||
"""Basic delete API method on endpoint."""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.delete(self.url, headers=self.headers,
|
||||
verify=self.verify, params=params)
|
||||
return response
|
||||
|
||||
def post(self, endpoint=None, body=None):
|
||||
"""Basic post API method on endpoint."""
|
||||
self.__set_url(endpoint=endpoint)
|
||||
response = requests.post(self.url, headers=self.headers,
|
||||
verify=self.verify,
|
||||
data=jsonutils.dumps(body))
|
||||
return response
|
||||
|
||||
def get_all_vdn_scopes(self):
|
||||
"""Retrieve existing network scopes"""
|
||||
self.__set_api_version('2.0')
|
||||
self.__set_endpoint("/vdn/scopes")
|
||||
response = self.get()
|
||||
return response.json()['allScopes']
|
||||
|
||||
# return the vdn_scope_id for the priamry Transport Zone
|
||||
def get_vdn_scope_id(self):
|
||||
"""Retrieve existing network scope id."""
|
||||
scopes = self.get_all_vdn_scopes()
|
||||
if len(scopes) == 0:
|
||||
return scopes[0]['objectId']
|
||||
return CONF.nsxv.vdn_scope_id
|
||||
|
||||
def get_vdn_scope_by_id(self, scope_id):
|
||||
"""Retrieve existing network scopes id"""
|
||||
self.__set_api_version('2.0')
|
||||
self.__set_endpoint("/vdn/scopes/%s" % scope_id)
|
||||
return self.get().json()
|
||||
|
||||
def get_vdn_scope_by_name(self, name):
|
||||
"""Retrieve network scope id of existing scope name:
|
||||
|
||||
nsxv_client.get_vdn_scope_id_by_name('TZ1')
|
||||
"""
|
||||
scopes = self.get_all_vdn_scopes()
|
||||
if name is None:
|
||||
for scope in scopes:
|
||||
if scope['objectId'] == CONF.nsxv.vdn_scope_id:
|
||||
return scope
|
||||
else:
|
||||
for scope in scopes:
|
||||
if scope['name'] == name:
|
||||
return scope
|
||||
return None
|
||||
|
||||
def get_all_logical_switches(self, vdn_scope_id=None):
|
||||
lswitches = []
|
||||
self.__set_api_version('2.0')
|
||||
vdn_scope_id = vdn_scope_id or self.get_vdn_scope_id()
|
||||
endpoint = "/vdn/scopes/%s/virtualwires" % (vdn_scope_id)
|
||||
self.__set_endpoint(endpoint)
|
||||
response = self.get()
|
||||
paging_info = response.json()['dataPage']['pagingInfo']
|
||||
page_size = int(paging_info['pageSize'])
|
||||
total_count = int(paging_info['totalCount'])
|
||||
msg = ("There are total %s logical switches and page size is %s"
|
||||
% (total_count, page_size))
|
||||
LOG.debug(msg)
|
||||
pages = utils.ceil(total_count, page_size)
|
||||
LOG.debug("Total pages: %s" % pages)
|
||||
for i in range(pages):
|
||||
start_index = page_size * i
|
||||
params = {'startindex': start_index}
|
||||
response = self.get(params=params)
|
||||
lswitches += response.json()['dataPage']['data']
|
||||
return lswitches
|
||||
|
||||
def get_logical_switch(self, name):
|
||||
"""Get the logical switch based on the name.
|
||||
|
||||
The uuid of the OpenStack L2 network. Return ls if found,
|
||||
otherwise return None.
|
||||
"""
|
||||
lswitches = self.get_all_logical_switches()
|
||||
lswitch = [ls for ls in lswitches if ls['name'] == name]
|
||||
if len(lswitch) == 0:
|
||||
LOG.debug('logical switch %s NOT found!' % name)
|
||||
lswitch = None
|
||||
else:
|
||||
ls = lswitch[0]
|
||||
LOG.debug('Found lswitch: %s' % ls)
|
||||
return ls
|
||||
|
||||
def delete_logical_switch(self, name):
|
||||
"""Delete logical switch based on name.
|
||||
|
||||
The name of the logical switch on NSX-v is the uuid
|
||||
of the openstack l2 network.
|
||||
"""
|
||||
ls = self.get_logical_switch(name)
|
||||
if ls is not None:
|
||||
endpoint = '/vdn/virtualwires/%s' % ls['objectId']
|
||||
response = self.delete(endpoint=endpoint)
|
||||
if response.status_code == 200:
|
||||
LOG.debug('Successfully deleted logical switch %s' % name)
|
||||
else:
|
||||
LOG.debug('ERROR @delete ls=%s failed with response code %s' %
|
||||
(name, response.status_code))
|
||||
|
||||
def get_all_edges(self):
|
||||
"""Get all edges on NSX-v backend."""
|
||||
self.__set_api_version('4.0')
|
||||
self.__set_endpoint('/edges')
|
||||
edges = []
|
||||
response = self.get()
|
||||
paging_info = response.json()['edgePage']['pagingInfo']
|
||||
page_size = int(paging_info['pageSize'])
|
||||
total_count = int(paging_info['totalCount'])
|
||||
msg = "There are total %s edges and page size is %s" % (total_count,
|
||||
page_size)
|
||||
LOG.debug(msg)
|
||||
pages = utils.ceil(total_count, page_size)
|
||||
for i in range(pages):
|
||||
start_index = page_size * i
|
||||
params = {'startindex': start_index}
|
||||
response = self.get(params=params)
|
||||
edges += response.json()['edgePage']['data']
|
||||
return edges
|
||||
|
||||
def get_edge_firewall_rules(self, edge_Id):
|
||||
"""Get nsx-edge firewall info based on edge_id.
|
||||
|
||||
Return firewall rules if found ,else return None.
|
||||
"""
|
||||
self.__set_api_version('4.0')
|
||||
self.__set_endpoint('/edges/%s/firewall/config ' % edge_Id)
|
||||
response = self.get()
|
||||
rules = response.json()['firewallRules']['firewallRules']
|
||||
if len(rules) == 0:
|
||||
rules = None
|
||||
return rules
|
||||
|
||||
def get_firewall(self):
|
||||
"""Get all firewall on NSX-v beckend.
|
||||
|
||||
Return firewalls if found, else return None.
|
||||
"""
|
||||
self.__set_api_version('4.0')
|
||||
self.__set_endpoint('/firewall/globalroot-0/config')
|
||||
response = self.get()
|
||||
paging_info = response.json()
|
||||
if len(paging_info) == 0:
|
||||
paging_info = None
|
||||
return paging_info
|
||||
|
||||
def get_edge(self, name):
|
||||
"""Get edge based on the name, which is OpenStack router.
|
||||
|
||||
Return edge if found, else return None.
|
||||
"""
|
||||
edges = self.get_all_edges()
|
||||
edge = [e for e in edges if e['name'] == name]
|
||||
if len(edge) == 0:
|
||||
LOG.debug('Edge %s NOT found!' % name)
|
||||
edge = None
|
||||
else:
|
||||
edge = edge[0]
|
||||
LOG.debug('Found edge: %s' % edge)
|
||||
return edge
|
||||
|
||||
def get_dhcp_edge_config(self, edge_id):
|
||||
"""Get dhcp edge config.
|
||||
|
||||
Return edge information.
|
||||
"""
|
||||
self.__set_api_version('4.0')
|
||||
self.__set_endpoint('/edges/%s/dhcp/config' % edge_id)
|
||||
response = self.get()
|
||||
return response
|
||||
|
||||
def get_excluded_vm_name_list(self):
|
||||
"""Get excluded vm's list info from beckend.
|
||||
|
||||
After disabling port security of vm port, vm will get added
|
||||
in exclude list.This method returns the list of vm's present
|
||||
in exclude list.
|
||||
Returns exclude list of vm's name.
|
||||
"""
|
||||
self.__set_api_version('2.1')
|
||||
self.__set_endpoint('/app/excludelist')
|
||||
response = self.get()
|
||||
response_list = []
|
||||
exclude_list = []
|
||||
response_list = response.json()[
|
||||
'excludeListConfigurationDto']['excludeMembers']
|
||||
exclude_list = [member['member']['name'] for member in response_list
|
||||
if member['member']['name']]
|
||||
return exclude_list
|
||||
|
||||
def get_dhcp_edge_info(self):
|
||||
"""Get dhcp edge info.
|
||||
|
||||
Return edge if found, else return None.
|
||||
"""
|
||||
edges = self.get_all_edges()
|
||||
edge_list = []
|
||||
for e in edges:
|
||||
if (not e['edgeStatus'] == 'GREY'
|
||||
and not e['state'] == 'undeployed'):
|
||||
p = re.compile(r'dhcp*')
|
||||
if (p.match(e['name'])):
|
||||
edge_list.append(e['recentJobInfo']['edgeId'])
|
||||
count = 0
|
||||
result_edge = {}
|
||||
for edge_id in edge_list:
|
||||
response = self.get_dhcp_edge_config(edge_id)
|
||||
paging_info = response.json()
|
||||
if (paging_info['staticBindings']['staticBindings']):
|
||||
result_edge[count] = paging_info
|
||||
count += 1
|
||||
else:
|
||||
LOG.debug('Host Routes are not avilable for %s ' % edge_id)
|
||||
if (count > 0):
|
||||
edge = result_edge[0]
|
||||
else:
|
||||
edge = None
|
||||
return edge
|
||||
|
||||
def get_vsm_version(self):
|
||||
"""Get the VSM client version including major, minor, patch, & build#.
|
||||
|
||||
Build number, e.g. 6.2.0.2986609
|
||||
return: vsm version
|
||||
"""
|
||||
self.__set_api_version('1.0')
|
||||
self.__set_endpoint('/appliance-management/global/info')
|
||||
response = self.get()
|
||||
json_ver = response.json()['versionInfo']
|
||||
return '.'.join([json_ver['majorVersion'], json_ver['minorVersion'],
|
||||
json_ver['patchVersion'], json_ver['buildNumber']])
|
@ -1,100 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from oslo_log import log
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class L2GatewayClient(base.BaseNetworkClient):
|
||||
"""
|
||||
Request resources via API for L2GatewayClient
|
||||
l2 gateway create request
|
||||
l2 gateway update request
|
||||
l2 gateway show request
|
||||
l2 gateway delete request
|
||||
l2 gateway list all request
|
||||
"""
|
||||
|
||||
def create_l2_gateway(self, **kwargs):
|
||||
uri = constants.L2_GWS_BASE_URI
|
||||
post_data = {constants.L2GW: kwargs}
|
||||
LOG.info("URI : %(uri)s, posting data : %(post_data)s",
|
||||
{"uri": uri, "post_data": post_data})
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l2_gateway(self, l2_gateway_id, **kwargs):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
post_data = {constants.L2GW: kwargs}
|
||||
constants.LOG.info(
|
||||
"URI : %(uri)s, posting data : %(post_data)s",
|
||||
{"uri": uri, "post_data": post_data})
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l2_gateway(self, l2_gateway_id, **fields):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l2_gateway(self, l2_gateway_id):
|
||||
uri = constants.L2_GWS_BASE_URI + "/" + l2_gateway_id
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l2_gateways(self, **filters):
|
||||
uri = constants.L2_GWS_BASE_URI
|
||||
LOG.info("URI : %(uri)s", {"uri": uri})
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
class L2GatewayConnectionClient(base.BaseNetworkClient):
|
||||
"""
|
||||
Request resources via API for L2GatewayClient
|
||||
l2 gateway connection create request
|
||||
l2 gateway connection update request
|
||||
l2 gateway connection show request
|
||||
l2 gateway connection delete request
|
||||
l2 gateway connection list all request
|
||||
"""
|
||||
resource = 'l2_gateway_connection'
|
||||
resource_plural = 'l2_gateway_connections'
|
||||
path = 'l2-gateway-connections'
|
||||
resource_base_path = '/%s' % path
|
||||
resource_object_path = '/%s/%%s' % path
|
||||
|
||||
def create_l2_gateway_connection(self, **kwargs):
|
||||
uri = self.resource_base_path
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_l2_gateway_connection(self, l2_gateway_id, **kwargs):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_l2_gateway_connection(self, l2_gateway_id, **fields):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_l2_gateway_connection(self, l2_gateway_id):
|
||||
uri = self.resource_object_path % l2_gateway_id
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_l2_gateway_connections(self, **filters):
|
||||
uri = self.resource_base_path
|
||||
return self.list_resources(uri, **filters)
|
@ -1,68 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class BandwidthLimitRulesClient(base.BaseNetworkClient):
|
||||
resource = 'bandwidth_limit_rule'
|
||||
resource_plural = 'bandwidth_limit_rules'
|
||||
path = 'qos/policies'
|
||||
resource_base_path = '/%s/%%s/bandwidth_limit_rules' % path
|
||||
resource_object_path = '/%s/%%s/bandwidth_limit_rules/%%s' % path
|
||||
|
||||
def create_bandwidth_limit_rule(self, policy_id, **kwargs):
|
||||
uri = self.resource_base_path % policy_id
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.create_resource(uri, post_data)
|
||||
|
||||
def update_bandwidth_limit_rule(self, rule_id, policy_id, **kwargs):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
post_data = {self.resource: kwargs}
|
||||
return self.update_resource(uri, post_data)
|
||||
|
||||
def show_bandwidth_limit_rule(self, rule_id, policy_id, **fields):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
return self.show_resource(uri, **fields)
|
||||
|
||||
def delete_bandwidth_limit_rule(self, rule_id, policy_id):
|
||||
uri = self.resource_object_path % (policy_id, rule_id)
|
||||
return self.delete_resource(uri)
|
||||
|
||||
def list_bandwidth_limit_rules(self, policy_id, **filters):
|
||||
uri = self.resource_base_path % policy_id
|
||||
return self.list_resources(uri, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr,
|
||||
set_property=False,
|
||||
with_name="qos_bandwidth_limit_rules_client"):
|
||||
"""create a qos bandwidth limit rules client
|
||||
|
||||
For tempest user:
|
||||
client = bandwidth_limit_rules_client.get_client(osn.adm)
|
||||
"""
|
||||
manager = getattr(client_mgr, 'manager', client_mgr)
|
||||
net_client = getattr(manager, 'networks_client')
|
||||
try:
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
except Exception:
|
||||
_params = {}
|
||||
client = BandwidthLimitRulesClient(net_client.auth_provider,
|
||||
net_client.service,
|
||||
net_client.region,
|
||||
net_client.endpoint_type,
|
||||
**_params)
|
||||
if set_property:
|
||||
setattr(manager, with_name, client)
|
||||
return client
|
@ -1,147 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from vmware_nsx_tempest.services.qos import (
|
||||
bandwidth_limit_rules_client as bandwidth_limit_rules_client)
|
||||
from vmware_nsx_tempest.services.qos import (
|
||||
dscp_marking_rules_client as dscp_marking_rules_client)
|
||||
from vmware_nsx_tempest.services.qos import (
|
||||
policies_client as policies_client)
|
||||
from vmware_nsx_tempest.services.qos import (
|
||||
rule_types_client as rule_types_client)
|
||||
|
||||
RULE_TYPE_BANDWIDTH_LIMIT = "bandwidth_limit"
|
||||
RULE_TYPE_DSCP_MARK = "dscp_marking"
|
||||
VALID_RULE_TYPES = [RULE_TYPE_BANDWIDTH_LIMIT, RULE_TYPE_DSCP_MARK]
|
||||
QOS_POLICY_ID = 'qos_policy_id'
|
||||
|
||||
|
||||
class BaseQosClient(object):
    """Facade over the Neutron QoS REST clients.

    Bundles the policies, bandwidth-limit-rule, dscp-marking-rule and
    rule-type clients behind a single object, resolves policy names to
    ids, and unwraps single-key response bodies via :meth:`resp_body`.
    """

    def __init__(self, manager, set_property=True):
        # manager: tempest client manager used to build each sub-client.
        # set_property: when True each sub-client also attaches itself to
        # the manager under its conventional attribute name.
        self.policies_client = policies_client.get_client(
            manager, set_property)
        self.bandwidths_client = (
            bandwidth_limit_rules_client.get_client(
                manager, set_property))
        self.dscps_client = dscp_marking_rules_client.get_client(
            manager, set_property)
        self.types_client = rule_types_client.get_client(manager, True)

    def resp_body(self, result, item):
        """Return result[item] when present, otherwise the raw result."""
        return result.get(item, result)

    # --- policy operations --------------------------------------------

    def create_policy(self, name, description, shared, **kwargs):
        """Create a QoS policy and return its unwrapped body."""
        result = self.policies_client.create_policy(
            name=name,
            description=description,
            shared=shared,
            **kwargs
        )
        return self.resp_body(result, 'policy')

    def delete_policy(self, policy_id_or_name):
        """Delete a policy given its id or (unique) name."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.policies_client.delete_policy(policy_id)
        return self.resp_body(result, 'policy')

    def list_policies(self, **filters):
        """List QoS policies matching *filters*."""
        result = self.policies_client.list_policies(**filters)
        return self.resp_body(result, 'policies')

    def update_policy(self, policy_id_or_name, **kwargs):
        """Update a policy given its id or name."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.policies_client.update_policy(policy_id, **kwargs)
        return self.resp_body(result, 'policy')

    def show_policy(self, policy_id_or_name, **fields):
        """Show one policy, optionally restricted to *fields*."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.policies_client.show_policy(policy_id, **fields)
        return self.resp_body(result, 'policy')

    # --- bandwidth limit rules ----------------------------------------

    def create_bandwidth_limit_rule(self, policy_id_or_name,
                                    max_kbps, max_burst_kbps,
                                    **kwargs):
        """Create a bandwidth limit rule under the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.bandwidths_client.create_bandwidth_limit_rule(
            policy_id,
            max_kbps=max_kbps, max_burst_kbps=max_burst_kbps,
            **kwargs)
        return self.resp_body(result, 'bandwidth_limit_rule')

    def delete_bandwidth_limit_rule(self, rule_id, policy_id_or_name):
        """Delete one bandwidth limit rule from the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.bandwidths_client.delete_bandwidth_limit_rule(
            rule_id, policy_id)
        return self.resp_body(result, 'bandwidth_limit_rule')

    def update_bandwidth_limit_rule(self, rule_id, policy_id_or_name,
                                    **kwargs):
        """Update one bandwidth limit rule."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.bandwidths_client.update_bandwidth_limit_rule(
            rule_id, policy_id, **kwargs)
        return self.resp_body(result, 'bandwidth_limit_rule')

    def list_bandwidth_limit_rules(self, policy_id_or_name, **filters):
        """List bandwidth limit rules of the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.bandwidths_client.list_bandwidth_limit_rules(
            policy_id, **filters)
        return self.resp_body(result, 'bandwidth_limit_rules')

    def show_bandwidth_limit_rule(self, rule_id, policy_id_or_name,
                                  **fields):
        """Show one bandwidth limit rule.

        Fix: forward **fields to the sub-client; previously they were
        accepted but silently dropped, unlike show_dscp_marking_rule.
        (Assumes the bandwidth sub-client's show method accepts **fields
        like its DSCP counterpart — TODO confirm against that client.)
        """
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.bandwidths_client.show_bandwidth_limit_rule(
            rule_id, policy_id, **fields)
        return self.resp_body(result, 'bandwidth_limit_rule')

    # --- dscp marking rules -------------------------------------------

    def create_dscp_marking_rule(self, policy_id_or_name, dscp_mark,
                                 **kwargs):
        """Create a DSCP marking rule under the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        kwargs['dscp_mark'] = dscp_mark
        result = self.dscps_client.create_dscp_marking_rule(
            policy_id, **kwargs)
        return self.resp_body(result, 'dscp_marking_rule')

    def delete_dscp_marking_rule(self, rule_id, policy_id_or_name):
        """Delete one DSCP marking rule from the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.dscps_client.delete_dscp_marking_rule(rule_id,
                                                            policy_id)
        return self.resp_body(result, 'dscp_marking_rule')

    def update_dscp_marking_rule(self, rule_id, policy_id_or_name,
                                 **kwargs):
        """Update one DSCP marking rule."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.dscps_client.update_dscp_marking_rule(
            rule_id, policy_id, **kwargs)
        return self.resp_body(result, 'dscp_marking_rule')

    def list_dscp_marking_rules(self, policy_id_or_name, **filters):
        """List DSCP marking rules of the given policy."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.dscps_client.list_dscp_marking_rules(
            policy_id, **filters)
        return self.resp_body(result, 'dscp_marking_rules')

    def show_dscp_marking_rule(self, rule_id, policy_id_or_name, **fields):
        """Show one DSCP marking rule, optionally restricted to fields."""
        policy_id = self.get_policy_id(policy_id_or_name)
        result = self.dscps_client.show_dscp_marking_rule(
            rule_id, policy_id, **fields)
        return self.resp_body(result, 'dscp_marking_rule')

    # --- rule types ---------------------------------------------------

    def list_rule_types(self):
        """List the QoS rule types the backend supports."""
        result = self.types_client.list_rule_types()
        return self.resp_body(result, 'rule_types')

    def available_rule_types(self):
        """Alias for :meth:`list_rule_types`."""
        return self.list_rule_types()

    def get_policy_id(self, policy_id_or_name):
        """Resolve a policy name to its id; pass ids through unchanged."""
        return self.policies_client.get_policy_id(policy_id_or_name)
|
@ -1,68 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class DscpMarkingRulesClient(base.BaseNetworkClient):
    """REST client for per-policy QoS DSCP marking rules."""

    resource = 'dscp_marking_rule'
    resource_plural = 'dscp_marking_rules'
    path = 'qos/policies'
    resource_base_path = '/%s/%%s/dscp_marking_rules' % path
    resource_object_path = '/%s/%%s/dscp_marking_rules/%%s' % path

    def create_dscp_marking_rule(self, policy_id, **kwargs):
        """POST a new DSCP marking rule under *policy_id*."""
        endpoint = self.resource_base_path % policy_id
        return self.create_resource(endpoint, {self.resource: kwargs})

    def update_dscp_marking_rule(self, rule_id, policy_id, **kwargs):
        """PUT updated attributes on an existing rule."""
        endpoint = self.resource_object_path % (policy_id, rule_id)
        return self.update_resource(endpoint, {self.resource: kwargs})

    def show_dscp_marking_rule(self, rule_id, policy_id, **fields):
        """GET one rule, optionally restricted to *fields*."""
        endpoint = self.resource_object_path % (policy_id, rule_id)
        return self.show_resource(endpoint, **fields)

    def delete_dscp_marking_rule(self, rule_id, policy_id):
        """DELETE one rule from *policy_id*."""
        endpoint = self.resource_object_path % (policy_id, rule_id)
        return self.delete_resource(endpoint)

    def list_dscp_marking_rules(self, policy_id, **filters):
        """GET all rules of *policy_id* matching *filters*."""
        endpoint = self.resource_base_path % policy_id
        return self.list_resources(endpoint, **filters)
|
||||
|
||||
|
||||
def get_client(client_mgr,
               set_property=False,
               with_name="qos_dscp_marking_rules_client"):
    """Build a QoS DSCP marking rules client.

    For tempest user:
        client = dscp_marking_rules_client.get_client(osn.adm)
    """
    mgr = getattr(client_mgr, 'manager', client_mgr)
    nets = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        # fall back to client defaults when timeout values are absent
        params = {}
    dscp_client = DscpMarkingRulesClient(nets.auth_provider,
                                         nets.service,
                                         nets.region,
                                         nets.endpoint_type,
                                         **params)
    if set_property:
        setattr(mgr, with_name, dscp_client)
    return dscp_client
|
@ -1,76 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class PoliciesClient(base.BaseNetworkClient):
    """REST client for Neutron QoS policy resources."""

    resource = 'policy'
    resource_plural = 'policies'
    path = 'qos/policies'
    resource_base_path = '/%s' % path
    resource_object_path = '/%s/%%s' % path

    def create_policy(self, **kwargs):
        """POST a new QoS policy built from *kwargs*."""
        return self.create_resource(self.resource_base_path,
                                    {self.resource: kwargs})

    def update_policy(self, policy_id, **kwargs):
        """PUT updated attributes on an existing policy."""
        endpoint = self.resource_object_path % policy_id
        return self.update_resource(endpoint, {self.resource: kwargs})

    def show_policy(self, policy_id, **fields):
        """GET a single policy, optionally restricted to *fields*."""
        endpoint = self.resource_object_path % policy_id
        return self.show_resource(endpoint, **fields)

    def delete_policy(self, policy_id):
        """DELETE the policy identified by *policy_id*."""
        endpoint = self.resource_object_path % policy_id
        return self.delete_resource(endpoint)

    def list_policies(self, **filters):
        """GET all policies matching *filters*."""
        return self.list_resources(self.resource_base_path, **filters)

    # utility
    def get_policy_id(self, policy_id_or_name):
        """Resolve a policy name to its id; ids pass through unchanged."""
        listing = self.list_policies(name=policy_id_or_name)
        matches = listing[self.resource_plural]
        if len(matches) > 0:
            return matches[0]['id']
        return policy_id_or_name
|
||||
|
||||
|
||||
def get_client(client_mgr,
               set_property=False,
               with_name="qos_policies_client"):
    """Build a QoS policies client from a manager or networks_client.

    For tempest user:
        client = policies_client.get_client(osn.adm)
    """
    mgr = getattr(client_mgr, 'manager', client_mgr)
    nets = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        # fall back to client defaults when timeout values are absent
        params = {}
    qos_client = PoliciesClient(nets.auth_provider,
                                nets.service,
                                nets.region,
                                nets.endpoint_type,
                                **params)
    if set_property:
        setattr(mgr, with_name, qos_client)
    return qos_client
|
@ -1,50 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class RuleTypesClient(base.BaseNetworkClient):
    """REST client for the read-only QoS rule-types collection."""

    resource = 'rule_type'
    resource_plural = 'rule_types'
    path = 'qos/rule-types'
    resource_base_path = '/%s' % path
    resource_object_path = '/%s/%%s' % path

    def list_rule_types(self):
        """GET every QoS rule type the backend advertises."""
        return self.list_resources(self.resource_base_path)
|
||||
|
||||
|
||||
def get_client(client_mgr,
               set_property=False,
               with_name="qos_rule_types_client"):
    """Build a QoS rule_types client from a manager or networks_client.

    For tempest user:
        client = rule_types_client.get_client(osn.adm)
    """
    mgr = getattr(client_mgr, 'manager', client_mgr)
    nets = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        # fall back to client defaults when timeout values are absent
        params = {}
    types_client = RuleTypesClient(nets.auth_provider,
                                   nets.service,
                                   nets.region,
                                   nets.endpoint_type,
                                   **params)
    if set_property:
        setattr(mgr, with_name, types_client)
    return types_client
|
@ -1,137 +0,0 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from tempest.lib.services.network import base
|
||||
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
|
||||
class BaseTagsClient(base.BaseNetworkClient):
    """Network client whose PUT can expect a non-default status code.

    https://bugs.launchpad.net/neutron/+bug/1606659
    tag-add is a CREATE operation, so the expected resp_code is 201,
    yet the API implements it with an http PUT request.
    """

    def update_resource(self, uri, post_data, resp_code=None):
        """PUT *post_data* to *uri*; verify *resp_code* when given."""
        if not resp_code:
            # default path: delegate to the stock network client behaviour
            return super(BaseTagsClient, self).update_resource(
                uri, post_data)
        full_uri = self.uri_prefix + uri
        payload = jsonutils.dumps(post_data)
        resp, raw_body = self.put(full_uri, payload)
        decoded = jsonutils.loads(raw_body)
        self.expected_success(resp_code, resp.status)
        return base.rest_client.ResponseBody(resp, decoded)
|
||||
|
||||
|
||||
class TagsClient(BaseTagsClient):
    """Client for the Neutron tag API on arbitrary network resources."""

    # URI templates filled via str.format with resource_type,
    # resource_id and (object path only) tag.
    resource_base_path = '/{resource_type}/{resource_id}/tags'
    resource_object_path = '/{resource_type}/{resource_id}/tags/{tag}'

    def add_tag(self, **kwargs):
        """add a tag to network resource.

        neutron tag-add
            --resource resource
            --resource-type network --tag TAG
        """
        uri = self.resource_object_path.format(
            **self._fix_args(**kwargs))
        # https://bugs.launchpad.net/neutron/+bug/1606659
        # tag-add is a PUT that must answer 201 (created), so we route
        # through BaseTagsClient.update_resource with an explicit code.
        return self.update_resource(uri, None, 201)

    def remove_tag(self, **kwargs):
        """remove a tag from network resource.

        neutron tag-remove
            --resource resource
            --resource-type network --tag TAG
        """
        # an 'all' key switches to bulk removal of every tag
        if 'all' in kwargs:
            return self.remove_all_tags(**kwargs)
        uri = self.resource_object_path.format(
            **self._fix_args(**kwargs))
        return self.delete_resource(uri)

    def remove_all_tags(self, **kwargs):
        """remove all tags from network resource.

        neutron tag-remove
            --resource resource
            --resource-type network --all
        """
        uri = self.resource_base_path.format(
            **self._fix_args(**kwargs))
        return self.delete_resource(uri)

    def replace_tag(self, **kwargs):
        """replace network resource's tag with list of tags.

        neutron tag-replace
            --resource resource
            --resource-type network --tag TAG
        """
        # 'tags' must be popped before _fix_args normalizes the rest
        tag_list = kwargs.pop('tags', None)
        kwargs = self._fix_args(**kwargs)
        # single-tag replace hits the object path, list replace the base
        if 'tag' in kwargs:
            uri = self.resource_object_path.format(**kwargs)
        else:
            uri = self.resource_base_path.format(**kwargs)
        update_body = None if tag_list is None else {"tags": tag_list}
        return self.update_resource(uri, update_body)

    def _fix_args(self, **kwargs):
        """Fix key-value of input fields.

        resource can be name; to simplify the design, only ID accepted.
        """
        if 'resource' in kwargs and 'resource_id' not in kwargs:
            kwargs['resource_id'] = kwargs['resource']
        if 'resource_type' in kwargs:
            # the REST path uses the plural collection name
            if kwargs['resource_type'][-1] != 's':
                kwargs['resource_type'] += "s"
        else:
            # default to networks when no resource type was given
            kwargs['resource_type'] = 'networks'
        return kwargs
|
||||
|
||||
|
||||
def get_client(client_mgr,
               set_property=False, with_name="tags_client"):
    """Build a network tags client from a manager or networks_client.

    client = tags_client.get_client(manager)
    """
    mgr = getattr(client_mgr, 'manager', client_mgr)
    nets = getattr(mgr, 'networks_client')
    try:
        params = base_client.default_params_with_timeout_values.copy()
    except Exception:
        # fall back to client defaults when timeout values are absent
        params = {}
    tag_client = TagsClient(nets.auth_provider,
                            nets.service,
                            nets.region,
                            nets.endpoint_type,
                            **params)
    if set_property:
        setattr(mgr, with_name, tag_client)
    return tag_client
|
@ -1,21 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
def ceil(a, b):
    """Return the ceiling of a / b for integers, or 0 when b is 0.

    The division-by-zero case deliberately yields 0 rather than raising,
    preserving the original contract.
    """
    if b == 0:
        return 0
    # divmod keeps the result an int on Python 3 (the original's "a / b"
    # would be float true-division there) and replaces the identity test
    # "a % b is 0", which is only accidentally true for small cached ints.
    quotient, remainder = divmod(a, b)
    return quotient + (1 if remainder else 0)
|
@ -1,446 +0,0 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest._i18n import _
|
||||
from vmware_nsx_tempest.services import tags_client
|
||||
|
||||
# Tempest configuration handle and module-level logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Maximum tag length accepted by the Neutron tag API; longer tags 400.
MAX_TAG_LEN = 60
|
||||
|
||||
|
||||
class BaseTagsTest(base.BaseNetworkTest):
    """Base class for Tags Test.

    Provides class-level tag add/remove/replace helpers plus instance
    helpers that assert the resulting tag list on the network.
    """

    @classmethod
    def skip_checks(cls):
        """skip tests if the tags feature is not enabled."""
        super(BaseTagsTest, cls).skip_checks()
        if not test.is_extension_enabled('tag', 'network'):
            msg = "network tag extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        """setup resources."""
        super(BaseTagsTest, cls).resource_setup()
        cls.primary_mgr = cls.get_client_manager()
        cls.tags_client = tags_client.get_client(cls.primary_mgr)

    @classmethod
    def resource_cleanup(cls):
        """cleanup resources before handing over to framework."""
        super(BaseTagsTest, cls).resource_cleanup()

    @classmethod
    def list_networks(cls, **filters):
        # thin wrapper returning just the 'networks' list
        nets = cls.networks_client.list_networks(**filters)
        return nets.get('networks')

    @classmethod
    def tag_add(cls, network_id, tag, resource_type='network'):
        # add one tag, then re-read the network to expose its tag list
        cls.tags_client.add_tag(resource_type=resource_type,
                                resource_id=network_id,
                                tag=tag)
        network = cls.networks_client.show_network(network_id)
        return network.get('network')

    @classmethod
    def tag_remove(cls, network_id, tag, resource_type='network'):
        # remove one tag, then re-read the network
        cls.tags_client.remove_tag(resource_type=resource_type,
                                   resource_id=network_id,
                                   tag=tag)
        network = cls.networks_client.show_network(network_id)
        return network.get('network')

    @classmethod
    def tag_replace(cls, network_id, tags, resource_type='network'):
        # replace the whole tag list; a scalar tag is wrapped in a list
        req_body = dict(resource_type=resource_type, resource_id=network_id)
        if type(tags) in (list, tuple, set):
            req_body['tags'] = tags
        else:
            req_body['tags'] = [tags]
        cls.tags_client.replace_tag(**req_body)
        network = cls.networks_client.show_network(network_id)
        return network.get('network')

    def network_add_tag(self, network_id, tag):
        # add and assert the tag is now present
        network = self.tag_add(network_id, tag, 'network')
        self.assertIn(tag, network['tags'])
        return network

    def network_remove_tag(self, network_id, tag):
        # remove and assert the tag is gone
        network = self.tag_remove(network_id, tag, 'network')
        self.assertNotIn(tag, network['tags'])
        return network

    def network_replace_tags(self, network_id, tags=None):
        # replace with *tags* (or a default sample) and assert exact match
        if tags is None:
            tags = ['a', 'ab', 'abc']
        network = self.tag_replace(network_id, tags, 'network')
        self.assertEqual(len(tags), len(network['tags']))
        for tag in tags:
            self.assertIn(tag, network['tags'])
        return network
|
||||
|
||||
|
||||
class NetworkTagAddTest(BaseTagsTest):
    """neutron tag-add test."""

    @classmethod
    def resource_setup(cls):
        """setup resources."""
        super(NetworkTagAddTest, cls).resource_setup()
        # one shared network that every tag-add case operates on
        cls.net = cls.create_network()

    @decorators.idempotent_id('0e37a579-aff3-47ba-9f1f-3ac4482fce16')
    def test_add_tags(self):
        """neutron tag-add operations."""
        tags = ['a', 'gold', 'T' * MAX_TAG_LEN]
        network_id = self.net.get('id')
        # check we can add tag one at time
        for tag in tags:
            network = self.network_add_tag(network_id, tag)
        # and all added tags exist.
        for tag in tags:
            self.assertIn(tag, network['tags'])

    @decorators.idempotent_id('eb52eac3-5e79-4183-803a-a3d97ceb171d')
    @decorators.attr(type='negative')
    def test_add_tag_one_char_too_long(self):
        # one char past the limit must be rejected with 400 BadRequest
        tag_too_long = 'a' * (MAX_TAG_LEN + 1)
        network_id = self.net.get('id')
        self.assertRaises(exceptions.BadRequest,
                          self.network_add_tag,
                          network_id, tag_too_long)

    @decorators.idempotent_id('d08f3fbe-dc6f-4f3c-b9b2-4d9957884edf')
    @decorators.attr(type='negative')
    def test_add_tag_empty_one(self):
        # the empty tag does not name a resource, so the API answers 404
        network_id = self.net.get('id')
        self.assertRaises(exceptions.NotFound,
                          self.network_add_tag,
                          network_id, '')
|
||||
|
||||
|
||||
class NetworkTagRemoveTest(BaseTagsTest):
    """neutron tag-remove test."""

    @classmethod
    def resource_setup(cls):
        """setup resources."""
        super(NetworkTagRemoveTest, cls).resource_setup()
        # one shared network that every tag-remove case operates on
        cls.net = cls.create_network()

    @decorators.idempotent_id('178fbd96-900f-4c3d-8cd1-5525f4cf2b81')
    def test_remove_tags(self):
        """neutron tag-remove operations."""
        network_id = self.net.get('id')
        tag = 'spinning-tail'
        self.network_add_tag(network_id, tag)
        self.network_remove_tag(network_id, tag)

    @decorators.idempotent_id('1fe5a8b2-ff5d-4250-b930-21b1a3b48055')
    @decorators.attr(type='negative')
    def test_remove_all_tags(self):
        # tag several, bulk-remove with all=True, expect none left
        network_id = self.net.get('id')
        self.network_replace_tags(network_id)
        req_body = dict(resource_type='network',
                        resource_id=network_id, all=True)
        self.tags_client.remove_tag(**req_body)
        network = self.networks_client.show_network(network_id)['network']
        self.assertEqual(len(network['tags']), 0)

    @decorators.idempotent_id('591337b0-a2e6-4d72-984c-e5b6a6ec12d2')
    @decorators.attr(type='negative')
    def test_remove_not_exist_tag(self):
        """neutron tag-remove operations."""
        # removing a tag that was never added must answer 404
        network_id = self.net.get('id')
        tag_not_tagged = 'talking-head'
        self.assertRaises(exceptions.NotFound,
                          self.network_remove_tag,
                          network_id, tag_not_tagged)
|
||||
|
||||
|
||||
class NetworkTagReplaceTest(BaseTagsTest):
    """neutron tag-replace test."""

    @classmethod
    def resource_setup(cls):
        """setup resources."""
        super(NetworkTagReplaceTest, cls).resource_setup()
        # one shared network that every tag-replace case operates on
        cls.net = cls.create_network()

    @decorators.idempotent_id('7d4fb288-2f2d-4f47-84af-be3175b057b5')
    def test_replace_tags(self):
        """neutron tag-replace operations."""
        network_id = self.net.get('id')
        tags = ['east', 'south', 'west', 'north']
        self.network_replace_tags(network_id, tags)
        new_tags = ['BIG', 'small']
        self.network_replace_tags(network_id, new_tags)
        # EQ to remove all
        empty_tags = []
        self.network_replace_tags(network_id, empty_tags)

    @decorators.idempotent_id('20a05e9e-0b25-4085-b89f-fd5f0c57d2fa')
    @decorators.attr(type='negative')
    def test_replace_tags_one_char_too_long(self):
        # a single over-long tag in the list must fail the whole replace
        tags_too_long = ['aaa', 'z' * (MAX_TAG_LEN + 1)]
        network_id = self.net.get('id')
        self.assertRaises(exceptions.BadRequest,
                          self.network_replace_tags,
                          network_id, tags_too_long)
|
||||
|
||||
|
||||
class NetworkTagFilterTest(BaseTagsTest):
|
||||
"""searching networks using tags querying params.
|
||||
|
||||
Four query parameters are supported:
|
||||
|
||||
Q-param Q-procedure
|
||||
------------ -----------
|
||||
tags x_and_y
|
||||
tags-any x_or_y
|
||||
not-tags not_x_and_y
|
||||
not-tags-any not_x_or_y
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
"""setup default values for filtering tests."""
|
||||
super(NetworkTagFilterTest, cls).resource_setup()
|
||||
cls.a_b_c = ['a', 'ab', 'abc']
|
||||
cls.not_tagged_tags = ['talking-head', 'spinning-tail']
|
||||
cls._tags = (['east', 'gold', 'production'],
|
||||
['west', 'silver', 'development'],
|
||||
['north', 'brown', 'development', 'abc'],
|
||||
['south', 'brown', 'testing', 'a'],
|
||||
['west', 'gold', 'production', 'ab'],
|
||||
['east', 'silver', 'testing'],
|
||||
['north', 'gold', 'production'],
|
||||
['south', 'silver', 'testing'])
|
||||
cls.QQ = {'router:external': False}
|
||||
cls.GG = {}
|
||||
for ix in range(0, len(cls._tags)):
|
||||
net = cls.create_network()
|
||||
tags = cls._tags[ix]
|
||||
net = cls.tag_replace(net['id'], tags=tags)
|
||||
if not (set(net['tags']) == set(cls._tags[ix])):
|
||||
raise Exception(
|
||||
_("tags[%s] are not tag-replace successfully.") % tags)
|
||||
net_id = net['id']
|
||||
cls.GG[net_id] = set(net['tags'])
|
||||
|
||||
def check_matched_search_list(self, matched_nets, m_net_list, title):
|
||||
LOG.info(_("Expected_nets[{0}]: {1}").format(title, m_net_list))
|
||||
LOG.info(_("Number of matched_nets: {0}").format(len(matched_nets)))
|
||||
self.assertEqual(len(matched_nets), len(m_net_list))
|
||||
for net in matched_nets:
|
||||
self.assertIn(net['id'], m_net_list)
|
||||
|
||||
@decorators.idempotent_id('9646af99-7e04-4724-ac54-4a938de764f1')
|
||||
def test_tags_only_one_network(self):
|
||||
"""each tag in self.a_b_c only tag one network."""
|
||||
for tag in self.a_b_c:
|
||||
filters = {'tags': tag}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.assertEqual(len(nets), 1)
|
||||
|
||||
@decorators.idempotent_id('5632b745-651a-444f-922d-6434e060991a')
|
||||
def test_tags_any_only_one_network(self):
|
||||
"""each tag in self.a_b_c only tag one network."""
|
||||
for tag in self.a_b_c:
|
||||
filters = {'tags-any': tag}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.assertEqual(len(nets), 1)
|
||||
|
||||
@decorators.idempotent_id('a0d8c21b-1ec0-4c6d-b5d8-72baebabde26')
|
||||
def test_tags_not_tagged(self):
|
||||
"""search with tags for tags not being tagged."""
|
||||
for tag in self.not_tagged_tags:
|
||||
filters = {'tags': tag}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.assertEqual(len(nets), 0)
|
||||
|
||||
@decorators.idempotent_id('1049eac1-028b-4664-aeb7-c7656240622d')
|
||||
def test_tags_any_not_tagged(self):
|
||||
"""search with tags-any for tags not being tagged."""
|
||||
for tag in self.not_tagged_tags:
|
||||
filters = {'tags-any': tag}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.assertEqual(len(nets), 0)
|
||||
|
||||
@decorators.idempotent_id('a9b42503-5dd1-490d-b0c6-673951cc86a1')
|
||||
def test_tags(self):
|
||||
"""find networks having tags (and operation)"""
|
||||
tags = ['gold', 'production']
|
||||
m_net_list = x_and_y(tags, self.GG)
|
||||
filters = {'tags': tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list, 'tags')
|
||||
|
||||
@decorators.idempotent_id('c38e788d-749e-401a-8bbb-26e36a7b573f')
|
||||
def test_tags_any(self):
|
||||
"""find networks having tags-any (or operation)"""
|
||||
tags = ['gold', 'production']
|
||||
m_net_list = x_or_y(tags, self.GG)
|
||||
filters = {'tags-any': tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list, 'tags-any')
|
||||
|
||||
@decorators.idempotent_id('e7bb1cea-3271-418c-bfe2-038fff6187e6')
|
||||
def test_not_tags(self):
|
||||
"""find networks not having not-tags (and operation)"""
|
||||
tags = ['gold', 'production']
|
||||
m_net_list = not_x_and_y(tags, self.GG)
|
||||
filters = {'not-tags': tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list, 'not-tags')
|
||||
|
||||
@decorators.idempotent_id('c36a1d00-c131-4297-86c1-a3fc06c61629')
|
||||
def test_not_tags_any(self):
|
||||
"""find networks not having not-tags-any (or operation)"""
|
||||
tags = ['gold', 'production']
|
||||
m_net_list = not_x_or_y(tags, self.GG)
|
||||
filters = {'not-tags-any': tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list, 'not-tags-any')
|
||||
|
||||
@decorators.idempotent_id('4345e944-6b2b-4106-a208-ce07cefe764f')
|
||||
def test_tags_any_not_tags(self):
|
||||
"""find networks having tags-any and not-tags."""
|
||||
tags = ['gold', 'production']
|
||||
not_tags = ['west']
|
||||
m_net_list = not_x_and_y(not_tags, self.GG,
|
||||
x_or_y(tags, self.GG))
|
||||
filters = {'tags-any': tags, 'not-tags': not_tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list,
|
||||
'tags-any & not-tags')
|
||||
|
||||
@decorators.idempotent_id('0d635ba7-5c94-4a24-b7a8-d3b413d1ec83')
|
||||
@decorators.skip_because(bug="1611054")
|
||||
def test_tags_tags_any(self):
|
||||
"""finding networks using tags and tags-any."""
|
||||
tags = ['production']
|
||||
tags_any = ['east', 'west']
|
||||
m_net_list = x_or_y(tags_any, self.GG,
|
||||
x_and_y(tags, self.GG))
|
||||
filters = {'tags': tags, 'tags-any': tags_any}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list,
|
||||
'tags & tags-any')
|
||||
|
||||
@decorators.idempotent_id('2067a8fc-2d7b-4085-a6c2-7e454f6f26f3')
|
||||
def test_tags_not_tags_any(self):
|
||||
"""finding networks using tags and not-tags-any."""
|
||||
tags = ['gold', 'production']
|
||||
not_tags = ['east', 'west', 'silver']
|
||||
m_net_list = not_x_or_y(not_tags, self.GG,
|
||||
x_and_y(tags, self.GG))
|
||||
filters = {'tags': tags, 'not-tags-any': not_tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list,
|
||||
'tags & not-tags-any')
|
||||
|
||||
@decorators.idempotent_id('f2bbf51c-e32e-4664-a0db-59eed493c3d1')
|
||||
def test_tags_not_tags_any2(self):
|
||||
"""finding networks using tags and not-tags-any."""
|
||||
tags = ['gold', 'production']
|
||||
not_tags = ['west', 'east']
|
||||
m_net_list = not_x_or_y(not_tags, self.GG,
|
||||
x_and_y(tags, self.GG))
|
||||
filters = {'tags': tags, 'not-tags-any': not_tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list,
|
||||
'tags & not-tags-any2')
|
||||
|
||||
@decorators.idempotent_id('7b17dfa8-f7ac-47c2-b814-35c5ed1c325b')
|
||||
def test_tags_not_tags(self):
|
||||
"""finding networks using tags and not-tags."""
|
||||
tags = ['gold', 'production']
|
||||
not_tags = ['west']
|
||||
m_net_list = not_x_and_y(not_tags, self.GG,
|
||||
x_and_y(tags, self.GG))
|
||||
filters = {'tags': tags, 'not-tags': not_tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.check_matched_search_list(nets, m_net_list,
|
||||
'tags & not-tags')
|
||||
|
||||
@decorators.idempotent_id('f723f717-660b-4d8e-ae9f-014f0a3f812d')
|
||||
def test_tags_not_tags_itself(self):
|
||||
""""tags and not-tags itself is always an empty set."""
|
||||
tags = ['gold', 'production']
|
||||
not_x_and_y(tags, self.GG, x_and_y(tags, self.GG))
|
||||
filters = {'tags': tags, 'not-tags': tags}
|
||||
filters.update(self.QQ)
|
||||
nets = self.list_networks(**filters)
|
||||
self.assertEqual(0, len(nets))
|
||||
|
||||
|
||||
# search/filter methods
|
||||
# K_sets: Dict of sets
|
||||
def x_and_y(x_and_y, K_sets, on_keys=None):
    """tags=x_and_y: keys of K_sets whose tag set contains every tag.

    Emulates the 'tags' (AND) search filter.  K_sets maps a key (e.g. a
    network id) to its set of tags.  When on_keys is given, only those
    keys are considered candidates.  Returns the matching keys in
    K_sets iteration order.
    """
    required = set(x_and_y)
    matched = []
    for key, tag_set in K_sets.items():
        if on_keys is not None and key not in on_keys:
            continue
        if required.issubset(tag_set):
            matched.append(key)
    return matched
|
||||
|
||||
|
||||
def not_x_and_y(x_and_y, K_sets, on_keys=None):
    """not-tags=x_and_y: keys NOT carrying every one of the tags.

    Complement of x_and_y(): a key matches unless its tag set contains
    all tags in x_and_y.  When on_keys is given, only those keys are
    considered candidates.  Returns matching keys in K_sets iteration
    order.
    """
    required = set(x_and_y)
    matched = []
    for key, tag_set in K_sets.items():
        if on_keys is not None and key not in on_keys:
            continue
        if not required.issubset(tag_set):
            matched.append(key)
    return matched
|
||||
|
||||
|
||||
def x_or_y(x_or_y, K_sets, on_keys=None):
    """tags-any=x_or_y: keys carrying at least one of the tags.

    Emulates the 'tags-any' (OR) search filter.  When on_keys is given,
    only those keys are considered candidates.  Returns matching keys
    in K_sets iteration order.
    """
    wanted = set(x_or_y)
    matched = []
    for key, tag_set in K_sets.items():
        if on_keys is not None and key not in on_keys:
            continue
        # Non-empty intersection == at least one wanted tag present.
        if not wanted.isdisjoint(tag_set):
            matched.append(key)
    return matched
|
||||
|
||||
|
||||
def not_x_or_y(x_or_y, K_sets, on_keys=None):
    """not tags-any=x_or_y: keys carrying NONE of the tags.

    Complement of x_or_y(): a key matches only when its tag set is
    disjoint from x_or_y.  When on_keys is given, only those keys are
    considered candidates.  Returns matching keys in K_sets iteration
    order.
    """
    banned = set(x_or_y)
    matched = []
    for key, tag_set in K_sets.items():
        if on_keys is not None and key not in on_keys:
            continue
        if banned.isdisjoint(tag_set):
            matched.append(key)
    return matched
|
@ -1,856 +0,0 @@
|
||||
# Copyright 2017 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.services.qos import base_qos
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BaseQosTest(base.BaseAdminNetworkTest):
    """Base class for Qos Test.

    1. Setup QoS clients for admin and primary users.
    2. Manages qos resources creation and deletion.
    3. Manages network/port creation and deletion as network cannot be
       deleted if ports are associated which test framework won't handle.
    """

    @classmethod
    def skip_checks(cls):
        """skip tests if qos is not enabled."""
        super(BaseQosTest, cls).skip_checks()
        if not test.is_extension_enabled('qos', 'network'):
            msg = "q-qos extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        """setup resources."""
        super(BaseQosTest, cls).resource_setup()
        # Two client stacks: admin for privileged QoS operations and a
        # primary (project) one to exercise non-admin behavior.
        cls.admin_mgr = cls.get_client_manager(credential_type='admin')
        cls.primary_mgr = cls.get_client_manager()
        cls.adm_qos_client = base_qos.BaseQosClient(cls.admin_mgr)
        cls.pri_qos_client = base_qos.BaseQosClient(cls.primary_mgr)
        cls.qos_available_rule_types = (
            cls.adm_qos_client.available_rule_types())
        # Policies registered by create_qos_policy(); torn down again in
        # resource_cleanup().
        cls.policies_created = []

    @classmethod
    def resource_cleanup(cls):
        """cleanup resources before handing over to framework."""
        # Order matters: ports first, then their network, then policies.
        for network in cls.networks:
            # network cannot be deleted if its ports have policy associated.
            port_list = cls.admin_mgr.ports_client.list_ports(
                network_id=network['id'])['ports']
            for port in port_list:
                test_utils.call_and_ignore_notfound_exc(
                    cls.delete_port, port['id'])
            test_utils.call_and_ignore_notfound_exc(
                cls.delete_network, network['id'])
        for policy in cls.policies_created:
            test_utils.call_and_ignore_notfound_exc(
                cls.adm_qos_client.delete_policy, policy['id'])
        super(BaseQosTest, cls).resource_cleanup()

    @classmethod
    def create_port(cls, network, client_mgr=None, **kwargs):
        """create port.

        Defaults to the admin client manager; the created port is
        tracked in cls.ports for cleanup.
        """
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        body = client_mgr.ports_client.create_port(
            network_id=network['id'], **kwargs)
        # Responses may or may not be wrapped in a 'port' envelope.
        port = body.get('port', body)
        cls.ports.append(port)
        return port

    @classmethod
    def update_port(cls, port_id, client_mgr=None, **kwargs):
        """update port."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        body = client_mgr.ports_client.update_port(
            port_id, **kwargs)
        return body.get('port', body)

    @classmethod
    def show_port(cls, port_id, client_mgr=None):
        """show port."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        body = client_mgr.ports_client.show_port(port_id)
        return body.get('port', body)

    @classmethod
    def delete_port(cls, port_id, client_mgr=None, **kwargs):
        """delete port."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        body = client_mgr.ports_client.delete_port(port_id)
        return body.get('port', body)

    @classmethod
    def create_network(cls, network_name=None, client_mgr=None, **kwargs):
        """create network.

        A random 'qos-net' name is generated when none is given; the
        created network is tracked in cls.networks for cleanup.
        """
        network_name = network_name or data_utils.rand_name('qos-net')
        client_mgr = client_mgr if client_mgr else cls.admin_mgr

        body = client_mgr.networks_client.create_network(
            name=network_name, **kwargs)
        network = body['network']
        cls.networks.append(network)
        return network

    @classmethod
    def create_shared_network(cls, network_name=None, client_mgr=None,
                              **kwargs):
        """create shared network."""
        return cls.create_network(network_name, client_mgr,
                                  shared=True, **kwargs)

    @classmethod
    def show_network(cls, network_id, client_mgr=None):
        """show network."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        network = client_mgr.networks_client.show_network(network_id)
        return network.get('network', network)

    @classmethod
    def update_network(cls, network_id, client_mgr=None, **kwargs):
        """update network."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        network = client_mgr.networks_client.update_network(
            network_id, **kwargs)
        return network.get('network', network)

    @classmethod
    def delete_network(cls, network_id, client_mgr=None):
        """delete network."""
        client_mgr = client_mgr if client_mgr else cls.admin_mgr
        network = client_mgr.networks_client.delete_network(network_id)
        return network.get('network', network)

    @classmethod
    def create_qos_policy(cls, name='test-policy',
                          description='test policy desc',
                          shared=False,
                          qos_client=None, **kwargs):
        """create qos policy.

        Defaults to the admin QoS client; the policy is registered in
        cls.policies_created for cleanup.
        """
        qos_client = qos_client if qos_client else cls.adm_qos_client
        policy = qos_client.create_policy(
            name=name, description=description,
            shared=shared, **kwargs)
        cls.policies_created.append(policy)
        return policy

    @classmethod
    def create_qos_bandwidth_limit_rule(cls, policy_id,
                                        qos_client=None, **kwargs):
        """create qos-bandwidth-limit-rule."""
        qos_client = qos_client if qos_client else cls.adm_qos_client
        rule = qos_client.create_bandwidth_limit_rule(policy_id, **kwargs)
        return rule

    @classmethod
    def create_qos_dscp_marking_rule(cls, policy_id, dscp_mark,
                                     qos_client=None, **kwargs):
        """create qos-dscp-marking-rule."""
        qos_client = qos_client if qos_client else cls.adm_qos_client
        rule = qos_client.create_dscp_marking_rule(
            policy_id, dscp_mark, **kwargs)
        return rule
|
||||
|
||||
class QosPolicyTest(BaseQosTest):
    """QoS Policy CRUD operations.

    test qos policies and network/port association and disassociation.
    """

    @decorators.idempotent_id('108fbdf7-3463-4e47-9871-d07f3dcf5bbb')
    def test_create_policy(self):
        """qos-policy-create: create policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy desc1',
                                        shared=False)

        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        # Test 'show policy'
        retrieved_policy = self.adm_qos_client.show_policy(policy['id'])
        self.assertEqual('test-policy', retrieved_policy['name'])
        self.assertEqual('test policy desc1',
                         retrieved_policy['description'])
        self.assertFalse(retrieved_policy['shared'])

        # Test 'list policies'
        policies = self.adm_qos_client.list_policies()
        policies_ids = [p['id'] for p in policies]
        self.assertIn(policy['id'], policies_ids)

    @decorators.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
    def test_list_policy_filter_by_name(self):
        """qos-policy-list --name=<name>: list policies."""
        # name2 is a superstring of name1; the name filter must match
        # exactly, so only name1's policy should be returned.
        name1 = data_utils.rand_name('test-policy')
        name2 = name1 + "0"
        policy_name1 = self.create_qos_policy(
            name=name1, description='test policy', shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy_name1['id'])
        policy_name2 = self.create_qos_policy(
            name=name2, description='test policy', shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy_name2['id'])
        policies = self.adm_qos_client.list_policies(name=name1)
        self.assertEqual(1, len(policies))

        retrieved_policy = policies[0]
        self.assertEqual(name1, retrieved_policy['name'])

    @decorators.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
    def test_policy_update(self):
        """qos-policy-update POLICY_ID."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        self.adm_qos_client.update_policy(policy['id'],
                                          description='test policy desc2',
                                          shared=True)

        retrieved_policy = self.adm_qos_client.show_policy(policy['id'])
        self.assertEqual('test policy desc2',
                         retrieved_policy['description'])
        self.assertTrue(retrieved_policy['shared'])
        # A freshly created policy carries no rules.
        self.assertEmpty(retrieved_policy['rules'])

    @decorators.idempotent_id('1cb42653-54bd-4a9a-b888-c55e18199201')
    def test_delete_policy(self):
        """qos-policy-delete POLICY_ID."""
        policy = self.create_qos_policy(
            'test-policy', 'desc', True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        retrieved_policy = self.adm_qos_client.show_policy(policy['id'])
        self.assertEqual('test-policy', retrieved_policy['name'])

        self.adm_qos_client.delete_policy(policy['id'])
        self.assertRaises(exceptions.NotFound,
                          self.adm_qos_client.show_policy, policy['id'])

    # NOTE: underscore-prefixed, so not collected by the test runner.
    def _test_list_admin_rule_types(self):
        """qos-available-rule-types: available rule type from admin view."""
        self._test_list_rule_types(self.adm_qos_client)

    def _test_list_regular_rule_types(self):
        """qos-available-rule-types: available rule type from project view."""
        self._test_list_rule_types(self.pri_qos_client)

    def _test_list_rule_types(self, client):
        # List supported rule types
        # TODO(QoS): since in gate we run both ovs and linuxbridge ml2 drivers,
        # and since Linux Bridge ml2 driver does not have QoS support yet, ml2
        # plugin reports no rule types are supported. Once linuxbridge will
        # receive support for QoS, the list of expected rule types will change.
        #
        # In theory, we could make the test conditional on which ml2 drivers
        # are enabled in gate (or more specifically, on which supported qos
        # rules are claimed by core plugin), but that option doesn't seem to be
        # available thru tempest.lib framework
        expected_rule_types = []
        expected_rule_details = ['type']

        rule_types = client.available_rule_types()
        actual_rule_types = [rule['type'] for rule in rule_types]

        # TODO(akang): seems not correct
        # Verify that only required fields present in rule details
        for rule in actual_rule_types:
            self.assertEqual(tuple(rule.keys()), tuple(expected_rule_details))

        # Verify if expected rules are present in the actual rules list
        for rule in expected_rule_types:
            self.assertIn(rule, actual_rule_types)

    def _disassociate_network(self, network_id, client_mgr=None):
        # Detach any QoS policy from the network and verify detachment.
        self.update_network(network_id, client_mgr=client_mgr,
                            qos_policy_id=None)
        updated_network = self.show_network(network_id,
                                            client_mgr=client_mgr)
        self.assertIsNone(updated_network['qos_policy_id'])

    @decorators.idempotent_id('65b9ef75-1911-406a-bbdb-ca1d68d528b0')
    def test_policy_association_with_admin_network(self):
        """admin can create network with non-shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network',
                                             qos_policy_id=policy['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        retrieved_network = self.show_network(network['id'])
        self.assertEqual(
            policy['id'], retrieved_network['qos_policy_id'])
        # Detach before cleanup so the policy can be deleted.
        self._disassociate_network(network['id'], self.admin_mgr)

    @decorators.idempotent_id('1738de5d-0476-4163-9022-5e1b548c208e')
    def test_policy_association_with_tenant_network(self):
        """project/tenant can create network with shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_network('test network',
                                      client_mgr=self.primary_mgr,
                                      qos_policy_id=policy['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        retrieved_network = self.show_network(network['id'],
                                              client_mgr=self.primary_mgr)
        self.assertEqual(
            policy['id'], retrieved_network['qos_policy_id'])

        self._disassociate_network(network['id'], self.primary_mgr)

    @decorators.attr(type='negative')
    @decorators.idempotent_id('9efe63d0-836f-4cc2-b00c-468e63aa614e')
    def test_policy_association_with_network_nonexistent_policy(self):
        """Can not attach network to a nonexistent policy."""
        network = self.create_network(
            'test network',
            qos_policy_id='9efe63d0-836f-4cc2-b00c-468e63aa614e')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        retrieved_network = self.show_network(network['id'])
        # check if network is not attached to the policy
        self.assertIsNone(retrieved_network['qos_policy_id'],
                          'Error: Network is attached to non-existent policy')

    @decorators.attr(type='negative')
    @decorators.idempotent_id('1aa55a79-324f-47d9-a076-894a8fc2448b')
    def test_policy_association_with_network_non_shared_policy(self):
        """tenant/project can not attach network with not-shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_network(
            'test network',
            qos_policy_id=policy['id'],
            client_mgr=self.primary_mgr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        retrieved_network = self.show_network(network['id'],
                                              client_mgr=self.primary_mgr)
        # check if network is not attached to the policy
        self.assertIsNone(retrieved_network['qos_policy_id'],
                          'Error: Network is attached to QoS policy')

    @decorators.idempotent_id('10a9392c-1359-4cbb-989f-fb768e5834a8')
    def test_policy_update_association_with_admin_network(self):
        """admin can create associate non-shared policy to network."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        retrieved_network = self.show_network(network['id'])
        self.assertIsNone(retrieved_network['qos_policy_id'])

        self.update_network(
            network['id'], qos_policy_id=policy['id'])
        retrieved_network = self.show_network(network['id'])
        self.assertEqual(
            policy['id'], retrieved_network['qos_policy_id'])

        self._disassociate_network(network['id'], self.admin_mgr)

    def _disassociate_port(self, port_id, client_mgr=None):
        # Detach any QoS policy from the port and verify detachment.
        client_mgr = client_mgr if client_mgr else self.admin_mgr
        self.update_port(port_id, qos_policy_id=None,
                         client_mgr=client_mgr)
        updated_port = self.show_port(port_id, client_mgr=client_mgr)
        self.assertIsNone(updated_port['qos_policy_id'])

    @decorators.attr(type='nsxv3')
    @decorators.attr(type='negative')
    @decorators.idempotent_id('98fcd95e-84cf-4746-860e-44692e674f2e')
    def test_policy_association_with_port_shared_policy(self):
        """test port can associate shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        port = self.create_port(network, qos_policy_id=policy['id'],
                                client_mgr=self.primary_mgr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_port, port['id'])
        retrieved_port = self.show_port(port['id'],
                                        client_mgr=self.primary_mgr)
        self.assertEqual(
            policy['id'], retrieved_port['qos_policy_id'])

        self._disassociate_port(port['id'], client_mgr=self.primary_mgr)

    @decorators.attr(type='negative')
    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
    def test_policy_association_with_port_nonexistent_policy(self):
        """test port cannot be created with nonexistent policy."""
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        self.assertRaises(
            exceptions.NotFound,
            self.create_port,
            network,
            qos_policy_id='49e02f5a-e1dd-41d5-9855-cfa37f2d195e')

    @decorators.attr(type='negative')
    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('f53d961c-9fe5-4422-8b66-7add972c6031')
    def test_policy_association_with_port_non_shared_policy(self):
        """project/tenant can not associate port with non-shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        port = self.create_port(network, qos_policy_id=policy['id'],
                                client_mgr=self.primary_mgr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_port, port['id'])
        retrieved_port = self.show_port(port['id'],
                                        client_mgr=self.primary_mgr)
        # check if port is not attached to the policy
        self.assertIsNone(retrieved_port['qos_policy_id'],
                          'Error:Port is attached to qos policy')

    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('f8163237-fba9-4db5-9526-bad6d2343c76')
    def test_policy_update_association_with_port_shared_policy(self):
        """project/tenant can update port with shared policy."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        port = self.create_port(network, client_mgr=self.primary_mgr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_port, port['id'])
        retrieved_port = self.show_port(port['id'],
                                        client_mgr=self.primary_mgr)
        self.assertIsNone(retrieved_port['qos_policy_id'])

        self.update_port(port['id'], qos_policy_id=policy['id'],
                         client_mgr=self.primary_mgr)
        retrieved_port = self.show_port(port['id'],
                                        client_mgr=self.primary_mgr)
        self.assertEqual(
            policy['id'], retrieved_port['qos_policy_id'])

        self._disassociate_port(port['id'])

    @decorators.attr(type='negative')
    @decorators.idempotent_id('18163237-8ba9-4db5-9525-bad6d2343c75')
    def test_delete_not_allowed_if_policy_in_use_by_network(self):
        """can not delete policy if used by network."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network(
            'test network', qos_policy_id=policy['id'])
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        # Deleting an in-use policy must be rejected with a conflict.
        self.assertRaises(
            exceptions.Conflict,
            self.adm_qos_client.delete_policy, policy['id'])

        self._disassociate_network(network['id'], self.admin_mgr)
        self.adm_qos_client.delete_policy(policy['id'])

    @decorators.attr(type='negative')
    @decorators.attr(type='nsxv3')
    @decorators.idempotent_id('24153230-84a9-4dd5-9525-bad6d2343c75')
    def test_delete_not_allowed_if_policy_in_use_by_port(self):
        """can not delete policy if used by port."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=True)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        network = self.create_shared_network('test network')
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, network['id'])
        port = self.create_port(network, qos_policy_id=policy['id'],
                                client_mgr=self.primary_mgr)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_port, port['id'])
        # Deleting an in-use policy must be rejected with a conflict.
        self.assertRaises(
            exceptions.Conflict,
            self.adm_qos_client.delete_policy, policy['id'])

        self._disassociate_port(port['id'], client_mgr=self.primary_mgr)
        self.adm_qos_client.delete_policy(policy['id'])

    @decorators.idempotent_id('a2a5849b-dd06-4b18-9664-0b6828a1fc27')
    def test_qos_policy_delete_with_rules(self):
        """Policy with rules attached can be deleted."""
        policy = self.create_qos_policy(name='test-policy',
                                        description='test policy',
                                        shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        self.adm_qos_client.create_bandwidth_limit_rule(
            policy['id'], 200, 1337)

        self.adm_qos_client.delete_policy(policy['id'])

        with testtools.ExpectedException(exceptions.NotFound):
            self.adm_qos_client.show_policy(policy['id'])
|
||||
|
||||
|
||||
class QosBandwidthLimitRuleTest(BaseQosTest):
|
||||
"""QoS Bandwidth limit rule CURD operations."""
|
||||
|
||||
@decorators.idempotent_id('8a59b00b-3e9c-4787-92f8-93a5cdf5e378')
|
||||
def test_rule_create(self):
|
||||
"""qos-bandwidth-limit-rule-create POLICY_ID."""
|
||||
qos_client = self.adm_qos_client
|
||||
policy = self.create_qos_policy(name='test-policy',
|
||||
description='test policy',
|
||||
shared=False)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.adm_qos_client.delete_policy, policy['id'])
|
||||
rule = self.create_qos_bandwidth_limit_rule(
|
||||
policy_id=policy['id'], max_kbps=200, max_burst_kbps=1337)
|
||||
|
||||
# Test 'show rule'
|
||||
retrieved_rule = qos_client.show_bandwidth_limit_rule(
|
||||
rule['id'], policy['id'])
|
||||
self.assertEqual(rule['id'], retrieved_rule['id'])
|
||||
self.assertEqual(200, retrieved_rule['max_kbps'])
|
||||
self.assertEqual(1337, retrieved_rule['max_burst_kbps'])
|
||||
|
||||
# Test 'list rules'
|
||||
rules = qos_client.list_bandwidth_limit_rules(policy['id'])
|
||||
rules_ids = [r['id'] for r in rules]
|
||||
self.assertIn(rule['id'], rules_ids)
|
||||
|
||||
# Test 'show policy'
|
||||
retrieved_policy = qos_client.show_policy(policy['id'])
|
||||
policy_rules = retrieved_policy['rules']
|
||||
self.assertEqual(1, len(policy_rules))
|
||||
self.assertEqual(rule['id'], policy_rules[0]['id'])
|
||||
self.assertEqual(base_qos.RULE_TYPE_BANDWIDTH_LIMIT,
|
||||
policy_rules[0]['type'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8a59b00b-ab01-4787-92f8-93a5cdf5e378')
|
||||
def test_rule_create_fail_for_the_same_type(self):
|
||||
"""One bandwidth limit rule per policy."""
|
||||
policy = self.create_qos_policy(name='test-policy',
|
||||
description='test policy',
|
||||
shared=False)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.adm_qos_client.delete_policy, policy['id'])
|
||||
self.create_qos_bandwidth_limit_rule(
|
||||
policy_id=policy['id'], max_kbps=200, max_burst_kbps=1337)
|
||||
|
||||
self.assertRaises(exceptions.Conflict,
|
||||
self.create_qos_bandwidth_limit_rule,
|
||||
policy_id=policy['id'],
|
||||
max_kbps=201, max_burst_kbps=1338)
|
||||
|
||||
@decorators.idempotent_id('149a6988-2568-47d2-931e-2dbc858943b3')
|
||||
def test_rule_update(self):
|
||||
"""qos-bandwidth-limit-rule-update RULE-ID POLICY_ID."""
|
||||
qos_client = self.adm_qos_client
|
||||
max_kbps = 200
|
||||
max_burst_kbps = 1337
|
||||
policy = self.create_qos_policy(name='test-policy',
|
||||
description='test policy',
|
||||
shared=False)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.adm_qos_client.delete_policy, policy['id'])
|
||||
rule = self.create_qos_bandwidth_limit_rule(
|
||||
policy_id=policy['id'], max_kbps=1, max_burst_kbps=1)
|
||||
|
||||
qos_client.update_bandwidth_limit_rule(
|
||||
rule['id'], policy['id'],
|
||||
max_kbps=max_kbps, max_burst_kbps=max_burst_kbps)
|
||||
|
||||
retrieved_rule = qos_client.show_bandwidth_limit_rule(
|
||||
rule['id'], policy['id'])
|
||||
self.assertEqual(max_kbps, retrieved_rule['max_kbps'])
|
||||
self.assertEqual(max_burst_kbps, retrieved_rule['max_burst_kbps'])
|
||||
|
||||
@decorators.idempotent_id('67ee6efd-7b33-4a68-927d-275b4f8ba958')
|
||||
def test_rule_delete(self):
|
||||
"""qos-bandwidth-limit-rule-delete RULE-ID POLICY_ID."""
|
||||
qos_client = self.adm_qos_client
|
||||
max_kbps = 200
|
||||
max_burst_kbps = 1337
|
||||
policy = self.create_qos_policy(name='test-policy',
|
||||
description='test policy',
|
||||
shared=False)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.adm_qos_client.delete_policy, policy['id'])
|
||||
rule = self.create_qos_bandwidth_limit_rule(
|
||||
policy['id'],
|
||||
max_kbps=max_kbps, max_burst_kbps=max_burst_kbps)
|
||||
|
||||
retrieved_rule = qos_client.show_bandwidth_limit_rule(
|
||||
rule['id'], policy['id'])
|
||||
self.assertEqual(rule['id'], retrieved_rule['id'])
|
||||
|
||||
qos_client.delete_bandwidth_limit_rule(
|
||||
rule['id'], policy['id'])
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
qos_client.show_bandwidth_limit_rule,
|
||||
rule['id'], policy['id'])
|
||||
|
||||
@decorators.attr(type='negative')
@decorators.idempotent_id('f211222c-5808-46cb-a961-983bbab6b852')
def test_rule_create_rule_nonexistent_policy(self):
    """Cannot create rule with nonexist policy."""
    # 'policy' is not a valid policy id, so creation must return 404.
    self.assertRaises(exceptions.NotFound,
                      self.create_qos_bandwidth_limit_rule,
                      'policy', max_kbps=200, max_burst_kbps=1337)
@decorators.attr(type='negative')
@decorators.idempotent_id('eed8e2a6-22da-421b-89b9-935a2c1a1b50')
def test_policy_create_forbidden_for_regular_tenants(self):
    """project/tenant cannot create policy."""
    # Policy creation requires admin credentials; a regular tenant
    # client must be rejected with 403.
    self.assertRaises(exceptions.Forbidden,
                      self.create_qos_policy,
                      'test-policy', 'test policy', False,
                      qos_client=self.pri_qos_client)
@decorators.attr(type='negative')
@decorators.idempotent_id('a4a2e7ad-786f-4927-a85a-e545a93bd274')
def test_rule_create_forbidden_for_regular_tenants(self):
    """project/tenant cannot create rule."""
    # Rule creation requires admin credentials; a regular tenant
    # client must be rejected with 403.
    self.assertRaises(exceptions.Forbidden,
                      self.create_qos_bandwidth_limit_rule,
                      'policy', max_kbps=1, max_burst_kbps=2,
                      qos_client=self.pri_qos_client)
@decorators.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
def test_get_rules_by_policy(self):
    """qos-bandwidth-limit-rule-list POLICY_ID."""
    def make_policy(name, description):
        # Create a non-shared policy and schedule its deletion.
        policy = self.create_qos_policy(
            name=name, description=description, shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        return policy

    policy1 = make_policy('test-policy1', 'test policy1')
    rule1 = self.create_qos_bandwidth_limit_rule(
        policy_id=policy1['id'], max_kbps=200, max_burst_kbps=1337)

    policy2 = make_policy('test-policy2', 'test policy2')
    rule2 = self.create_qos_bandwidth_limit_rule(
        policy_id=policy2['id'], max_kbps=5000, max_burst_kbps=2523)

    # Listing rules of policy1 must include its own rule only.
    listed = self.adm_qos_client.list_bandwidth_limit_rules(policy1['id'])
    listed_ids = [r['id'] for r in listed]
    self.assertIn(rule1['id'], listed_ids)
    self.assertNotIn(rule2['id'], listed_ids)
class QosDscpMarkingRuleTest(BaseQosTest):
    """QoS DSCP marking rule CRUD operations."""

    # Two distinct valid DSCP marks used across the tests.
    VALID_DSCP_MARK1 = 56
    VALID_DSCP_MARK2 = 48

    def _policy_with_cleanup(self, name, description):
        """Create a non-shared policy and schedule its deletion."""
        policy = self.create_qos_policy(
            name=name, description=description, shared=False)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.adm_qos_client.delete_policy, policy['id'])
        return policy

    @decorators.idempotent_id('8a59b40b-3e9c-4787-92f8-93a5cdf5e378')
    def test_rule_create(self):
        """qos-dscp-marking-rule-create POLICY_ID."""
        client = self.adm_qos_client
        policy = self._policy_with_cleanup('test-policy', 'test policy')
        rule = self.create_qos_dscp_marking_rule(
            policy['id'], self.VALID_DSCP_MARK1)

        # Show the rule and verify its attributes.
        shown = client.show_dscp_marking_rule(rule['id'], policy['id'])
        self.assertEqual(rule['id'], shown['id'])
        self.assertEqual(self.VALID_DSCP_MARK1, shown['dscp_mark'])

        # The rule must appear in the policy's rule listing.
        listed_ids = [r['id']
                      for r in client.list_dscp_marking_rules(policy['id'])]
        self.assertIn(rule['id'], listed_ids)

        # The policy itself must report exactly this one rule.
        shown_policy = client.show_policy(policy['id'])
        policy_rules = shown_policy['rules']
        self.assertEqual(1, len(policy_rules))
        self.assertEqual(rule['id'], policy_rules[0]['id'])
        self.assertEqual(base_qos.RULE_TYPE_DSCP_MARK,
                         policy_rules[0]['type'])

    @decorators.attr(type='negative')
    @decorators.idempotent_id('8b59b10b-ab01-4787-92f8-93a5cdf5e378')
    def test_rule_create_fail_for_the_same_type(self):
        """One dscp marking rule per policy."""
        policy = self._policy_with_cleanup('test-policy', 'test policy')
        self.create_qos_dscp_marking_rule(
            policy['id'], self.VALID_DSCP_MARK1)

        # A second DSCP rule on the same policy must conflict.
        self.assertRaises(exceptions.Conflict,
                          self.create_qos_dscp_marking_rule,
                          policy_id=policy['id'],
                          dscp_mark=self.VALID_DSCP_MARK2)

    @decorators.idempotent_id('249a6988-2568-47d2-931e-2dbc858943b3')
    def test_rule_update(self):
        """qos-dscp-marking-rule-create POLICY_ID."""
        client = self.adm_qos_client
        policy = self._policy_with_cleanup('test-policy', 'test policy')
        rule = self.create_qos_dscp_marking_rule(
            policy['id'], self.VALID_DSCP_MARK1)

        client.update_dscp_marking_rule(
            rule['id'], policy['id'], dscp_mark=self.VALID_DSCP_MARK2)

        # Re-read the rule and confirm the new mark took effect.
        shown = client.show_dscp_marking_rule(rule['id'], policy['id'])
        self.assertEqual(self.VALID_DSCP_MARK2, shown['dscp_mark'])

    @decorators.idempotent_id('67ed6efd-7b33-4a68-927d-275b4f8ba958')
    def test_rule_delete(self):
        """qos-dscp-marking-rule-delete POLICY_ID."""
        client = self.adm_qos_client
        policy = self._policy_with_cleanup('test-policy', 'test policy')
        rule = self.create_qos_dscp_marking_rule(
            policy['id'], self.VALID_DSCP_MARK1)

        # Sanity check: the rule is visible before deletion.
        shown = client.show_dscp_marking_rule(rule['id'], policy['id'])
        self.assertEqual(rule['id'], shown['id'])

        client.delete_dscp_marking_rule(rule['id'], policy['id'])
        # Once deleted, the rule must no longer be retrievable.
        self.assertRaises(exceptions.NotFound,
                          client.show_dscp_marking_rule,
                          rule['id'], policy['id'])

    @decorators.attr(type='negative')
    @decorators.idempotent_id('f215222c-5808-46cb-a961-983bbab6b852')
    def test_rule_create_rule_nonexistent_policy(self):
        """can not create dscp marking rule with nonexist policy."""
        # 'policy' is not a valid policy id, so creation must return 404.
        self.assertRaises(exceptions.NotFound,
                          self.create_qos_dscp_marking_rule,
                          'policy', self.VALID_DSCP_MARK1)

    @decorators.attr(type='negative')
    @decorators.idempotent_id('a4a2e3ad-786f-4927-a85a-e545a93bd274')
    def test_rule_create_forbidden_for_regular_tenants(self):
        """project/tenant can not create dscp marking rule."""
        # Rule creation requires admin credentials.
        self.assertRaises(exceptions.Forbidden,
                          self.create_qos_dscp_marking_rule,
                          'policy', self.VALID_DSCP_MARK1,
                          qos_client=self.pri_qos_client)

    @decorators.attr(type='negative')
    @decorators.idempotent_id('32646b08-4f05-4493-a48a-bde768a18533')
    def test_invalid_rule_create(self):
        """Can not create rule with invalid dscp_mark value."""
        policy = self._policy_with_cleanup('test-policy', 'test policy')
        # 58 is rejected by the API as an invalid DSCP mark.
        self.assertRaises(exceptions.BadRequest,
                          self.create_qos_dscp_marking_rule,
                          policy['id'], 58)

    @decorators.idempotent_id('cf0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
    def test_get_rules_by_policy(self):
        """qos-dscp-marking-rule-list POLICY_ID."""
        policy1 = self._policy_with_cleanup('test-policy1', 'test policy1')
        rule1 = self.create_qos_dscp_marking_rule(
            policy1['id'], self.VALID_DSCP_MARK1)

        policy2 = self._policy_with_cleanup('test-policy2', 'test policy2')
        rule2 = self.create_qos_dscp_marking_rule(
            policy2['id'], self.VALID_DSCP_MARK2)

        # Listing rules of policy1 must include its own rule only.
        listed = self.adm_qos_client.list_dscp_marking_rules(policy1['id'])
        listed_ids = [r['id'] for r in listed]
        self.assertIn(rule1['id'], listed_ids)
        self.assertNotIn(rule2['id'], listed_ids)
@ -1,23 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright 2010-2011 OpenStack Foundation
|
||||
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib import base
|
||||
|
||||
|
||||
class TestCase(base.BaseTestCase):
    """Common base class for the unit tests in this package."""
@ -1,6 +0,0 @@
|
||||
Placeholder for DVS plugin-specific automated tests
|
||||
directory:
|
||||
dvs/
|
||||
api/
|
||||
scenario/
|
||||
|
@ -1,157 +0,0 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.lib.common.utils import data_utils
|
||||
import tempest.test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BaseDvsAdminNetworkTest(base.BaseAdminNetworkTest):
    """Admin base class for DVS provider-network tests.

    Provides creation helpers that register networks/subnets/ports for
    class-level cleanup, plus thin wrappers around the admin clients.
    """

    @classmethod
    def resource_cleanup(cls):
        """Delete every port, subnet and network created by the tests."""
        for port in cls.ports:
            cls.admin_ports_client.delete_port(port['id'])
        for subnet in cls.subnets:
            cls.admin_subnets_client.delete_subnet(subnet['id'])
        for network in cls.networks:
            cls.admin_networks_client.delete_network(network['id'])
        # clean up ports, subnets and networks
        cls.ports = []
        cls.subnets = []
        cls.networks = []
        # BUGFIX: the parent class cleanup was never invoked, so base
        # resources (e.g. credentials) were leaked; chain up explicitly.
        super(BaseDvsAdminNetworkTest, cls).resource_cleanup()

    @classmethod
    def create_network(cls, **kwargs):
        """Create and return a test admin 'dvs' provider network.

        Recognized kwargs:
            net_name: network name (a random one is used by default).
            net_type: provider network type, 'flat' (default) or 'vlan'.
            seg_id:   VLAN segmentation id, used when net_type == 'vlan'.
        """
        network_name = (kwargs.get('net_name')
                        or data_utils.rand_name('test-adm-net-'))
        net_type = kwargs.get('net_type', "flat")
        # BUGFIX: 'body' was previously only assigned when the provider
        # extension was enabled, so the create call raised a confusing
        # NameError otherwise; skip explicitly instead.
        if not tempest.test.is_extension_enabled('provider', 'network'):
            raise cls.skipException('provider extension is not enabled')
        body = {'name': network_name,
                'provider:network_type': net_type,
                'provider:physical_network': 'dvs'}
        if net_type == 'vlan':
            body['provider:segmentation_id'] = kwargs.get('seg_id')

        body = cls.admin_networks_client.create_network(**body)
        network = body['network']
        cls.networks.append(network)
        return network

    @classmethod
    def create_subnet(cls, network):
        """Create and return a test subnet on *network*.

        Picks the first candidate CIDR (derived from the tempest project
        network configuration) that does not overlap an existing subnet.
        """
        # The cidr and mask_bits depend on the ip version.
        if cls._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr
                                     or "192.168.101.0/24")
            mask_bits = CONF.network.project_network_mask_bits or 24
        elif cls._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
            mask_bits = CONF.network.project_network_v6_mask_bits
        # Find a cidr that is not in use yet and create a subnet with it
        for subnet_cidr in cidr.subnet(mask_bits):
            try:
                body = cls.admin_subnets_client.create_subnet(
                    network_id=network['id'],
                    cidr=str(subnet_cidr),
                    ip_version=cls._ip_version)
                break
            except exceptions.BadRequest as e:
                # Overlap errors are expected while probing; anything
                # else is a real failure.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        else:
            message = 'Available CIDR for subnet creation could not be found'
            raise exceptions.BuildErrorException(message)
        subnet = body['subnet']
        cls.subnets.append(subnet)
        return subnet

    @classmethod
    def create_port(cls, network_id, **kwargs):
        """Create, register for cleanup, and return a test port."""
        body = cls.admin_ports_client.create_port(network_id=network_id,
                                                  **kwargs)
        port = body['port']
        cls.ports.append(port)
        return port

    # ------------------------------------------------------------------
    # Thin wrappers around the admin clients. Each accepts an optional
    # `client` override; the admin client is used by default.
    # ------------------------------------------------------------------

    @classmethod
    def update_network(cls, network_id, client=None, **kwargs):
        net_client = client if client else cls.admin_networks_client
        return net_client.update_network(network_id, **kwargs)

    @classmethod
    def delete_network(cls, network_id, client=None):
        net_client = client if client else cls.admin_networks_client
        return net_client.delete_network(network_id)

    @classmethod
    def show_network(cls, network_id, client=None, **kwargs):
        net_client = client if client else cls.admin_networks_client
        return net_client.show_network(network_id, **kwargs)

    @classmethod
    def list_networks(cls, client=None, **kwargs):
        net_client = client if client else cls.admin_networks_client
        return net_client.list_networks(**kwargs)

    @classmethod
    def update_subnet(cls, subnet_id, client=None, **kwargs):
        net_client = client if client else cls.admin_subnets_client
        return net_client.update_subnet(subnet_id, **kwargs)

    @classmethod
    def delete_subnet(cls, subnet_id, client=None):
        net_client = client if client else cls.admin_subnets_client
        return net_client.delete_subnet(subnet_id)

    @classmethod
    def show_subnet(cls, subnet_id, client=None, **kwargs):
        net_client = client if client else cls.admin_subnets_client
        return net_client.show_subnet(subnet_id, **kwargs)

    @classmethod
    def list_subnets(cls, client=None, **kwargs):
        net_client = client if client else cls.admin_subnets_client
        return net_client.list_subnets(**kwargs)

    @classmethod
    def delete_port(cls, port_id, client=None):
        net_client = client if client else cls.admin_ports_client
        return net_client.delete_port(port_id)

    @classmethod
    def show_port(cls, port_id, client=None, **kwargs):
        net_client = client if client else cls.admin_ports_client
        return net_client.show_port(port_id, **kwargs)

    @classmethod
    def list_ports(cls, client=None, **kwargs):
        net_client = client if client else cls.admin_ports_client
        return net_client.list_ports(**kwargs)

    @classmethod
    def update_port(cls, port_id, client=None, **kwargs):
        net_client = client if client else cls.admin_ports_client
        return net_client.update_port(port_id, **kwargs)
@ -1,122 +0,0 @@
|
||||
# Copyright 2014 VMware.inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base_dvs as base
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
|
||||
class AdminNetworksTestJSON(base.BaseDvsAdminNetworkTest):
    """Admin CRUD tests for DVS provider networks and subnets.

    Covers create/update/delete of an admin network and its subnets,
    plus show/list verification.
    """

    _interface = 'json'

    @classmethod
    def resource_setup(cls):
        """Create one shared network + subnet for the show/list tests."""
        super(AdminNetworksTestJSON, cls).resource_setup()
        name = data_utils.rand_name('admin-network-')
        cls.network = cls.create_network(net_name=name)
        cls.name = cls.network['name']
        cls.subnet = cls.create_subnet(cls.network)
        cls.cidr = cls.subnet['cidr']

    def _create_update_delete_network_subnet(self, **net_kwargs):
        """Create a network+subnet, update the subnet, delete both.

        The flat and vlan smoke tests share this flow; only the
        provider-network kwargs differ.
        """
        name = data_utils.rand_name('admin-network-')
        network = self.create_network(net_name=name, **net_kwargs)
        net_id = network['id']
        # create a subnet and verify it is an admin tenant subnet
        subnet = self.create_subnet(network)
        subnet_id = subnet['id']
        self.assertEqual(network['tenant_id'], subnet['tenant_id'])
        # Verify subnet update
        new_name = "New_subnet"
        body = self.update_subnet(subnet_id, name=new_name)
        updated_subnet = body['subnet']
        self.assertEqual(updated_subnet['name'], new_name)
        # Delete subnet and network, removing both from the class-level
        # cleanup lists since they are already gone.
        self.delete_subnet(subnet_id)
        self.subnets.pop()
        self.delete_network(net_id)
        self.networks.pop()

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('1dcead1d-d773-4da1-9534-0b984ca684b3')
    def test_create_update_delete_flat_network_subnet(self):
        self._create_update_delete_network_subnet(net_type='flat')

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('15d3d53c-3328-401f-b8f5-3a29aee2ea3a')
    def test_create_update_delete_vlan_network_subnet(self):
        self._create_update_delete_network_subnet(net_type='vlan',
                                                  seg_id=1000)

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('838aee5f-92f2-47b9-86c6-629a04aa6269')
    def test_show_network(self):
        # Verify the details of a network
        body = self.show_network(self.network['id'])
        network = body['network']
        for key in ['id', 'name', 'provider:network_type',
                    'provider:physical_network']:
            self.assertEqual(network[key], self.network[key])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('b86d50ef-39a7-4136-8c89-e5e534fe92aa')
    def test_list_networks(self):
        # Verify the network exists in the list of all networks
        body = self.list_networks()
        networks = [network['id'] for network in body['networks']
                    if network['id'] == self.network['id']]
        self.assertNotEmpty(networks, "Created network not found in the list")

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('ee3f8b79-da3f-4394-9bea-012488202257')
    def test_show_subnet(self):
        # Verify the details of a subnet
        body = self.show_subnet(self.subnet['id'])
        subnet = body['subnet']
        self.assertNotEmpty(subnet, "Subnet returned has no fields")
        for key in ['id', 'cidr']:
            self.assertIn(key, subnet)
            self.assertEqual(subnet[key], self.subnet[key])
@ -1,98 +0,0 @@
|
||||
# Copyright 2014 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base_dvs as base
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
|
||||
class AdminPortsTestJSON(base.BaseDvsAdminNetworkTest):
    """Admin CRUD tests for ports on a DVS provider network.

    Covers port create, update, delete, list and show.
    """

    _interface = 'json'

    @classmethod
    def resource_setup(cls):
        """Create one network and one port reused by the show/list tests."""
        super(AdminPortsTestJSON, cls).resource_setup()
        name = data_utils.rand_name('admin-ports-')
        cls.network = cls.create_network(net_name=name)
        cls.port = cls.create_port(cls.network['id'])

    @decorators.idempotent_id('c3f751d4-e358-44b9-bfd2-3d563c4a2d04')
    def test_create_update_delete_port(self):
        # Verify port creation
        network_id = self.network['id']
        port = self.create_port(network_id)
        self.assertTrue(port['admin_state_up'])
        # Verify port update
        new_name = "New_Port"
        body = self.update_port(
            port['id'],
            name=new_name,
            admin_state_up=False)
        updated_port = body['port']
        self.assertEqual(updated_port['name'], new_name)
        self.assertFalse(updated_port['admin_state_up'])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('d3dcd23b-7d5a-4720-8d88-473fb154d609')
    def test_show_port(self):
        # Verify the details of port
        body = self.show_port(self.port['id'])
        port = body['port']
        self.assertIn('id', port)
        self.assertEqual(port['id'], self.port['id'])
        self.assertEqual(self.port['admin_state_up'], port['admin_state_up'])
        self.assertEqual(self.port['device_id'], port['device_id'])
        self.assertEqual(self.port['device_owner'], port['device_owner'])
        self.assertEqual(self.port['mac_address'], port['mac_address'])
        self.assertEqual(self.port['name'], port['name'])
        # BUGFIX: the security_groups assertion was duplicated; keep one.
        self.assertEqual(self.port['security_groups'],
                         port['security_groups'])
        self.assertEqual(self.port['network_id'], port['network_id'])
        self.assertEqual(port['fixed_ips'], [])

    @decorators.attr(type='smoke')
    @decorators.idempotent_id('c5f74042-c512-4569-b9b9-bc2bf46e77e1')
    def test_list_ports(self):
        # Verify the port exists in the list of all ports
        body = self.list_ports()
        ports = [port['id'] for port in body['ports']
                 if port['id'] == self.port['id']]
        self.assertNotEmpty(ports, "Created port not found in the list")

    @decorators.idempotent_id('2775f96c-a09b-49e1-a5a4-adb83a3e91c7')
    @decorators.attr(type='smoke')
    def test_list_ports_fields(self):
        # Verify specific fields of ports
        fields = ['binding:vif_type', 'id', 'mac_address']
        body = self.list_ports(fields=fields)
        ports = body['ports']
        self.assertNotEmpty(ports, "Port list returned is empty")
        # Asserting the fields returned are correct
        # Verify binding:vif_type is dvs
        for port in ports:
            self.assertEqual(sorted(fields), sorted(port.keys()))
            self.assertEqual(port.get(fields[0]), 'dvs')
@ -1,193 +0,0 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import netaddr
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest import exceptions
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.tests.scenario import manager
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestDvsNetworkBasicOps(manager.NetworkScenarioTest):

    """Basic network operations on Neutron-managed VDS networking.

    This smoke test suite assumes that Nova has been configured to
    boot VMs with Neutron-managed VDS networking, and attempts to
    verify network connectivity.
    """

    def setUp(self):
        super(TestDvsNetworkBasicOps, self).setUp()
        self._ip_version = 4
        self.keypairs = {}
        self.servers = []
        self.admin_net_client = self.admin_manager.networks_client
        self.admin_subnet_client = self.admin_manager.subnets_client

    def _setup_network(self):
        """Create the network and subnet used by the scenario."""
        self.network = self._create_network()
        self.subnet = self._create_subnet(self.network)

    def _list_ports(self, *args, **kwargs):
        """List ports using admin creds."""
        ports_list = self.admin_manager.ports_client.list_ports(
            *args, **kwargs)
        return ports_list['ports']

    def _create_network(self, network_name=None):
        """Create and return a shared admin 'dvs' flat provider network."""
        network_name = network_name or data_utils.rand_name('test-adm-net-')
        # BUGFIX: previously this fell through and returned None when the
        # provider extension was disabled; skip explicitly instead.
        if not test.is_extension_enabled('provider', 'network'):
            raise self.skipException('provider extension is not enabled')
        body = {'name': network_name,
                'provider:network_type': 'flat',
                'provider:physical_network': 'dvs',
                'shared': True}
        body = self.admin_net_client.create_network(**body)
        self.addCleanup(self.admin_net_client.delete_network,
                        body['network']['id'])
        return body['network']

    def _create_subnet(self, network):
        """Create and return a subnet on *network* with a free CIDR."""
        # The cidr and mask_bits depend on the ip version.
        if self._ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr
                                     or "192.168.101.0/24")
            mask_bits = CONF.network.project_network_mask_bits or 24
        elif self._ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
            mask_bits = CONF.network.project_network_v6_mask_bits
        # Find a cidr that is not in use yet and create a subnet with it
        for subnet_cidr in cidr.subnet(mask_bits):
            try:
                body = self.admin_subnet_client.create_subnet(
                    network_id=network['id'],
                    cidr=str(subnet_cidr),
                    ip_version=self._ip_version)
                break
            except exceptions.BadRequest as e:
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        else:
            message = ('Available CIDR for subnet creation '
                       'could not be found')
            raise exceptions.BuildErrorException(message)
        return body['subnet']

    def _check_networks(self):
        """Verify the created network/subnet appear in list_[networks,subnets]."""
        seen_nets = self.admin_manager.networks_client.list_networks()
        seen_names = [n['name'] for n in seen_nets['networks']]
        seen_ids = [n['id'] for n in seen_nets['networks']]
        self.assertIn(self.network['name'], seen_names)
        self.assertIn(self.network['id'], seen_ids)

        seen_subnets = self.admin_manager.subnets_client.list_subnets()
        seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']]
        seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']]
        self.assertIn(self.network['id'], seen_net_ids)
        self.assertIn(self.subnet['id'], seen_subnet_ids)

    def _create_server(self):
        """Boot a server on self.network with a fresh keypair."""
        keypair = self.create_keypair()
        self.keypairs[keypair['name']] = keypair
        networks = [{'uuid': self.network['id']}]

        name = data_utils.rand_name('server-smoke')
        server = self.create_server(name=name,
                                    networks=networks,
                                    key_name=keypair['name'],
                                    wait_until='ACTIVE')
        self.servers.append(server)
        return server

    def _get_server_key(self, server):
        """Return the private key matching *server*'s keypair."""
        return self.keypairs[server['key_name']]['private_key']

    def _check_server_connectivity(self, address_list,
                                   should_connect=True):
        """SSH into the first address and ping every address in the list."""
        private_key = self._get_server_key(self.servers[0])
        ip_address = address_list[0]
        ssh_source = self.get_remote_client(ip_address,
                                            private_key=private_key)
        for remote_ip in address_list:
            if should_connect:
                # BUGFIX: the message was split across two statements, so
                # the formatted half was discarded and msg was truncated.
                msg = ("Timed out waiting for %s to become reachable"
                       % remote_ip)
            else:
                msg = "ip address %s is reachable" % remote_ip
            try:
                self.assertTrue(self._check_remote_connectivity
                                (ssh_source, remote_ip, should_connect),
                                msg)
            except Exception:
                LOG.exception("Unable to access {dest} via ping to "
                              "fix-ip {src}".format(dest=remote_ip,
                                                    src=ip_address))
                raise

    def _check_network_internal_connectivity(self, network,
                                             should_connect=True):
        """Check in-tenant connectivity from the booted VM via ssh.

        Pings the internal gateway and DHCP port; both are checked
        because L3 and DHCP agents might be on different nodes.
        """
        server = self.servers[0]
        # get internal ports' ips:
        # get all network ports in the new network
        internal_ips = ([p['fixed_ips'][0]['ip_address'] for p in
                         self._list_ports(tenant_id=server['tenant_id'],
                                          network_id=network['id'])
                         if p['device_owner'].startswith('compute')])

        self._check_server_connectivity(internal_ips,
                                        should_connect)

    @decorators.attr(type='smoke')
    @test.services('compute', 'network')
    @decorators.idempotent_id('b977dce6-6527-4676-9b66-862b22058f0f')
    def test_network_basic_ops(self):
        """
        For a freshly-booted VM with an IP address ("port") on a given
        network:

        - the Tempest host can ping the IP address. This implies, but
        does not guarantee (see the ssh check that follows), that the
        VM has been assigned the correct IP address and has
        connectivity to the Tempest host.

        - the Tempest host can perform key-based authentication to an
        ssh server hosted at the IP address. This check guarantees
        that the IP address is associated with the target VM.

        - the Tempest host can ssh into the VM via the IP address and
        successfully execute the following:
        """
        self._setup_network()
        self._check_networks()
        self._create_server()
        self._check_network_internal_connectivity(self.network)
@ -1,6 +0,0 @@
|
||||
Placeholder for NSX-v plugin specific automated tests
|
||||
directory:
|
||||
nsxv/
|
||||
api/
|
||||
scenario/
|
||||
scale/
|
@ -1 +0,0 @@
|
||||
Placeholder for nsxv neutron plugin specific API tests.
|
@ -1,215 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class BaseAdminNetworkTest(base.BaseAdminNetworkTest):
|
||||
# NOTE(akang): This class inherits from BaseAdminNetworkTest.
|
||||
# By default client is cls.client, but for provider network,
|
||||
# the client is admin_client. The test class should pass
|
||||
# client=self.admin_client, if it wants to create provider
|
||||
# network/subnet.
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(BaseAdminNetworkTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('provider', 'network'):
|
||||
msg = "Network Provider Extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BaseAdminNetworkTest, cls).resource_setup()
|
||||
cls.admin_netwk_info = []
|
||||
cls.admin_port_info = []
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if CONF.service_available.neutron:
|
||||
for netwk_info in cls.admin_netwk_info:
|
||||
net_client, network = netwk_info
|
||||
try:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
net_client.delete_network, network['id'])
|
||||
except Exception:
|
||||
pass
|
||||
for port_info in cls.admin_port_info:
|
||||
port_client, port = port_info
|
||||
try:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
port_client.delete_port, port['id'])
|
||||
except Exception:
|
||||
pass
|
||||
super(BaseAdminNetworkTest, cls).resource_cleanup()
|
||||
|
||||
@classmethod
|
||||
def create_network(cls, network_name=None, client=None,
|
||||
**kwargs):
|
||||
net_client = client if client else cls.admin_networks_client
|
||||
network_name = network_name or data_utils.rand_name('ADM-network-')
|
||||
post_body = {'name': network_name}
|
||||
post_body.update(kwargs)
|
||||
body = net_client.create_network(**post_body)
|
||||
network = body['network']
|
||||
cls.admin_netwk_info.append([net_client, network])
|
||||
return body
|
||||
|
||||
@classmethod
|
||||
def update_network(cls, network_id, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_networks_client
|
||||
return net_client.update_network(network_id, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def delete_network(cls, network_id, client=None):
|
||||
net_client = client if client else cls.admin_networks_client
|
||||
return net_client.delete_network(network_id)
|
||||
|
||||
@classmethod
|
||||
def show_network(cls, network_id, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_networks_client
|
||||
return net_client.show_network(network_id, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def list_networks(cls, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_networks_client
|
||||
return net_client.list_networks(**kwargs)
|
||||
|
||||
@classmethod
|
||||
def create_subnet(cls, network, client=None,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0, **kwargs):
|
||||
ip_version = (ip_version if ip_version is not None
|
||||
else cls._ip_version)
|
||||
net_client = client if client else cls.admin_subnets_client
|
||||
post_body = get_subnet_create_options(
|
||||
network['id'], ip_version,
|
||||
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
return net_client.create_subnet(**post_body)
|
||||
|
||||
@classmethod
|
||||
def update_subnet(cls, subnet_id, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_subnets_client
|
||||
return net_client.update_subnet(subnet_id, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def delete_subnet(cls, subnet_id, client=None):
|
||||
net_client = client if client else cls.admin_subnets_client
|
||||
return net_client.delete_subnet(subnet_id)
|
||||
|
||||
@classmethod
|
||||
def show_subnet(cls, subnet_id, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_subnets_client
|
||||
return net_client.show_subnet(subnet_id, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def list_subnets(cls, client=None, **kwargs):
|
||||
net_client = client if client else cls.admin_subnets_client
|
||||
return net_client.list_subnets(**kwargs)
|
||||
|
||||
@classmethod
|
||||
def create_port(cls, network_id, port_name=None, client=None, **kwargs):
|
||||
port_client = client if client else cls.admin_ports_client
|
||||
port_name = port_name or data_utils.rand_name('ADM-port-')
|
||||
post_body = {'name': port_name,
|
||||
'network_id': network_id}
|
||||
post_body.update(kwargs)
|
||||
body = port_client.create_port(**post_body)
|
||||
port = body['port']
|
||||
cls.admin_port_info.append([port_client, port])
|
||||
return body
|
||||
|
||||
@classmethod
|
||||
def update_port(cls, port_id, client=None, **kwargs):
|
||||
port_client = client if client else cls.admin_ports_client
|
||||
return port_client.update_port(port_id, **kwargs)
|
||||
|
||||
@classmethod
|
||||
def delete_port(cls, port_id, client=None):
|
||||
port_client = client if client else cls.admin_ports_client
|
||||
return port_client.delete_port(port_id)
|
||||
|
||||
@classmethod
|
||||
def list_ports(cls, client=None, **kwargs):
|
||||
port_client = client if client else cls.admin_ports_client
|
||||
return port_client.list_ports(**kwargs)
|
||||
|
||||
# add other create methods, i.e. security-group, port, floatingip
|
||||
# if needed.
|
||||
|
||||
|
||||
def get_subnet_create_options(network_id, ip_version=4,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
num_subnet=1, gateway_offset=1, cidr_offset=0,
|
||||
**kwargs):
|
||||
|
||||
"""When cidr_offset>0 it request only one subnet-options:
|
||||
|
||||
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
|
||||
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
|
||||
"""
|
||||
|
||||
gateway_not_set = (gateway == '')
|
||||
if ip_version == 4:
|
||||
cidr = cidr or netaddr.IPNetwork(CONF.network.project_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.project_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = (
|
||||
cidr or netaddr.IPNetwork(CONF.network.project_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet_list = []
|
||||
if cidr_offset > 0:
|
||||
num_subnet = cidr_offset + 1
|
||||
for subnet_cidr in cidr.subnet(mask_bits):
|
||||
if gateway_not_set:
|
||||
gateway_ip = gateway or (
|
||||
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
|
||||
else:
|
||||
gateway_ip = gateway
|
||||
try:
|
||||
subnet_body = dict(
|
||||
network_id=network_id,
|
||||
cidr=str(subnet_cidr),
|
||||
ip_version=ip_version,
|
||||
gateway_ip=gateway_ip,
|
||||
**kwargs)
|
||||
if num_subnet <= 1:
|
||||
return subnet_body
|
||||
subnet_list.append(subnet_body)
|
||||
if len(subnet_list) >= num_subnet:
|
||||
if cidr_offset > 0:
|
||||
# user request the 'cidr_offset'th of cidr
|
||||
return subnet_list[cidr_offset]
|
||||
# user request list of cidr
|
||||
return subnet_list
|
||||
except exceptions.BadRequest as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
else:
|
||||
message = 'Available CIDR for subnet creation could not be found'
|
||||
raise exceptions.BuildErrorException(message)
|
||||
return {}
|
@ -1,516 +0,0 @@
|
||||
# Copyright 2015 Rackspace
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# 2016-03 (akang)
|
||||
# ported from neutron-lbaas to comply to tempest framework
|
||||
# NSX-v require vip-subnet attached to exclusive router
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest import test
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
|
||||
from vmware_nsx_tempest.services.lbaas import health_monitors_client
|
||||
from vmware_nsx_tempest.services.lbaas import l7policies_client
|
||||
from vmware_nsx_tempest.services.lbaas import l7rules_client
|
||||
from vmware_nsx_tempest.services.lbaas import listeners_client
|
||||
from vmware_nsx_tempest.services.lbaas import load_balancers_client
|
||||
from vmware_nsx_tempest.services.lbaas import members_client
|
||||
from vmware_nsx_tempest.services.lbaas import pools_client
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
NO_ROUTER_TYPE = CONF.nsxv.no_router_type
|
||||
L7_POLICY_ACTIONS = ('REJECT', 'REDIRECT_TO_URL', 'REDIRECT_TO_POOL')
|
||||
L7_RULE_TYPES = ('HOSTNAME', 'PATH', 'FILE_TYPE', 'HEADER', 'COOKIE')
|
||||
L7_RULE_COMPARISON_TYPES = ('REGEXP', 'STARTS_WITH', 'ENDS_WITH',
|
||||
'CONTAINS', 'EQUAL_TO')
|
||||
|
||||
|
||||
class BaseTestCase(base.BaseNetworkTest):
|
||||
|
||||
# This class picks non-admin credentials and run the tempest tests
|
||||
|
||||
_lbs_to_delete = []
|
||||
_setup_lbaas_non_admin_resource = True
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(BaseTestCase, cls).skip_checks()
|
||||
if not test.is_extension_enabled('lbaasv2', 'network'):
|
||||
msg = "lbaasv2 extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BaseTestCase, cls).resource_setup()
|
||||
|
||||
if cls._setup_lbaas_non_admin_resource:
|
||||
mgr = cls.get_client_manager()
|
||||
cls.create_lbaas_clients(mgr)
|
||||
cls.setup_lbaas_core_network()
|
||||
|
||||
@classmethod
|
||||
def create_lbaas_clients(cls, mgr):
|
||||
cls.load_balancers_client = load_balancers_client.get_client(mgr)
|
||||
cls.listeners_client = listeners_client.get_client(mgr)
|
||||
cls.pools_client = pools_client.get_client(mgr)
|
||||
cls.members_client = members_client.get_client(mgr)
|
||||
cls.health_monitors_client = health_monitors_client.get_client(mgr)
|
||||
# l7-switching clients
|
||||
cls.l7policies_client = l7policies_client.get_client(cls.manager)
|
||||
cls.l7rules_client = l7rules_client.get_client(cls.manager)
|
||||
|
||||
@classmethod
|
||||
def setup_lbaas_core_network(cls):
|
||||
rand_number = data_utils.rand_name()
|
||||
network_name = 'lbaas-network-' + rand_number
|
||||
router_name = 'lbaas-router-' + rand_number
|
||||
cls.network = cls.create_network(network_name)
|
||||
cls.subnet = cls.create_subnet(cls.network)
|
||||
cls.tenant_id = cls.subnet.get('tenant_id')
|
||||
cls.subnet_id = cls.subnet.get('id')
|
||||
# NSX-v: load-balancer's subnet need to attach to exclusive-router
|
||||
if not CONF.nsxv.manager_uri:
|
||||
router_cfg = dict(router_name=router_name)
|
||||
else:
|
||||
router_cfg = dict(router_name=router_name, router_type='exclusive')
|
||||
if NO_ROUTER_TYPE:
|
||||
# router_type is NSX-v extension.
|
||||
router_cfg.pop('router_type', None)
|
||||
cls.router = cls.create_router(**router_cfg)
|
||||
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
for lb_id in cls._lbs_to_delete:
|
||||
try:
|
||||
statuses = cls._show_load_balancer_status_tree(lb_id)
|
||||
lb = statuses.get('loadbalancer')
|
||||
except exceptions.NotFound:
|
||||
continue
|
||||
for listener in lb.get('listeners', []):
|
||||
for policy in listener.get('l7policies'):
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.l7policies_client.delete_l7policy,
|
||||
policy.get('id'))
|
||||
cls._wait_for_load_balancer_status(lb_id)
|
||||
for pool in listener.get('pools'):
|
||||
cls.delete_lb_pool_resources(lb_id, pool)
|
||||
# delete listener
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.listeners_client.delete_listener,
|
||||
listener.get('id'))
|
||||
cls._wait_for_load_balancer_status(lb_id)
|
||||
# delete pools not attached to listener, but loadbalancer
|
||||
for pool in lb.get('pools', []):
|
||||
cls.delete_lb_pool_resources(lb_id, pool)
|
||||
# delete load-balancer
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls._delete_load_balancer, lb_id)
|
||||
# NSX-v: delete exclusive router
|
||||
cls.delete_router(cls.router)
|
||||
super(BaseTestCase, cls).resource_cleanup()
|
||||
|
||||
@classmethod
|
||||
def delete_lb_pool_resources(cls, lb_id, pool):
|
||||
# delete pool's health-monitor
|
||||
hm = pool.get('healthmonitor')
|
||||
if hm:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.health_monitors_client.delete_health_monitor,
|
||||
pool.get('healthmonitor').get('id'))
|
||||
cls._wait_for_load_balancer_status(lb_id)
|
||||
# delete pool's members
|
||||
members = pool.get('members', [])
|
||||
for member in members:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.members_client.delete_member,
|
||||
pool.get('id'), member.get('id'))
|
||||
cls._wait_for_load_balancer_status(lb_id)
|
||||
# delete pool
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.pools_client.delete_pool, pool.get('id'))
|
||||
cls._wait_for_load_balancer_status(lb_id)
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
cls.LOG = logging.getLogger(cls._get_full_case_name())
|
||||
super(BaseTestCase, cls).setUpClass()
|
||||
|
||||
def setUp(cls):
|
||||
cls.LOG.info(('Starting: {0}').format(cls._testMethodName))
|
||||
super(BaseTestCase, cls).setUp()
|
||||
|
||||
def tearDown(cls):
|
||||
super(BaseTestCase, cls).tearDown()
|
||||
cls.LOG.info(('Finished: {0}').format(cls._testMethodName))
|
||||
|
||||
@classmethod
|
||||
def _create_load_balancer(cls, wait=True, **lb_kwargs):
|
||||
lb = cls.load_balancers_client.create_load_balancer(**lb_kwargs)
|
||||
lb = lb.get('loadbalancer', lb)
|
||||
cls._lbs_to_delete.append(lb.get('id'))
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(lb.get('id'))
|
||||
|
||||
port = cls.ports_client.show_port(lb['vip_port_id'])
|
||||
cls.ports.append(port['port'])
|
||||
return lb
|
||||
|
||||
@classmethod
|
||||
def _create_active_load_balancer(cls, **kwargs):
|
||||
lb = cls._create_load_balancer(**kwargs)
|
||||
lb = lb.get('loadbalancer', lb)
|
||||
lb = cls._wait_for_load_balancer_status(lb.get('id'))
|
||||
return lb
|
||||
|
||||
@classmethod
|
||||
def _delete_load_balancer(cls, load_balancer_id, wait=True):
|
||||
cls.load_balancers_client.delete_load_balancer(load_balancer_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(
|
||||
load_balancer_id, delete=True)
|
||||
|
||||
@classmethod
|
||||
def _update_load_balancer(cls, load_balancer_id, wait=True, **lb_kwargs):
|
||||
lb = cls.load_balancers_client.update_load_balancer(
|
||||
load_balancer_id, **lb_kwargs)
|
||||
lb = lb.get('loadbalancer', lb)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(
|
||||
load_balancer_id)
|
||||
return lb
|
||||
|
||||
@classmethod
|
||||
def _show_load_balancer(cls, load_balancer_id):
|
||||
lb = cls.load_balancers_client.show_load_balancer(load_balancer_id)
|
||||
lb = lb.get('loadbalancer', lb)
|
||||
return lb
|
||||
|
||||
@classmethod
|
||||
def _list_load_balancers(cls, **filters):
|
||||
lbs = cls.load_balancers_client.list_load_balancers(**filters)
|
||||
lb_list = lbs.get('loadbalancers', lbs)
|
||||
return lb_list
|
||||
|
||||
@classmethod
|
||||
def _wait_for_load_balancer_status(cls, load_balancer_id,
|
||||
provisioning_status='ACTIVE',
|
||||
operating_status='ONLINE',
|
||||
delete=False):
|
||||
return cls.load_balancers_client.wait_for_load_balancer_status(
|
||||
load_balancer_id,
|
||||
provisioning_status=provisioning_status,
|
||||
operating_status=operating_status,
|
||||
is_delete_op=delete)
|
||||
|
||||
@classmethod
|
||||
def _show_load_balancer_status_tree(cls, load_balancer_id):
|
||||
statuses = cls.load_balancers_client.show_load_balancer_status_tree(
|
||||
load_balancer_id=load_balancer_id)
|
||||
statuses = statuses.get('statuses', statuses)
|
||||
return statuses
|
||||
|
||||
@classmethod
|
||||
def _show_load_balancer_stats(cls, load_balancer_id):
|
||||
stats = cls.load_balancers_client.show_load_balancer_stats(
|
||||
load_balancer_id=load_balancer_id)
|
||||
stats = stats.get('stats', stats)
|
||||
return stats
|
||||
|
||||
@classmethod
|
||||
def _create_listener(cls, wait=True, **listener_kwargs):
|
||||
listener = cls.listeners_client.create_listener(**listener_kwargs)
|
||||
listener = listener.get('listener', listener)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return listener
|
||||
|
||||
@classmethod
|
||||
def _delete_listener(cls, listener_id, wait=True):
|
||||
cls.listeners_client.delete_listener(listener_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
|
||||
@classmethod
|
||||
def _update_listener(cls, listener_id, wait=True, **listener_kwargs):
|
||||
listener = cls.listeners_client.update_listener(
|
||||
listener_id, **listener_kwargs)
|
||||
listener = listener.get('listener', listener)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(
|
||||
cls.load_balancer.get('id'))
|
||||
return listener
|
||||
|
||||
@classmethod
|
||||
def _show_listener(cls, listener_id):
|
||||
listener = cls.listeners_client.show_listener(listener_id)
|
||||
listener = listener.get('listener', listener)
|
||||
return listener
|
||||
|
||||
@classmethod
|
||||
def _list_listeners(cls, **filters):
|
||||
lbs = cls.listeners_client.list_listeners(**filters)
|
||||
lb_list = lbs.get('listeners', lbs)
|
||||
return lb_list
|
||||
|
||||
@classmethod
|
||||
def _create_pool(cls, wait=True, **pool_kwargs):
|
||||
pool = cls.pools_client.create_pool(**pool_kwargs)
|
||||
pool = pool.get('pool', pool)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return pool
|
||||
|
||||
@classmethod
|
||||
def _delete_pool(cls, pool_id, wait=True):
|
||||
cls.pools_client.delete_pool(pool_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
|
||||
@classmethod
|
||||
def _update_pool(cls, pool_id, wait=True, **pool_kwargs):
|
||||
pool = cls.pools_client.update_pool(pool_id, **pool_kwargs)
|
||||
pool = pool.get('pool', pool)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(
|
||||
cls.load_balancer.get('id'))
|
||||
return pool
|
||||
|
||||
@classmethod
|
||||
def _show_pool(cls, pool_id):
|
||||
pool = cls.pools_client.show_pool(pool_id)
|
||||
pool = pool.get('pool', pool)
|
||||
return pool
|
||||
|
||||
@classmethod
|
||||
def _list_pools(cls, **filters):
|
||||
pools = cls.pools_client.list_pools(**filters)
|
||||
pool_list = pools.get('pools', pools)
|
||||
return pool_list
|
||||
|
||||
def _create_health_monitor(self, wait=True, cleanup=True,
|
||||
**health_monitor_kwargs):
|
||||
hm = self.health_monitors_client.create_health_monitor(
|
||||
**health_monitor_kwargs)
|
||||
hm = hm.get('healthmonitor', hm)
|
||||
if cleanup:
|
||||
self.addCleanup(self._delete_health_monitor, hm.get('id'))
|
||||
if wait:
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
return hm
|
||||
|
||||
def _delete_health_monitor(self, health_monitor_id, wait=True):
|
||||
self.health_monitors_client.delete_health_monitor(health_monitor_id)
|
||||
if wait:
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
|
||||
def _update_health_monitor(self, health_monitor_id, wait=True,
|
||||
**health_monitor_kwargs):
|
||||
hm = self.health_monitors_client.update_health_monitor(
|
||||
health_monitor_id, **health_monitor_kwargs)
|
||||
hm = hm.get('healthmonitor', hm)
|
||||
if wait:
|
||||
self._wait_for_load_balancer_status(
|
||||
self.load_balancer.get('id'))
|
||||
return hm
|
||||
|
||||
def _show_health_monitor(self, health_monitor_id):
|
||||
hm = self.health_monitors_client.show_health_monitor(health_monitor_id)
|
||||
hm = hm.get('healthmonitor', hm)
|
||||
return hm
|
||||
|
||||
def _list_health_monitors(self, **filters):
|
||||
hms = self.health_monitors_client.list_health_monitors(**filters)
|
||||
hm_list = hms.get('healthmonitors', hms)
|
||||
return hm_list
|
||||
|
||||
@classmethod
|
||||
def _create_member(cls, pool_id, wait=True, **member_kwargs):
|
||||
member = cls.members_client.create_member(pool_id, **member_kwargs)
|
||||
member = member.get('member', member)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return member
|
||||
|
||||
@classmethod
|
||||
def _delete_member(cls, pool_id, member_id, wait=True):
|
||||
cls.members_client.delete_member(pool_id, member_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
|
||||
@classmethod
|
||||
def _update_member(cls, pool_id, member_id, wait=True,
|
||||
**member_kwargs):
|
||||
member = cls.members_client.update_member(
|
||||
pool_id, member_id, **member_kwargs)
|
||||
member = member.get('member', member)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(
|
||||
cls.load_balancer.get('id'))
|
||||
return member
|
||||
|
||||
@classmethod
|
||||
def _show_member(cls, pool_id, member_id):
|
||||
member = cls.members_client.show_member(pool_id, member_id)
|
||||
member = member.get('member', member)
|
||||
return member
|
||||
|
||||
@classmethod
|
||||
def _list_members(cls, pool_id, **filters):
|
||||
members = cls.members_client.list_members(pool_id, **filters)
|
||||
member_list = members.get('members', members)
|
||||
return member_list
|
||||
|
||||
@classmethod
|
||||
def _create_l7policy(cls, wait=True, **kwargs):
|
||||
l7policy = cls.l7policies_client.create_l7policy(**kwargs)
|
||||
l7policy = l7policy.get('l7policy', l7policy)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return l7policy
|
||||
|
||||
@classmethod
|
||||
def _delete_l7policy(cls, policy_id, wait=True):
|
||||
cls.l7policies_client.delete_l7policy(policy_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
|
||||
@classmethod
|
||||
def _update_l7policy(cls, policy_id, wait=True, **kwargs):
|
||||
l7policy = cls.l7policies_client.update_l7policy(policy_id, **kwargs)
|
||||
l7policy = l7policy.get('l7policy', l7policy)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return l7policy
|
||||
|
||||
@classmethod
|
||||
def _show_l7policy(cls, policy_id, **fields):
|
||||
l7policy = cls.l7policies_client.show_l7policy(policy_id, **fields)
|
||||
l7policy = l7policy.get('l7policy', l7policy)
|
||||
return l7policy
|
||||
|
||||
@classmethod
|
||||
def _list_l7policies(cls, **filters):
|
||||
l7policies = cls.l7policies_client.list_l7policies(**filters)
|
||||
l7policies = l7policies.get('l7policies', l7policies)
|
||||
return l7policies
|
||||
|
||||
@classmethod
|
||||
def _create_l7rule(cls, policy_id, wait=True, **kwargs):
|
||||
l7rule = cls.l7rules_client.create_l7rule(policy_id, **kwargs)
|
||||
l7rule = l7rule.get('rule', l7rule)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return l7rule
|
||||
|
||||
@classmethod
|
||||
def _delete_l7rule(cls, policy_id, rule_id, wait=True):
|
||||
cls.l7rules_client.delete_l7rule(policy_id, rule_id)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
|
||||
@classmethod
|
||||
def _update_l7rule(cls, policy_id, rule_id, wait=True, **kwargs):
|
||||
l7rule = cls.l7rules_client.update_l7rule(policy_id, rule_id,
|
||||
**kwargs)
|
||||
l7rule = l7rule.get('rule', l7rule)
|
||||
if wait:
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer.get('id'))
|
||||
return l7rule
|
||||
|
||||
@classmethod
|
||||
def _show_l7rule(cls, policy_id, rule_id, **fields):
|
||||
l7rule = cls.l7rules_client.show_l7rule(policy_id, rule_id, **fields)
|
||||
l7rule = l7rule.get('rule', l7rule)
|
||||
return l7rule
|
||||
|
||||
@classmethod
|
||||
def _list_l7rules(cls, policy_id, **filters):
|
||||
l7rules = cls.l7rules_client.list_l7rules(policy_id, **filters)
|
||||
l7rules = l7rules.get('rules', l7rules)
|
||||
return l7rules
|
||||
|
||||
@classmethod
|
||||
def _check_status_tree(cls, load_balancer_id, listener_ids=None,
|
||||
pool_ids=None, health_monitor_id=None,
|
||||
member_ids=None):
|
||||
statuses = cls._show_load_balancer_status_tree(load_balancer_id)
|
||||
load_balancer = statuses['loadbalancer']
|
||||
assert 'ONLINE' == load_balancer['operating_status']
|
||||
assert 'ACTIVE' == load_balancer['provisioning_status']
|
||||
|
||||
if listener_ids:
|
||||
cls._check_status_tree_thing(listener_ids,
|
||||
load_balancer['listeners'])
|
||||
if pool_ids:
|
||||
cls._check_status_tree_thing(pool_ids,
|
||||
load_balancer['listeners']['pools'])
|
||||
if member_ids:
|
||||
cls._check_status_tree_thing(
|
||||
member_ids,
|
||||
load_balancer['listeners']['pools']['members'])
|
||||
if health_monitor_id:
|
||||
health_monitor = (
|
||||
load_balancer['listeners']['pools']['health_monitor'])
|
||||
assert health_monitor_id == health_monitor['id']
|
||||
assert 'ACTIVE' == health_monitor['provisioning_status']
|
||||
|
||||
@classmethod
|
||||
def _check_status_tree_thing(cls, actual_thing_ids, status_tree_things):
|
||||
found_things = 0
|
||||
status_tree_things = status_tree_things
|
||||
assert len(actual_thing_ids) == len(status_tree_things)
|
||||
for actual_thing_id in actual_thing_ids:
|
||||
for status_tree_thing in status_tree_things:
|
||||
if status_tree_thing['id'] == actual_thing_id:
|
||||
assert 'ONLINE' == (
|
||||
status_tree_thing['operating_status'])
|
||||
assert 'ACTIVE' == (
|
||||
status_tree_thing['provisioning_status'])
|
||||
found_things += 1
|
||||
assert len(actual_thing_ids) == found_things
|
||||
|
||||
@classmethod
|
||||
def _get_full_case_name(cls):
|
||||
name = '{module}:{case_name}'.format(
|
||||
module=cls.__module__,
|
||||
case_name=cls.__name__
|
||||
)
|
||||
return name
|
||||
|
||||
|
||||
class BaseAdminTestCase(BaseTestCase):
|
||||
|
||||
# This class picks admin credentials and run the tempest tests
|
||||
_setup_lbaas_non_admin_resource = False
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BaseAdminTestCase, cls).resource_setup()
|
||||
|
||||
cls.admin_mgr = cls.get_client_manager(credential_type='admin')
|
||||
cls.admin_tenant_id = cls.admin_mgr.networks_client.tenant_id
|
||||
cls.create_lbaas_clients(cls.admin_mgr)
|
||||
cls.setup_lbaas_core_network()
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(BaseAdminTestCase, cls).resource_cleanup()
|
@ -1,101 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TestHealthMonitors(base.BaseAdminTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Health Monitors with ADMIN role:
|
||||
|
||||
create health monitor with missing tenant_id
|
||||
create health monitor with empty tenant id
|
||||
create health monitor with another tenant_id
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestHealthMonitors, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'))
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.pool = cls._create_pool(
|
||||
protocol='HTTP', lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener.get('id'))
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(TestHealthMonitors, cls).resource_cleanup()
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('24cf7da4-b829-4df5-a133-b6cef97ec560')
|
||||
def test_create_health_monitor_missing_tenant_id_field(self):
|
||||
"""Test if admin user can
|
||||
|
||||
create health monitor with a missing tenant id field.
|
||||
"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3, max_retries=10,
|
||||
timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
admin_hm = self._show_health_monitor(hm.get('id'))
|
||||
admin_tenant_id = admin_hm.get('tenant_id')
|
||||
hm_tenant_id = hm.get('tenant_id')
|
||||
self.assertEqual(admin_tenant_id, hm_tenant_id)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('acbff982-15d6-43c5-a015-e72b7df30998')
|
||||
def test_create_health_monitor_empty_tenant_id_field(self):
|
||||
"""Test with admin user
|
||||
|
||||
creating health monitor with an empty tenant id field should fail.
|
||||
Kilo: @decorators.skip_because(bug="1638148")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10,
|
||||
timeout=5,
|
||||
pool_id=self.pool.get('id'),
|
||||
tenant_id="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('a318d351-a72e-46dc-a094-8a751e4fa7aa')
|
||||
def test_create_health_monitor_for_another_tenant_id_field(self):
|
||||
"""Test with admin user
|
||||
|
||||
create health Monitors for another tenant id.
|
||||
"""
|
||||
|
||||
tenantid = uuidutils.generate_uuid()
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3, max_retries=10,
|
||||
timeout=5,
|
||||
pool_id=self.pool.get('id'),
|
||||
tenant_id=tenantid)
|
||||
|
||||
self.assertEqual(hm.get('tenant_id'), tenantid)
|
||||
self.assertNotEqual(hm.get('tenant_id'),
|
||||
self.subnet.get('tenant_id'))
|
@ -1,687 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
|
||||
class TestHealthMonitors(base.BaseTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Health Monitors:
|
||||
list pools
|
||||
create pool
|
||||
get pool
|
||||
update pool
|
||||
delete pool
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestHealthMonitors, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'))
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.pool = cls._create_pool(
|
||||
protocol='HTTP', lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener.get('id'))
|
||||
cls.create_basic_hm_kwargs = {'type': 'HTTP', 'delay': 3,
|
||||
'max_retries': 10, 'timeout': 5,
|
||||
'pool_id': cls.pool.get('id')}
|
||||
|
||||
# possible cause is bug#1638601: can not delete health monitor
|
||||
# temparary solution
|
||||
def remove_existing_health_monitors(self):
|
||||
"""remove all existing hm because one pool can only one hm
|
||||
|
||||
During testing, because bug#163860 and
|
||||
one pool can only have one health_monitor,
|
||||
we delete hm before testing -- acutally not very effective.
|
||||
|
||||
hm_list = self._list_health_monitors()
|
||||
for hm in hm_list:
|
||||
test_utils.call_and_igonre_not_found_exc(
|
||||
self._delete_health_monitor,
|
||||
hm.get('id'))
|
||||
"""
|
||||
return None
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('3c223a4d-3733-4daa-a6e3-69a31f9e7304')
|
||||
def test_list_health_monitors_empty(self):
|
||||
hm_list = self._list_health_monitors()
|
||||
self.assertEmpty(hm_list)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('76880edd-b01c-4b80-ba4d-1d10f35aaeb7')
|
||||
def test_list_health_monitors_one(self):
|
||||
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
|
||||
hm_list = self._list_health_monitors()
|
||||
self.assertIn(hm, hm_list)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('22b984d5-8284-4f7c-90c4-407d0e872ea8')
|
||||
def test_list_health_monitors_two(self):
|
||||
hm1 = self._create_health_monitor(**self.create_basic_hm_kwargs)
|
||||
new_listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=88)
|
||||
self.addCleanup(self._delete_listener, new_listener.get('id'))
|
||||
new_pool = self._create_pool(
|
||||
protocol='HTTP', lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=new_listener.get('id'))
|
||||
self.addCleanup(self._delete_pool, new_pool.get('id'))
|
||||
hm2 = self._create_health_monitor(
|
||||
type='HTTP', max_retries=10, delay=3, timeout=5,
|
||||
pool_id=new_pool.get('id'))
|
||||
hm_list = self._list_health_monitors()
|
||||
self.assertEqual(2, len(hm_list))
|
||||
self.assertIn(hm1, hm_list)
|
||||
self.assertIn(hm2, hm_list)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('ca49b640-259c-49ee-be9c-b425a4bbd2cf')
|
||||
def test_get_health_monitor(self):
|
||||
hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
|
||||
hm_test = self._show_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm, hm_test)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('80ded4c2-2277-4e19-8280-3519b22a999e')
|
||||
def test_create_health_monitor(self):
|
||||
new_hm = self._create_health_monitor(**self.create_basic_hm_kwargs)
|
||||
hm = self._show_health_monitor(new_hm.get('id'))
|
||||
self.assertEqual(new_hm, hm)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('387f669b-7a02-4ab3-880d-719dd79ff853')
|
||||
def test_create_health_monitor_missing_attribute(self):
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('bf2ec88e-91d3-48f5-b9f2-be3dab21445c')
|
||||
def test_create_health_monitor_missing_required_field_type(self):
|
||||
"""Test if a non_admin user can
|
||||
|
||||
create a health monitor with type missing
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('85110a81-d905-40f1-92c0-7dafb1617915')
|
||||
def test_create_health_monitor_missing_required_field_delay(self):
|
||||
"""Test if a non_admin user can
|
||||
|
||||
create a health monitor with delay missing
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('10ed9396-271a-4edd-948d-93ad44df2713')
|
||||
def test_create_health_monitor_missing_required_field_timeout(self):
|
||||
"""Test if a non_admin user can
|
||||
|
||||
create a health monitor with timeout missing
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('69614cb5-9078-4b93-8dfa-45d59ac240f8')
|
||||
def test_create_health_monitor_missing_required_field_max_retries(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with max_retries missing
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('543d1f68-1b3a-49c8-bc6c-3eb8123b6e9a')
|
||||
def test_create_health_monitor_missing_required_field_pool_id(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with pool_id missing
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('4f8d17d2-3e52-4e34-83c7-4398b328c559')
|
||||
def test_create_health_monitor_missing_admin_state_up(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with admin_state_up missing
|
||||
"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
hm_test = self._show_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm, hm_test)
|
||||
self.assertEqual(True, hm_test.get('admin_state_up'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('6e1066d3-f358-446e-a574-5d4ceaf0b51d')
|
||||
def test_create_health_monitor_missing_http_method(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with http_method missing
|
||||
"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
hm_test = self._show_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm, hm_test)
|
||||
self.assertEqual('GET', hm_test.get('http_method'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('9b25196f-7476-4ed7-9542-1f22a76b79f8')
|
||||
def test_create_health_monitor_missing_url_path(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with url_path missing
|
||||
"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
hm_test = self._show_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm, hm_test)
|
||||
self.assertEqual('/', hm_test.get('url_path'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('c69da922-1c46-4b9b-8b8b-2e700d506a9c')
|
||||
def test_create_health_monitor_missing_expected_codes(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with expected_codes missing
|
||||
"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
hm_test = self._show_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm, hm_test)
|
||||
self.assertEqual('200', hm_test.get('expected_codes'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a00cb8e0-cd0b-44d0-85b0-5935a0297e37')
|
||||
def test_create_health_monitor_invalid_tenant_id(self):
|
||||
"""Test create health monitor with invalid tenant_id"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
tenant_id='blah',
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('fcd93a6d-1fec-4031-9c18-611f4f3b270e')
|
||||
def test_create_health_monitor_invalid_type(self):
|
||||
"""Test create health monitor with invalid type"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='blah', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('3c2829d9-5d51-4bcc-b83e-f28f6e6d0bc3')
|
||||
def test_create_health_monitor_invalid_delay(self):
|
||||
"""Test create health monitor with invalid delay"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay='blah', max_retries=10,
|
||||
timeout=5, pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7155e366-72a2-47a0-9fcf-25e38a3ef7f7')
|
||||
def test_create_health_monitor_invalid_max_retries(self):
|
||||
"""Test create health monitor with invalid max_retries"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries='blah',
|
||||
timeout=5, pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('fb5d0016-5ea6-4697-8049-e80473e67880')
|
||||
def test_create_health_monitor_invalid_timeout(self):
|
||||
"""Test create health monitor with invalid timeout"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10,
|
||||
timeout='blah', pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7f3e6e95-3eac-4a46-983a-ba1fd3b0afdf')
|
||||
def test_create_health_monitor_invalid_pool_id(self):
|
||||
"""Test create health monitor with invalid pool id"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('f5aacc27-3573-4749-9cb9-3261fcabf1e9')
|
||||
def test_create_health_monitor_invalid_admin_state_up(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with invalid admin_state_up
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'),
|
||||
admin_state_up='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('0f9f2488-aefb-44c9-a08b-67b715e63091')
|
||||
def test_create_health_monitor_invalid_expected_codes(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with invalid expected_codes
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'),
|
||||
expected_codes='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('0d637b7f-52ea-429f-8f97-584a5a9118aa')
|
||||
def test_create_health_monitor_invalid_url_path(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with invalid url_path
|
||||
Kilo: @decorators.skip_because(bug="1641652")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), url_path='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7d4061c4-1fbc-43c3-81b5-2d099a120297')
|
||||
def test_create_health_monitor_invalid_http_method(self):
|
||||
"""Test if a non_admin user
|
||||
|
||||
can create a health monitor with invalid http_method
|
||||
Kilo: @decorators.skip_because(bug="1641643")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), http_method='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b655cee7-df0d-4531-bd98-a4918d2e752a')
|
||||
def test_create_health_monitor_empty_type(self):
|
||||
"""Test create health monitor with empty type"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d843c9f4-507e-462f-8f2b-319af23029db')
|
||||
def test_create_health_monitor_empty_delay(self):
|
||||
"""Test create health monitor with empty delay"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay='', max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('bb9cb2b0-2684-4f4d-b344-6e7b0c58b019')
|
||||
def test_create_health_monitor_empty_timeout(self):
|
||||
"""Test create health monitor with empty timeout"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout='',
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('3b52441d-5e8a-4d17-b772-bd261d0c2656')
|
||||
def test_create_health_monitor_empty_max_retries(self):
|
||||
"""Test create health monitor with empty max_retries"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries='', timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('397aa201-25c1-4828-8c60-9cee5c4d89ab')
|
||||
# NSX-v does reject empty pool_id
|
||||
def test_create_health_monitor_empty_max_pool_id(self):
|
||||
"""Test create health monitor with empty pool_id"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('e806c916-877c-41dc-bacb-aabd9684a540')
|
||||
# NSX-v does reject empty admin_state_up
|
||||
def test_create_health_monitor_empty_max_admin_state_up(self):
|
||||
"""Test create health monitor with empty admin_state_up"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), admin_state_up='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('9c8e8fe8-a3a2-481b-9ac8-eb9ecccd8330')
|
||||
def test_create_health_monitor_empty_max_http_method(self):
|
||||
"""Test create health monitor with empty http_method
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1639340")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), http_method='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('9016c846-fc7c-4063-9f01-61fad37c435d')
|
||||
def test_create_health_monitor_empty_max_url_path(self):
|
||||
"""Test create health monitor with empty url_path
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1639340")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), url_path='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('5df60d27-55ec-42a9-96cd-3affa611c8b1')
|
||||
# NSX-v does reject empty expected_codes
|
||||
def test_create_health_monitor_empty_expected_codes(self):
|
||||
"""Test create health monitor with empty expected_codes"""
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'), expected_codes='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('da63bd3a-89d5-40dd-b920-420263cbfd93')
|
||||
def test_create_health_monitor_invalid_attribute(self):
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries='twenty one',
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('2005ded4-7d26-4946-8d22-e05bf026bd44')
|
||||
def test_create_health_monitor_extra_attribute(self):
|
||||
self.assertRaises(ex.BadRequest, self._create_health_monitor,
|
||||
type='HTTP', delay=3, max_retries=10,
|
||||
pool_id=self.pool.get('id'), subnet_id=10)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('79b4a4f9-1d2d-4df0-a11b-dd97f973dff2')
|
||||
def test_update_health_monitor(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
max_retries = 1
|
||||
new_hm = self._update_health_monitor(
|
||||
hm.get('id'), max_retries=max_retries)
|
||||
self.assertEqual(max_retries, new_hm.get('max_retries'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('9496ba1f-e917-4972-883b-432e44f3cf19')
|
||||
def test_update_health_monitor_missing_admin_state_up(self):
|
||||
"""Test update health monitor with missing admin state field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(True, new_hm.get('admin_state_up'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('88570f22-cb68-47b4-a020-52b75af818d3')
|
||||
def test_update_health_monitor_missing_delay(self):
|
||||
"""Test update health monitor with missing delay field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('delay'), new_hm.get('delay'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('45ace70d-28a5-405d-95cd-b2c92ccaa593')
|
||||
def test_update_health_monitor_missing_timeout(self):
|
||||
"""Test update health monitor with missing timeout field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('timeout'), new_hm.get('timeout'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('269af536-2352-4772-bf35-268df9f4542c')
|
||||
def test_update_health_monitor_missing_max_retries(self):
|
||||
"""Test update health monitor with missing max retries field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('max_retries'), new_hm.get('max_retries'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('318d972f-9cd1-42ef-9b8b-2f91ba785ac7')
|
||||
def test_update_health_monitor_missing_http_method(self):
|
||||
"""Test update health monitor with missing http_method field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('http_method'), new_hm.get('http_method'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('4b97ab67-889d-480c-bedc-f06d86479bb5')
|
||||
def test_update_health_monitor_missing_url_path(self):
|
||||
"""Test update health monitor with missing url_path field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('url_path'), new_hm.get('url_path'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('095cdb91-0937-4ae1-8b46-5edd10f00a1e')
|
||||
def test_update_health_monitor_missing_expected_codes(self):
|
||||
"""Test update health monitor with missing expected_codes field"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
|
||||
new_hm = self._update_health_monitor(hm.get('id'))
|
||||
self.assertEqual(hm.get('expected_codes'),
|
||||
new_hm.get('expected_codes'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('646d74ed-9afe-4710-a677-c36f85482731')
|
||||
def test_update_health_monitor_invalid_attribute(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), max_retries='blue')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('9d717551-82ab-4073-a269-8b05b67d8306')
|
||||
def test_update_health_monitor_invalid_admin_state_up(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), admin_state_up='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b865dc8a-695b-4f15-891c-e73b7402ddeb')
|
||||
def test_update_health_monitor_invalid_delay(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), delay='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('813c8bc1-7ba6-4ae5-96f3-1fdb10ae7be3')
|
||||
def test_update_health_monitor_invalid_timeout(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), timeout='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('05456473-5014-43ae-97a2-3790e4987526')
|
||||
def test_update_health_monitor_invalid_max_retries(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), max_retries='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('1e2fb718-de77-46a3-8897-6f5aff6cab5e')
|
||||
def test_update_health_monitor_invalid_http_method(self):
|
||||
"""Kilo: @decorators.skip_because(bug="1641643")"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), http_method='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('07d62a55-18b3-4b74-acb2-b73a0b5e4364')
|
||||
def test_update_health_monitor_invalid_url_path(self):
|
||||
"""Kilo: @decorators.skip_because(bug="1641652")"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), url_path='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('47c96e10-4863-4635-8bc6-371d460f61bc')
|
||||
def test_update_health_monitor_invalid_expected_codes(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), expected_codes='blah')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8594b3a3-70e8-4dfa-8928-18bc1cc7ab4a')
|
||||
def test_update_health_monitor_empty_admin_state_up(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), admin_state_up='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('1e1b761d-5114-4931-935d-1069d66e2bb1')
|
||||
def test_update_health_monitor_empty_delay(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), empty_delay='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('e6e4a6b7-50b4-465d-be02-44fd5f258bb6')
|
||||
def test_update_health_monitor_empty_timeout(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), timeout='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('65d05adf-a399-4457-bd83-92c43c1eca01')
|
||||
def test_update_health_monitor_empty_max_retries(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), max_retries='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('0c464bb3-ff84-4816-9237-4583e4da9881')
|
||||
def test_update_health_monitor_empty_empty_http_method(self):
|
||||
"""Kilo: @decorators.skip_because(bug="1639340")"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), http_method='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('3e87c0a8-ef15-457c-a58f-270de8c5c76c')
|
||||
def test_update_health_monitor_empty_url_path(self):
|
||||
"""Kilo: @decorators.skip_because(bug="1639340")"""
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), url_path='')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d45189e6-db9f-44d1-b5ad-8b7691e781ee')
|
||||
def test_update_health_monitor_empty_expected_codes(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), expected_codes='')
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('cf70e44e-8060-494a-b577-d656726ba3d8')
|
||||
def test_update_health_monitor_extra_attribute(self):
|
||||
hm = self._create_health_monitor(type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._update_health_monitor,
|
||||
hm.get('id'), protocol='UDP')
|
||||
|
||||
@decorators.attr(type=['smoke', 'negative'])
|
||||
@decorators.idempotent_id('fe44e0d9-957b-44cf-806b-af7819444864')
|
||||
def test_delete_health_monitor(self):
|
||||
"""Kilo: @decorators.skip_because(bug="1639340")"""
|
||||
hm = self._create_health_monitor(cleanup=False, type='HTTP', delay=3,
|
||||
max_retries=10, timeout=5,
|
||||
pool_id=self.pool.get('id'))
|
||||
self._delete_health_monitor(hm.get('id'))
|
||||
self.assertRaises(ex.NotFound,
|
||||
self._show_health_monitor,
|
||||
hm.get('id'))
|
@ -1,157 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
PROTOCOL_PORT = 80
|
||||
|
||||
|
||||
class TestL7Policies(base.BaseTestCase):
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(TestL7Policies, cls).skip_checks()
|
||||
if '1739510' in CONF.nsxv.bugs_to_resolve:
|
||||
msg = ("skip lbaas_l7_switching_ops because bug=1739150"
|
||||
" -- l7 switching is not supported")
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestL7Policies, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'),
|
||||
wait=True)
|
||||
cls.loadbalancer_id = cls.load_balancer.get('id')
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.listener_id = cls.listener.get('id')
|
||||
cls.pool = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener_id)
|
||||
cls.pool_id = cls.pool.get('id')
|
||||
cls.pool7 = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
loadbalancer_id=cls.loadbalancer_id)
|
||||
cls.pool7_id = cls.pool7.get('id')
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(TestL7Policies, cls).resource_cleanup()
|
||||
|
||||
def remove_all_policies(self):
|
||||
policies = self._list_l7policies()
|
||||
for policy in policies:
|
||||
self._delete_l7policy(policy.get('id'))
|
||||
policies = self._list_l7policies()
|
||||
self.assertEmpty(policies)
|
||||
|
||||
def create_to_pool_policy(self, to_position=None, name='policy-pool'):
|
||||
policy_kwargs = dict(
|
||||
action='REDIRECT_TO_POOL', name=name,
|
||||
redirect_pool_id=self.pool7_id,
|
||||
listener_id=self.listener_id)
|
||||
if to_position:
|
||||
policy_kwargs['position'] = to_position
|
||||
policy = self._create_l7policy(**policy_kwargs)
|
||||
self.assertEqual(policy.get('name'), name)
|
||||
self.assertEqual(policy.get('listener_id'), self.listener_id)
|
||||
self.assertEqual(policy.get('redirect_pool_id'), self.pool7_id)
|
||||
return policy
|
||||
|
||||
def create_to_url_policy(self, redirect_url=None, to_position=None,
|
||||
name='policy-url'):
|
||||
policy_kwargs = dict(
|
||||
action='REDIRECT_TO_URL', name=name,
|
||||
redirect_url=redirect_url,
|
||||
redirect_pool_id=self.pool7_id,
|
||||
listener_id=self.listener_id)
|
||||
if to_position:
|
||||
policy_kwargs['position'] = to_position
|
||||
policy = self._create_l7policy(**policy_kwargs)
|
||||
self.assertEqual(policy.get('name'), name)
|
||||
self.assertEqual(policy.get('listener_id'), self.listener_id)
|
||||
self.assertEqual(policy.get('redirect_pool_id'), self.pool7_id)
|
||||
return policy
|
||||
|
||||
def create_reject_policy(self, to_position=1, name='policy-reject'):
|
||||
policy_kwargs = dict(
|
||||
action='REJECT', name=name,
|
||||
redirect_pool_id=self.pool7_id,
|
||||
listener_id=self.listener_id)
|
||||
if to_position:
|
||||
policy_kwargs['position'] = to_position
|
||||
policy = self._create_l7policy(**policy_kwargs)
|
||||
self.assertEqual(policy.get('name'), name)
|
||||
self.assertEqual(policy.get('listener_id'), self.listener_id)
|
||||
self.assertEqual(policy.get('redirect_pool_id'), self.pool7_id)
|
||||
return policy
|
||||
|
||||
@decorators.idempotent_id('465c9bea-53de-4a1f-ae00-fa2ee52d250b')
|
||||
def test_l7policies_crud_ops(self):
|
||||
policy = self.create_to_pool_policy()
|
||||
# update
|
||||
new_policy_name = policy.get('name') + "-update"
|
||||
policy2 = self._update_l7policy(policy.get('id'),
|
||||
name=new_policy_name)
|
||||
self.assertEqual(policy2.get('name'), new_policy_name)
|
||||
# show
|
||||
s_policy = self._show_l7policy(policy.get('id'))
|
||||
self.assertEqual(policy2.get('name'), s_policy.get('name'))
|
||||
# list
|
||||
policies = self._list_l7policies()
|
||||
policy_id_list = [x.get('id') for x in policies]
|
||||
self.assertIn(policy.get('id'), policy_id_list)
|
||||
# delete
|
||||
self._delete_l7policy(policy.get('id'))
|
||||
policies = self._list_l7policies()
|
||||
policy_id_list = [x.get('id') for x in policies]
|
||||
self.assertNotIn(policy.get('id'), policy_id_list)
|
||||
|
||||
@decorators.idempotent_id('726588f4-970a-4f32-8253-95766ddaa7b4')
|
||||
def test_policy_position(self):
|
||||
self.remove_all_policies()
|
||||
policy1 = self.create_to_pool_policy()
|
||||
self.assertEqual(policy1.get('position'), 1)
|
||||
# create reject_policy at position=1
|
||||
policy2 = self.create_reject_policy(to_position=1)
|
||||
self.assertEqual(policy2.get('position'), 1)
|
||||
policy1A = self._show_l7policy(policy1.get('id'))
|
||||
self.assertEqual(policy1A.get('position'), 2)
|
||||
# create to_url_policy at position=2
|
||||
policy3 = self.create_to_url_policy(to_position=2)
|
||||
self.assertEqual(policy3.get('position'), 2)
|
||||
policy2A = self._show_l7policy(policy2.get('id'))
|
||||
self.assertEqual(policy2A.get('position'), 1)
|
||||
policy1A = self._show_l7policy(policy1.get('id'))
|
||||
self.assertEqual(policy1A.get('position'), 3)
|
||||
# delete policy3, policy1 position==2
|
||||
self._delete_l7policy(policy3.get('id'))
|
||||
policy1A = self._show_l7policy(policy1.get('id'))
|
||||
self.assertEqual(policy1A.get('position'), 2)
|
||||
policy2A = self._show_l7policy(policy2.get('id'))
|
||||
self.assertEqual(policy2A.get('position'), 1)
|
||||
self._delete_l7policy(policy2.get('id'))
|
||||
policies = self._list_l7policies()
|
||||
self.assertEqual(len(policies), 1)
|
||||
self.assertEqual(policy1.get('id'), policies[0].get('id'))
|
||||
self._delete_l7policy(policy1.get('id'))
|
||||
policies = self._list_l7policies()
|
||||
self.assertEmpty(policies)
|
@ -1,89 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
PROTOCOL_PORT = 80
|
||||
|
||||
|
||||
class TestL7Rules(base.BaseTestCase):
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(TestL7Rules, cls).skip_checks()
|
||||
if '1739510' in CONF.nsxv.bugs_to_resolve:
|
||||
msg = ("skip lbaas_l7_switching_ops because bug=1739150"
|
||||
" -- l7 switching is not supported")
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestL7Rules, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'),
|
||||
wait=True)
|
||||
cls.loadbalancer_id = cls.load_balancer.get('id')
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.listener_id = cls.listener.get('id')
|
||||
cls.pool = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener_id)
|
||||
cls.pool_id = cls.pool.get('id')
|
||||
cls.pool7 = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
loadbalancer_id=cls.loadbalancer_id)
|
||||
cls.pool7_id = cls.pool7.get('id')
|
||||
cls.policy7 = cls._create_l7policy(action='REDIRECT_TO_POOL',
|
||||
name='policy1',
|
||||
redirect_pool_id=cls.pool7_id,
|
||||
listener_id=cls.listener_id)
|
||||
cls.policy7_id = cls.policy7.get('id')
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(TestL7Rules, cls).resource_cleanup()
|
||||
|
||||
@decorators.idempotent_id('27e8a3a1-bd3a-40e5-902d-fe9bc79ebf1f')
|
||||
def test_l7rules_crud_ops(self):
|
||||
rule = self._create_l7rule(self.policy7_id,
|
||||
type='PATH',
|
||||
compare_type='STARTS_WITH',
|
||||
value='/api')
|
||||
self.assertEqual(rule.get('compare_type'), 'STARTS_WITH')
|
||||
self.assertEqual(rule.get('value'), '/api')
|
||||
self.assertEqual(rule.get('type'), 'PATH')
|
||||
# update
|
||||
new_value = '/v2/api'
|
||||
rule2 = self._update_l7rule(self.policy7_id, rule.get('id'),
|
||||
value=new_value)
|
||||
self.assertEqual(rule2.get('value'), new_value)
|
||||
# show
|
||||
s_rule = self._show_l7rule(self.policy7_id, rule.get('id'))
|
||||
self.assertEqual(s_rule.get('value'), new_value)
|
||||
# list
|
||||
rules = self._list_l7rules(self.policy7_id)
|
||||
rule_id_list = [x.get('id') for x in rules]
|
||||
self.assertIn(rule.get('id'), rule_id_list)
|
||||
# delete
|
||||
self._delete_l7rule(self.policy7_id, rule.get('id'))
|
||||
rules = self._list_l7rules(self.policy7_id)
|
||||
rule_id_list = [x.get('id') for x in rules]
|
||||
self.assertNotIn(rule.get('id'), rule_id_list)
|
@ -1,110 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ListenersTest(base.BaseAdminTestCase):
|
||||
|
||||
"""Tests the listener creation operation in admin scope
|
||||
|
||||
in the Neutron-LBaaS API using the REST client for Listeners:
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(ListenersTest, cls).resource_setup()
|
||||
cls.create_lb_kwargs = {'tenant_id': cls.subnet['tenant_id'],
|
||||
'vip_subnet_id': cls.subnet['id']}
|
||||
cls.load_balancer = cls._create_active_load_balancer(
|
||||
**cls.create_lb_kwargs)
|
||||
cls.protocol = 'HTTP'
|
||||
cls.port = 80
|
||||
cls.load_balancer_id = cls.load_balancer['id']
|
||||
cls.create_listener_kwargs = {'loadbalancer_id': cls.load_balancer_id,
|
||||
'protocol': cls.protocol,
|
||||
'protocol_port': cls.port}
|
||||
cls.listener = cls._create_listener(
|
||||
**cls.create_listener_kwargs)
|
||||
cls.listener_id = cls.listener['id']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(ListenersTest, cls).resource_cleanup()
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('f84bfb35-7f73-4576-b2ca-26193850d2bf')
|
||||
def test_create_listener_empty_tenant_id(self):
|
||||
"""Test create listener with an empty tenant id should fail
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638738")
|
||||
"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8081
|
||||
create_new_listener_kwargs['tenant_id'] = ""
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._create_listener,
|
||||
**create_new_listener_kwargs)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('71ebb8d6-ff2a-410d-a089-b086f195609d')
|
||||
def test_create_listener_invalid_tenant_id(self):
|
||||
"""Test create listener with an invalid tenant id"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8082
|
||||
create_new_listener_kwargs['tenant_id'] = "&^%123"
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('55eaeab9-a21e-470c-8861-5af1ded9d64a')
|
||||
def test_create_listener_missing_tenant_id(self):
|
||||
"""Test create listener with an missing tenant id.
|
||||
|
||||
Verify that creating a listener in admin scope with
|
||||
a missing tenant_id creates the listener with admin
|
||||
tenant_id.
|
||||
"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8083
|
||||
admin_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
admin_listener_id = admin_listener['id']
|
||||
self.addCleanup(self._delete_listener, admin_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, admin_listener_id])
|
||||
listener = self._show_listener(admin_listener_id)
|
||||
self.assertEqual(admin_listener, listener)
|
||||
self.assertEqual(admin_listener.get('tenant_id'),
|
||||
listener.get('tenant_id'))
|
@ -1,603 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ListenersTest(base.BaseTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Listeners:
|
||||
|
||||
list listeners
|
||||
create listener
|
||||
get listener
|
||||
update listener
|
||||
delete listener
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(ListenersTest, cls).resource_setup()
|
||||
cls.create_lb_kwargs = {'tenant_id': cls.subnet['tenant_id'],
|
||||
'vip_subnet_id': cls.subnet['id']}
|
||||
cls.load_balancer = cls._create_active_load_balancer(
|
||||
**cls.create_lb_kwargs)
|
||||
cls.protocol = 'HTTP'
|
||||
cls.port = 80
|
||||
cls.load_balancer_id = cls.load_balancer['id']
|
||||
cls.create_listener_kwargs = {'loadbalancer_id': cls.load_balancer_id,
|
||||
'protocol': cls.protocol,
|
||||
'protocol_port': cls.port}
|
||||
cls.listener = cls._create_listener(**cls.create_listener_kwargs)
|
||||
cls.listener_id = cls.listener['id']
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('32ae6156-d809-49fc-a45b-55269660651c')
|
||||
def test_get_listener(self):
|
||||
"""Test get listener"""
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(self.listener, listener)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('4013ab83-924a-4c53-982e-83388d7ad4d9')
|
||||
def test_list_listeners(self):
|
||||
"""Test get listeners with one listener"""
|
||||
listeners = self._list_listeners()
|
||||
self.assertEqual(len(listeners), 1)
|
||||
self.assertIn(self.listener, listeners)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('04f58729-3f93-4616-bb9d-8baaff3542b2')
|
||||
def test_list_listeners_two(self):
|
||||
"""Test get listeners with two listeners"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8080
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listeners = self._list_listeners()
|
||||
self.assertEqual(len(listeners), 2)
|
||||
self.assertIn(self.listener, listeners)
|
||||
self.assertIn(new_listener, listeners)
|
||||
self.assertNotEqual(self.listener, new_listener)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7989096b-95c2-4b26-86b1-5aec0a2d8386')
|
||||
def test_create_listener(self):
|
||||
"""Test create listener"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8081
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
self.assertNotEqual(self.listener, new_listener)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('f7ef7f56-b791-48e8-9bbe-838a3ed94519')
|
||||
def test_create_listener_missing_field_loadbalancer(self):
|
||||
"""Test create listener with a missing required field loadbalancer"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('c392301c-3d9a-4123-85c3-124e4e3253f6')
|
||||
def test_create_listener_missing_field_protocol(self):
|
||||
"""Test create listener with a missing required field protocol"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('12c1c5b5-81a9-4384-811e-7131f65f3b1b')
|
||||
def test_create_listener_missing_field_protocol_port(self):
|
||||
"""Test create listener with a missing required field protocol_port"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('214a7acc-eacb-4828-ad27-b7f4774947cf')
|
||||
def test_create_listener_missing_admin_state_up(self):
|
||||
"""Test create listener with a missing admin_state_up field"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8083
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
self.assertTrue(new_listener['admin_state_up'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('86d892dd-9025-4051-a160-8bf1bbb8c64d')
|
||||
def test_create_listener_invalid_load_balancer_id(self):
|
||||
"""Test create listener with an invalid load_balancer_id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id="234*",
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('fb430d68-e68d-4bd0-b43d-f1175ad5a819')
|
||||
def test_create_listener_invalid_protocol(self):
|
||||
"""Test create listener with an invalid protocol"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol="UDP")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8e472e7e-a5c2-4dba-ac5c-993f6e6bb229')
|
||||
def test_create_listener_invalid_protocol_port(self):
|
||||
"""Test create listener with an invalid protocol_port"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port="9999999",
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('57fc90f4-95e4-4f3c-8f53-32c7282b956e')
|
||||
def test_create_listener_invalid_admin_state_up(self):
|
||||
"""Test update listener with an invalid admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
admin_state_up="abc123")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('556e1ab9-051c-4e9c-aaaa-f11d15de070b')
|
||||
def test_create_listener_invalid_tenant_id(self):
|
||||
"""Test create listener with an invalid tenant id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
tenant_id="&^%123")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('59d32fd7-06f6-4466-bdd4-0be23b15970c')
|
||||
def test_create_listener_invalid_name(self):
|
||||
"""Test create listener with an invalid name
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
name='a' * 256)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('95457f70-2c1a-4c14-aa80-db8e803d78a9')
|
||||
def test_create_listener_invalid_description(self):
|
||||
"""Test create listener with an invalid description
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
description='a' * 256)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('177d337f-fe0c-406c-92f1-a25c0103bd0f')
|
||||
def test_create_listener_invalid_connection_limit(self):
|
||||
"""Test create listener_ids
|
||||
|
||||
with an invalid value for connection _limit field
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
connection_limit="&^%123")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8af7b033-8ff7-4bdb-8949-76809745d8a9')
|
||||
def test_create_listener_empty_load_balancer_id(self):
|
||||
"""Test create listener with an empty load_balancer_id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id="",
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('242af61b-ce50-46e2-926a-6801600dcee4')
|
||||
def test_create_listener_empty_protocol(self):
|
||||
"""Test create listener with an empty protocol"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('4866af4c-2b91-4bce-af58-af77f19d9119')
|
||||
def test_create_listener_empty_protocol_port(self):
|
||||
"""Test create listener with an empty protocol_port"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port="",
|
||||
protocol=self.protocol)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('09636ad1-a9d5-4c03-92db-ae5d9847993d')
|
||||
def test_create_listener_empty_admin_state_up(self):
|
||||
"""Test update listener with an empty admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
admin_state_up="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('46fc3784-d676-42f7-953b-a23c1d62323d')
|
||||
def test_create_listener_empty_tenant_id(self):
|
||||
"""Test create listener with an empty tenant id
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638701")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
tenant_id="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('b4120626-a47e-4b4e-9b64-017e595c4daf')
|
||||
def test_create_listener_empty_name(self):
|
||||
"""Test create listener with an empty name"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8081
|
||||
create_new_listener_kwargs['name'] = ""
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('af067d00-d496-4f02-87d6-40624c34d492')
|
||||
def test_create_listener_empty_description(self):
|
||||
"""Test create listener with an empty description"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8082
|
||||
create_new_listener_kwargs['description'] = ""
|
||||
new_listener = self._create_listener(
|
||||
**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self.addCleanup(self._delete_listener, new_listener_id)
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('dd271757-c447-4579-a417-f9d0871b145c')
|
||||
def test_create_listener_empty_connection_limit(self):
|
||||
"""Test create listener with an empty connection _limit field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
loadbalancer_id=self.load_balancer_id,
|
||||
protocol_port=self.port,
|
||||
protocol=self.protocol,
|
||||
connection_limit="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a1602217-e1b4-4f85-8a5e-d474477333f3')
|
||||
def test_create_listener_incorrect_attribute(self):
|
||||
"""Test create a listener withan extra, incorrect field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_listener,
|
||||
incorrect_attribute="incorrect_attribute",
|
||||
**self.create_listener_kwargs)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('27c443ff-3aee-4ae6-8b9a-6abf3d5443bf')
|
||||
def test_update_listener(self):
|
||||
"""Test update listener"""
|
||||
self._update_listener(self.listener_id,
|
||||
name='new_name')
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('name'), 'new_name')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a709e4da-01ef-4dda-a336-f5e37268b5ea')
|
||||
def test_update_listener_invalid_tenant_id(self):
|
||||
"""Test update listener with an invalid tenant id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
tenant_id="&^%123")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d88dd3d5-a52f-4306-ba53-e8f6f4e1b399')
|
||||
def test_update_listener_invalid_admin_state_up(self):
|
||||
"""Test update a listener with an invalid admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
admin_state_up="$23")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7c0efb63-90d9-43d0-b959-eb841ef39832')
|
||||
def test_update_listener_invalid_name(self):
|
||||
"""Test update a listener with an invalid name
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
name='a' * 256)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('ba9bfad8-dbb0-4cbc-b2e3-52bf72bc1fc5')
|
||||
def test_update_listener_invalid_description(self):
|
||||
"""Test update a listener with an invalid description
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
description='a' * 256)
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('dcafa50b-cece-4904-bcc9-a0dd1ac99a7e')
|
||||
def test_update_listener_invalid_connection_limit(self):
|
||||
"""Test update a listener with an invalid connection_limit"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
connection_limit="$23")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('27e009c5-3c79-414d-863d-24b731f03123')
|
||||
def test_update_listener_incorrect_attribute(self):
|
||||
"""Test update a listener with an extra, incorrect field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
name="listener_name123",
|
||||
description="listener_description123",
|
||||
admin_state_up=True,
|
||||
connection_limit=10,
|
||||
vip_subnet_id="123321123")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('e8bdd948-7bea-494b-8a4a-e730b70f2882')
|
||||
def test_update_listener_missing_name(self):
|
||||
"""Test update listener with a missing name"""
|
||||
old_listener = self._show_listener(self.listener_id)
|
||||
old_name = old_listener['name']
|
||||
self._update_listener(self.listener_id,
|
||||
description='updated')
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('name'), old_name)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7e0194b8-9315-452d-9de5-d48f227b626f')
|
||||
def test_update_listener_missing_description(self):
|
||||
"""Test update listener with a missing description"""
|
||||
old_listener = self._show_listener(self.listener_id)
|
||||
old_description = old_listener['description']
|
||||
self._update_listener(self.listener_id,
|
||||
name='updated_name')
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('description'), old_description)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('285dd3f2-fcb8-4ccb-b9ce-d6207b29a2f8')
|
||||
def test_update_listener_missing_admin_state_up(self):
|
||||
"""Test update listener with a missing admin_state_up"""
|
||||
old_listener = self._show_listener(self.listener_id)
|
||||
old_admin_state_up = old_listener['admin_state_up']
|
||||
self._update_listener(self.listener_id,
|
||||
name='updated_name')
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('admin_state_up'), old_admin_state_up)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('5c510338-0f8a-4d1e-805b-f8458f2e80ee')
|
||||
def test_update_listener_missing_connection_limit(self):
|
||||
"""Test update listener with a missing connection_limit"""
|
||||
old_listener = self._show_listener(self.listener_id)
|
||||
old_connection_limit = old_listener['connection_limit']
|
||||
self._update_listener(self.listener_id,
|
||||
name='updated_name')
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('connection_limit'),
|
||||
old_connection_limit)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('677205d9-9d97-4232-a8e3-d17ebf42ff05')
|
||||
def test_update_listener_empty_tenant_id(self):
|
||||
"""Test update listener with an empty tenant id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
tenant_id="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('6e9f8fdb-48b0-4c4e-9b29-460576b125ff')
|
||||
def test_update_listener_empty_admin_state_up(self):
|
||||
"""Test update a listener with an empty admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
admin_state_up="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('cf619b8d-1916-4144-85c7-e5a34e0d7a2b')
|
||||
def test_update_listener_empty_name(self):
|
||||
"""Test update a listener with an empty name"""
|
||||
self._update_listener(self.listener_id,
|
||||
name="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('name'), "")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('a9b6f721-c3c1-4d22-a3e5-7e89b58fa3a7')
|
||||
def test_update_listener_empty_description(self):
|
||||
"""Test update a listener with an empty description"""
|
||||
self._update_listener(self.listener_id,
|
||||
description="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
listener = self._show_listener(self.listener_id)
|
||||
self.assertEqual(listener.get('description'), "")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7ddcf46b-068b-449c-9dde-ea4021dd76bf')
|
||||
def test_update_listener_empty_connection_limit(self):
|
||||
"""Test update a listener with an empty connection_limit"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_listener,
|
||||
listener_id=self.listener_id,
|
||||
connection_limit="")
|
||||
self._check_status_tree(load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('c891c857-fa89-4775-92d8-5320321b86cd')
|
||||
def test_delete_listener(self):
|
||||
"""Test delete listener"""
|
||||
create_new_listener_kwargs = self.create_listener_kwargs
|
||||
create_new_listener_kwargs['protocol_port'] = 8083
|
||||
new_listener = self._create_listener(**create_new_listener_kwargs)
|
||||
new_listener_id = new_listener['id']
|
||||
self._check_status_tree(
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
listener_ids=[self.listener_id, new_listener_id])
|
||||
listener = self._show_listener(new_listener_id)
|
||||
self.assertEqual(new_listener, listener)
|
||||
self.assertNotEqual(self.listener, new_listener)
|
||||
self._delete_listener(new_listener_id)
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self._show_listener,
|
||||
new_listener_id)
|
@ -1,115 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
import testtools
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoadBalancersTest(base.BaseAdminTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Load Balancers with default credentials:
|
||||
|
||||
list load balancers
|
||||
create load balancer
|
||||
get load balancer
|
||||
update load balancer
|
||||
delete load balancer
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(LoadBalancersTest, cls).resource_setup()
|
||||
cls.create_lb_kwargs = {'tenant_id': cls.subnet['tenant_id'],
|
||||
'vip_subnet_id': cls.subnet['id']}
|
||||
cls.load_balancer = \
|
||||
cls._create_active_load_balancer(**cls.create_lb_kwargs)
|
||||
cls.load_balancer_id = cls.load_balancer['id']
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@testtools.skipIf('1641902' in CONF.nsxv.bugs_to_resolve,
|
||||
"skip_because bug=1641902")
|
||||
@decorators.idempotent_id('0008ae1e-77a2-45d9-b81e-0e3119b5a26d')
|
||||
def test_create_load_balancer_missing_tenant_id_field_for_admin(self):
|
||||
"""Test create load balancer with a missing tenant id field.
|
||||
|
||||
Verify tenant_id matches when creating loadbalancer vs.
|
||||
load balancer(admin tenant)
|
||||
Kilo: @decorators.skip_because(bug="1641902")
|
||||
"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
admin_lb = self._show_load_balancer(
|
||||
load_balancer.get('id'))
|
||||
self.assertEqual(load_balancer.get('tenant_id'),
|
||||
admin_lb.get('tenant_id'))
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@testtools.skipIf('1715126' in CONF.nsxv.bugs_to_resolve,
|
||||
"skip_because bug=1715126")
|
||||
@decorators.idempotent_id('37620941-47c1-40b2-84d8-db17ff823ebc')
|
||||
def test_create_load_balancer_missing_tenant_id_for_other_tenant(self):
|
||||
"""Test create load balancer with a missing tenant id field.
|
||||
|
||||
Verify tenant_id does not match of subnet(non-admin tenant) vs.
|
||||
load balancer(admin tenant)
|
||||
Kilo: @decorators.skip_because(bug="1638571")
|
||||
"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertNotEqual(load_balancer.get('tenant_id'),
|
||||
self.subnet['tenant_id'])
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('5bf483f5-ae28-47f5-8805-642da0ffcb40')
|
||||
# Empty tenant_id causing ServerFault
|
||||
def test_create_load_balancer_empty_tenant_id_field(self):
|
||||
"""Test create load balancer with empty tenant_id field should fail
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638148")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self._create_load_balancer,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
wait=False,
|
||||
tenant_id="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('19fc8a44-1280-49f3-be5b-0d30e6e43363')
|
||||
# NSX-v: 2nd tenant_id at the same subnet not supported; got serverFault
|
||||
def test_create_load_balancer_for_another_tenant(self):
|
||||
"""Test create load balancer for other tenant
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638571")
|
||||
"""
|
||||
tenant = 'deffb4d7c0584e89a8ec99551565713c'
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
tenant_id=tenant)
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('tenant_id'), tenant)
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
@ -1,497 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from oslo_log import log as logging
|
||||
import testtools
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoadBalancersTest(base.BaseTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Load Balancers with default credentials:
|
||||
|
||||
list load balancers
|
||||
create load balancer
|
||||
get load balancer
|
||||
update load balancer
|
||||
delete load balancer
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(LoadBalancersTest, cls).resource_setup()
|
||||
cls.create_lb_kwargs = {'tenant_id': cls.subnet['tenant_id'],
|
||||
'vip_subnet_id': cls.subnet['id']}
|
||||
cls.load_balancer = \
|
||||
cls._create_active_load_balancer(**cls.create_lb_kwargs)
|
||||
cls.load_balancer_id = cls.load_balancer['id']
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('b7ea6c09-e077-4a67-859b-b2cd01e3b46b')
|
||||
def test_list_load_balancers(self):
|
||||
"""Test list load balancers with one load balancer"""
|
||||
load_balancers = self._list_load_balancers()
|
||||
self.assertEqual(len(load_balancers), 1)
|
||||
self.assertIn(self.load_balancer, load_balancers)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('8c2302df-ca94-4950-9826-eb996630a392')
|
||||
def test_list_load_balancers_two(self):
|
||||
"""Test list load balancers with two load balancers"""
|
||||
new_load_balancer = self._create_active_load_balancer(
|
||||
**self.create_lb_kwargs)
|
||||
new_load_balancer_id = new_load_balancer['id']
|
||||
self.addCleanup(self._delete_load_balancer, new_load_balancer_id)
|
||||
load_balancers = self._list_load_balancers()
|
||||
self.assertEqual(len(load_balancers), 2)
|
||||
self.assertIn(self.load_balancer, load_balancers)
|
||||
self.assertIn(new_load_balancer, load_balancers)
|
||||
self.assertNotEqual(self.load_balancer, new_load_balancer)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('56345a78-1d53-4c05-9d7b-3e5cf34c22aa')
|
||||
def test_get_load_balancer(self):
|
||||
"""Test get load balancer"""
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
self.assertEqual(self.load_balancer, load_balancer)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('5bf80330-d908-4025-9467-bca1727525c8')
|
||||
def test_create_load_balancer(self):
|
||||
"""Test create load balancer"""
|
||||
new_load_balancer = self._create_active_load_balancer(
|
||||
**self.create_lb_kwargs)
|
||||
new_load_balancer_id = new_load_balancer['id']
|
||||
self.addCleanup(self._delete_load_balancer, new_load_balancer_id)
|
||||
load_balancer = self._show_load_balancer(
|
||||
new_load_balancer_id)
|
||||
self.assertEqual(new_load_balancer, load_balancer)
|
||||
self.assertNotEqual(self.load_balancer, new_load_balancer)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('66bf5390-154f-4627-af61-2c1c30325d6f')
|
||||
def test_create_load_balancer_missing_vip_subnet_id_field(self):
|
||||
"""Test create load balancer
|
||||
|
||||
with a missing required vip_subnet_id field
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
tenant_id=self.subnet['tenant_id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8e78a7e6-2da3-4f79-9f66-fd1447277883')
|
||||
def test_create_load_balancer_empty_provider_field(self):
|
||||
"""Test create load balancer with an empty provider field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
provider="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('def37122-3f9a-47f5-b7b5-b5c0d5e7e5ca')
|
||||
def test_create_load_balancer_empty_description_field(self):
|
||||
"""Test create load balancer with an empty description field"""
|
||||
load_balancer = self._create_active_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'], description="")
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('description'), "")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('69944c74-3ea1-4c06-8d28-82120721a13e')
|
||||
def test_create_load_balancer_empty_vip_address_field(self):
|
||||
"""Test create load balancer with empty vip_address field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
vip_address="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('63bbe788-f3a6-444f-89b3-8c740425fc39')
|
||||
def test_create_load_balancer_missing_admin_state_up(self):
|
||||
"""Test create load balancer with a missing admin_state_up field"""
|
||||
load_balancer = self._create_active_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('admin_state_up'), True)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('499f164a-e926-47a6-808a-14f3c29d04c9')
|
||||
def test_create_load_balancer_empty_admin_state_up_field(self):
|
||||
"""Test create load balancer with empty admin_state_up field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
admin_state_up="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('e4511356-0e78-457c-a310-8515b2dedad4')
|
||||
def test_create_load_balancer_missing_name(self):
|
||||
"""Test create load balancer with a missing name field"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('name'), '')
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('6bd4a92c-7498-4b92-aeae-bce0b74608e3')
|
||||
def test_create_load_balancer_empty_name(self):
|
||||
"""Test create load balancer with an empty name field"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'], name="")
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('name'), "")
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('e605b1ea-5179-4035-8100-c24d0164a5a5')
|
||||
def test_create_load_balancer_missing_description(self):
|
||||
"""Test create load balancer with a missing description field"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('description'), '')
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('9f718024-340b-405f-817f-311392353c32')
|
||||
def test_create_load_balancer_missing_vip_address(self):
|
||||
"""Test create load balancer
|
||||
|
||||
with a missing vip_address field,checks for
|
||||
ipversion and actual ip address
|
||||
"""
|
||||
load_balancer = self._create_active_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
load_balancer_ip_initial = load_balancer['vip_address']
|
||||
ip = netaddr.IPAddress(load_balancer_ip_initial)
|
||||
self.assertEqual(ip.version, 4)
|
||||
load_balancer = self._show_load_balancer(
|
||||
load_balancer['id'])
|
||||
load_balancer_final = load_balancer['vip_address']
|
||||
self.assertEqual(load_balancer_ip_initial, load_balancer_final)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('f599ccbd-73e8-4e27-96a5-d9e0e3419a9f')
|
||||
def test_create_load_balancer_missing_provider_field(self):
|
||||
"""Test create load balancer with a missing provider field"""
|
||||
load_balancer = self._create_active_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
load_balancer_initial = load_balancer['provider']
|
||||
load_balancer = self._show_load_balancer(
|
||||
load_balancer['id'])
|
||||
load_balancer_final = load_balancer['provider']
|
||||
self.assertEqual(load_balancer_initial, load_balancer_final)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('377166eb-f581-4383-bc2e-54fdeed73e42')
|
||||
def test_create_load_balancer_invalid_vip_subnet_id(self):
|
||||
"""Test create load balancer with an invalid vip subnet id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
vip_subnet_id="abc123")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('512bec06-5259-4e93-b482-7ec3346c794a')
|
||||
def test_create_load_balancer_empty_vip_subnet_id(self):
|
||||
"""Test create load balancer with an empty vip subnet id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
vip_subnet_id="")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('02bd6d0e-820e-46fb-89cb-1d335e7aaa02')
|
||||
def test_create_load_balancer_invalid_tenant_id(self):
|
||||
"""Test create load balancer with an invalid tenant id"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
tenant_id="&^%123")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b8c56e4a-9644-4119-8fc9-130841caf662')
|
||||
def test_create_load_balancer_invalid_name(self):
|
||||
"""Test create load balancer with an invalid name
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
tenant_id=self.subnet['tenant_id'],
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
name='n' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d638ae60-7de5-45da-a7d9-53eca4998980')
|
||||
def test_create_load_balancer_invalid_description(self):
|
||||
"""Test create load balancer with an invalid description
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
tenant_id=self.subnet['tenant_id'],
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
description='d' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('56768aa6-b26e-48aa-8118-956c62930d79')
|
||||
def test_create_load_balancer_incorrect_attribute(self):
|
||||
"""Test create a load balancer with an extra, incorrect field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
tenant_id=self.subnet['tenant_id'],
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
protocol_port=80)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('a130e70f-9d76-4bff-89de-3e564952b244')
|
||||
def test_create_load_balancer_missing_tenant_id_field(self):
|
||||
"""Test create load balancer with a missing tenant id field"""
|
||||
load_balancer = self._create_load_balancer(
|
||||
vip_subnet_id=self.subnet['id'])
|
||||
self.addCleanup(self._delete_load_balancer, load_balancer['id'])
|
||||
self.assertEqual(load_balancer.get('tenant_id'),
|
||||
self.subnet['tenant_id'])
|
||||
self._wait_for_load_balancer_status(load_balancer['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('25261cca-0c38-4dc8-bb40-f7692035740f')
|
||||
def test_create_load_balancer_empty_tenant_id_field(self):
|
||||
"""Test create load balancer with empty tenant_id field"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
wait=False,
|
||||
tenant_id="")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('10de328d-c754-484b-841f-313307f92935')
|
||||
def test_create_load_balancer_other_tenant_id_field(self):
|
||||
"""Test create load balancer for other tenant"""
|
||||
tenant = 'deffb4d7c0584e89a8ec99551565713c'
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._create_load_balancer,
|
||||
wait=False,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
tenant_id=tenant)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@testtools.skipIf('1703396' in CONF.nsxv.bugs_to_resolve,
|
||||
"skip_because bug=1703396")
|
||||
@decorators.idempotent_id('9963cbf5-97d0-4ab9-96e5-6cbd65c98714')
|
||||
def test_create_load_balancer_invalid_flavor_field(self):
|
||||
"""Test create load balancer with an invalid flavor field"""
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self._create_load_balancer,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
flavor_id="NO_SUCH_FLAVOR")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@testtools.skipIf('1703396' in CONF.nsxv.bugs_to_resolve,
|
||||
"skip_because bug=1703396")
|
||||
@decorators.idempotent_id('f7319e32-0fad-450e-8f53-7567f56e8223')
|
||||
def test_create_load_balancer_provider_flavor_conflict(self):
|
||||
"""Test create load balancer with both a provider and a flavor"""
|
||||
self.assertRaises(exceptions.Conflict,
|
||||
self._create_load_balancer,
|
||||
vip_subnet_id=self.subnet['id'],
|
||||
flavor_id="NO_SUCH_FLAVOR",
|
||||
provider="NO_SUCH_PROVIDER")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('1d92d98f-550f-4f05-a246-cdf4525459a2')
|
||||
def test_update_load_balancer(self):
|
||||
"""Test update load balancer"""
|
||||
self._update_load_balancer(self.load_balancer_id,
|
||||
name='new_name')
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
self.assertEqual(load_balancer.get('name'), 'new_name')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('474ca200-8dea-4d20-8468-abc0169a445b')
|
||||
def test_update_load_balancer_empty_name(self):
|
||||
"""Test update load balancer with empty name"""
|
||||
self._update_load_balancer(self.load_balancer_id,
|
||||
name="")
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
self.assertEqual(load_balancer.get('name'), "")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('551be885-215d-4941-8870-651cbc871162')
|
||||
def test_update_load_balancer_invalid_name(self):
|
||||
"""Test update load balancer with invalid name
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_load_balancer,
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
wait=False,
|
||||
name='a' * 256)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('62eef0ba-3859-4c8f-9e6a-8d6918754597')
|
||||
def test_update_load_balancer_missing_name(self):
|
||||
"""Test update load balancer with missing name"""
|
||||
loadbalancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
load_balancer_initial = loadbalancer['name']
|
||||
self._update_load_balancer(self.load_balancer_id)
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
load_balancer_new = load_balancer['name']
|
||||
self.assertEqual(load_balancer_initial, load_balancer_new)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('ab3550c6-8b21-463c-bc5d-e79cbae3432f')
|
||||
def test_update_load_balancer_invalid_description(self):
|
||||
"""Test update load balancer with invalid description
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_load_balancer,
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
wait=False,
|
||||
description='a' * 256)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('157ebdbf-4ad2-495d-b880-c1b1a8edc46d')
|
||||
def test_update_load_balancer_empty_description(self):
|
||||
"""Test update load balancer with empty description"""
|
||||
self._update_load_balancer(self.load_balancer_id,
|
||||
description="")
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
self.assertEqual(load_balancer.get('description'), "")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('d13fa2f5-e8df-4d53-86a8-68583941200c')
|
||||
def test_update_load_balancer_missing_description(self):
|
||||
"""Test update load balancer with missing description"""
|
||||
loadbalancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
load_balancer_initial = loadbalancer['description']
|
||||
self._update_load_balancer(self.load_balancer_id)
|
||||
load_balancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
load_balancer_new = load_balancer['description']
|
||||
self.assertEqual(load_balancer_initial, load_balancer_new)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('96e46a1a-62e7-47f1-98c5-9983f89e622f')
|
||||
def test_update_load_balancer_invalid_admin_state_up_field(self):
|
||||
"""Test update load balancer with an invalid admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_load_balancer,
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
wait=False,
|
||||
admin_state_up="a&^%$jbc123")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('48f1e227-8b15-4389-a050-7ce76f4b4d46')
|
||||
def test_update_load_balancer_empty_admin_state_up_field(self):
|
||||
"""Test update load balancer with an empty admin_state_up"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_load_balancer,
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
wait=False,
|
||||
admin_state_up="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('a9182e53-ddaa-4f41-af54-585d983279ba')
|
||||
def test_update_load_balancer_missing_admin_state_up(self):
|
||||
"""Test update load balancer with missing admin state field"""
|
||||
loadbalancer = self._show_load_balancer(
|
||||
self.load_balancer_id)
|
||||
load_balancer_initial = loadbalancer['admin_state_up']
|
||||
self._update_load_balancer(self.load_balancer_id)
|
||||
self.assertEqual(load_balancer_initial, True)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('bfbe9339-d083-4a88-b6d6-015522809c3a')
|
||||
def test_update_load_balancer_incorrect_attribute(self):
|
||||
"""Test update a load balancer with an extra, invalid attribute"""
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self._update_load_balancer,
|
||||
load_balancer_id=self.load_balancer_id,
|
||||
wait=False,
|
||||
name="lb_name",
|
||||
description="lb_name_description",
|
||||
admin_state_up=True,
|
||||
port=80)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('d2258984-6e9a-41d6-bffa-0543c8b1f2b0')
|
||||
def test_get_load_balancer_status_tree(self):
|
||||
"""Test get load balancer status tree"""
|
||||
statuses = self._show_load_balancer_status_tree(
|
||||
self.load_balancer_id)
|
||||
load_balancer = statuses['loadbalancer']
|
||||
self.assertEqual("ONLINE", load_balancer['operating_status'])
|
||||
self.assertEqual("ACTIVE", load_balancer['provisioning_status'])
|
||||
self.assertEmpty(load_balancer['listeners'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('a23677a9-b770-4894-8be9-cd66590c228b')
|
||||
def test_get_load_balancer_stats(self):
|
||||
"""Test get load balancer stats"""
|
||||
stats = self._show_load_balancer_stats(
|
||||
self.load_balancer_id)
|
||||
self.assertEqual(0, stats['bytes_in'])
|
||||
self.assertEqual(0, stats['bytes_out'])
|
||||
self.assertEqual(0, stats['total_connections'])
|
||||
self.assertEqual(0, stats['active_connections'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('f289f8df-a867-45cd-bee3-7ff08f5e96e0')
|
||||
def test_delete_load_balancer(self):
|
||||
"""Test delete load balancer"""
|
||||
new_load_balancer = self._create_active_load_balancer(
|
||||
**self.create_lb_kwargs)
|
||||
new_load_balancer_id = new_load_balancer['id']
|
||||
load_balancer = self._show_load_balancer(
|
||||
new_load_balancer_id)
|
||||
self.assertEqual(new_load_balancer, load_balancer)
|
||||
self.assertNotEqual(self.load_balancer, new_load_balancer)
|
||||
self._delete_load_balancer(new_load_balancer_id)
|
||||
self.assertRaises(exceptions.NotFound,
|
||||
self._show_load_balancer,
|
||||
new_load_balancer_id)
|
@ -1,84 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemberTest(base.BaseAdminTestCase):
|
||||
"""Test the member creation operation in admin scope in Neutron-LBaaS API
|
||||
|
||||
using the REST client for members:
|
||||
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(MemberTest, cls).resource_setup()
|
||||
# core network setup is moved to base class
|
||||
cls.load_balancer = cls._create_active_load_balancer(
|
||||
tenant_id=cls.tenant_id,
|
||||
vip_subnet_id=cls.subnet.get('id'))
|
||||
cls.load_balancer_id = cls.load_balancer.get("id")
|
||||
cls._wait_for_load_balancer_status(cls.load_balancer_id)
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.listener_id = cls.listener.get('id')
|
||||
cls.pool = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener_id)
|
||||
cls.pool_id = cls.pool.get('id')
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(MemberTest, cls).resource_cleanup()
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('03eeec24-78d8-4c2f-8d6c-4a78817f352e')
|
||||
def test_create_member_invalid_tenant_id(self):
|
||||
"""Test create member with invalid tenant_id"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
# avoid port=80 to avoid duplicate port during failed testings
|
||||
member_opts['protocol_port'] = 84
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['tenant_id'] = "$232!$pw"
|
||||
member = self._create_member(self.pool_id, **member_opts)
|
||||
self.addCleanup(self._delete_member, self.pool_id, member['id'])
|
||||
self.assertEqual(member['subnet_id'], self.subnet_id)
|
||||
self.assertEqual(member['tenant_id'], "$232!$pw")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('01c9ea0c-bdfe-4108-95d1-69ecdc0a1f26')
|
||||
def test_create_member_empty_tenant_id(self):
|
||||
"""Test create member with an empty tenant_id should fail
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638148")
|
||||
"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['tenant_id'] = ""
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
@ -1,479 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MemberTest(base.BaseTestCase):
|
||||
|
||||
"""Test the following operations in Neutron-LBaaS API
|
||||
|
||||
using the REST client for members:
|
||||
|
||||
list members of a pool
|
||||
create a member of a Pool
|
||||
update a pool member
|
||||
delete a member
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(MemberTest, cls).resource_setup()
|
||||
# core network setup is moved to base class
|
||||
cls.load_balancer = cls._create_active_load_balancer(
|
||||
tenant_id=cls.tenant_id,
|
||||
vip_subnet_id=cls.subnet.get('id'))
|
||||
cls.load_balancer_id = cls.load_balancer.get("id")
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
cls.listener_id = cls.listener.get('id')
|
||||
cls.pool = cls._create_pool(protocol='HTTP',
|
||||
tenant_id=cls.tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=cls.listener_id)
|
||||
cls.pool_id = cls.pool.get('id')
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(MemberTest, cls).resource_cleanup()
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('6dcdc53c-52cf-4b6e-aeec-d13df68ed001')
|
||||
def test_list_empty_members(self):
|
||||
"""Test that pool members are empty."""
|
||||
members = self._list_members(self.pool_id)
|
||||
self.assertEmpty(members,
|
||||
msg='Initial pool was supposed to be empty')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('346e49ce-0665-4995-a03a-b007052d3619')
|
||||
def test_list_3_members(self):
|
||||
"""Test that we can list members. """
|
||||
member_ips_exp = set([u"127.0.0.0", u"127.0.0.1", u"127.0.0.2"])
|
||||
for ip in member_ips_exp:
|
||||
member_opts = self.build_member_opts()
|
||||
member_opts["address"] = ip
|
||||
member = self._create_member(self.pool_id, **member_opts)
|
||||
self.addCleanup(self._delete_member, self.pool_id, member['id'])
|
||||
members = self._list_members(self.pool_id)
|
||||
self.assertEqual(3, len(members))
|
||||
for member in members:
|
||||
self.assertEqual(member["tenant_id"], self.tenant_id)
|
||||
self.assertEqual(member["protocol_port"], 80)
|
||||
self.assertEqual(member["subnet_id"], self.subnet_id)
|
||||
found_member_ips = set([m["address"] for m in members])
|
||||
self.assertEqual(found_member_ips, member_ips_exp)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('3121bbdc-81e4-40e3-bf66-3ceefd72a0f5')
|
||||
def test_add_member(self):
|
||||
"""Test that we can add a single member."""
|
||||
expect_empty_members = self._list_members(self.pool_id)
|
||||
self.assertEmpty(expect_empty_members)
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id, **member_opts)
|
||||
member_id = member.get("id")
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(member_opts["address"], member["address"])
|
||||
self.assertEqual(self.tenant_id, member["tenant_id"])
|
||||
self.assertEqual(80, member["protocol_port"])
|
||||
self.assertEqual(self.subnet_id, member["subnet_id"])
|
||||
# Should have default values for admin_state_up and weight
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('fc513a45-4c24-42ea-8807-a9b86a81ee56')
|
||||
def test_get_member(self):
|
||||
"""Test that we can fetch a member by id."""
|
||||
member_opts = self.build_member_opts()
|
||||
member_id = self._create_member(self.pool_id,
|
||||
**member_opts)["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
member = self._show_member(self.pool_id, member_id)
|
||||
self.assertEqual(member_id, member["id"])
|
||||
self.assertEqual(member_opts["address"], member["address"])
|
||||
self.assertEqual(member_opts["tenant_id"], member["tenant_id"])
|
||||
self.assertEqual(member_opts["protocol_port"], member["protocol_port"])
|
||||
self.assertEqual(member_opts["subnet_id"], member["subnet_id"])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('2cead036-5a63-43a4-9d9d-03c9b744c101')
|
||||
def test_create_member_missing_required_field_tenant_id(self):
|
||||
"""Test if a non_admin user can create a member_opts
|
||||
|
||||
with tenant_id missing
|
||||
"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member = self._create_member(self.pool_id, **member_opts)
|
||||
self.addCleanup(self._delete_member, self.pool_id, member['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d7ed0870-a065-4fbd-8d95-0ea4d12063c2')
|
||||
def test_create_member_missing_required_field_address(self):
|
||||
"""Test create a member with missing field address"""
|
||||
member_opts = {}
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8d2b9a53-aac7-4fb9-b068-47647289aa21')
|
||||
def test_create_member_missing_required_field_protocol_port(self):
|
||||
"""Test create a member with missing field protocol_port"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('9710cd4c-aac0-4b71-b295-82a88c67b0b8')
|
||||
def test_create_member_missing_required_field_subnet_id(self):
|
||||
"""Test create a member with missing field subnet_id """
|
||||
member_opts = {}
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a6814c49-758d-490a-9557-ef03f0d78c44')
|
||||
def test_raises_BadRequest_when_missing_attrs_during_member_create(self):
|
||||
"""Test failure on missing attributes on member create."""
|
||||
member_opts = {}
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('840bfa84-1d16-4149-a863-6f7afec1682f')
|
||||
def test_create_member_invalid_tenant_id(self):
|
||||
"""Test create member with invalid tenant_id"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['tenant_id'] = "$232!$pw"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a99dbd0a-5f8c-4c96-8900-1a7d297d913b')
|
||||
def test_create_member_invalid_address(self):
|
||||
"""Test create member with invalid address"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127$%<ki"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('736b0771-b98c-4045-97e0-a44e4e18c22e')
|
||||
def test_create_member_invalid_protocol_port(self):
|
||||
"""Test create member with invalid protocol_port"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 8090000
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('2cc67f5a-3f66-427e-90b8-59a3da5c1d21')
|
||||
def test_create_member_invalid_subnet_id(self):
|
||||
"""Test create member with invalid subnet_id"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = "45k%^"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('3403c6f5-5a30-4115-ac3a-8a22855fd614')
|
||||
def test_create_member_invalid_admin_state_up(self):
|
||||
"""Test create member with invalid admin_state_up"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['admin_state_up'] = "$232!$pw"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b12216ec-3442-4239-ba2c-dd17640449d1')
|
||||
def test_create_member_invalid_weight(self):
|
||||
"""Test create member with invalid weight"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['weight'] = "$232!$pw"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('88eb464b-4de6-4ed7-a1e8-bc61581a5c6e')
|
||||
def test_create_member_empty_tenant_id(self):
|
||||
"""Test create member with an empty tenant_id"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['tenant_id'] = ""
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('238cd859-2b60-4e42-b356-c6b38768c3e4')
|
||||
def test_create_member_empty_address(self):
|
||||
"""Test create member with an empty address"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = ""
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('834905ac-5c95-4dfc-900c-1676b6c28247')
|
||||
def test_create_member_empty_protocol_port(self):
|
||||
"""Test create member with an empty protocol_port"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = ""
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a0f2148e-160e-4b12-8e30-567a0448d179')
|
||||
def test_create_member_empty_subnet_id(self):
|
||||
"""Test create member with empty subnet_id"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = ""
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('63cd5897-b82c-4508-8be7-3b7ccab21798')
|
||||
def test_create_member_empty_admin_state_up(self):
|
||||
"""Test create member with an empty admin_state_up"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['admin_state_up'] = ""
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('55f16682-74a2-4df7-a6b3-2da3623f4a41')
|
||||
def test_create_member_empty_weight(self):
|
||||
"""Test create member with an empty weight"""
|
||||
member_opts = {}
|
||||
member_opts['address'] = "127.0.0.1"
|
||||
member_opts['protocol_port'] = 80
|
||||
member_opts['subnet_id'] = self.subnet_id
|
||||
member_opts['weight'] = ""
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('c99f6146-2c85-4a32-a850-942d6836c175')
|
||||
def test_delete_member(self):
|
||||
"""Test that we can delete a member by id."""
|
||||
member_opts = self.build_member_opts()
|
||||
member_id = self._create_member(self.pool_id,
|
||||
**member_opts)["id"]
|
||||
members = self._list_members(self.pool_id)
|
||||
self.assertEqual(1, len(members))
|
||||
self._delete_member(self.pool_id, member_id)
|
||||
members = self._list_members(self.pool_id)
|
||||
self.assertEmpty(members)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7d51aa2d-9582-4160-b07b-bf3c3b3e335e')
|
||||
def test_update_member(self):
|
||||
"""Test that we can update a member."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member['id'])
|
||||
# Make sure the defaults are correct
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
# Lets overwrite the defaults
|
||||
member_opts = {"weight": 10, "admin_state_up": False}
|
||||
member = self._update_member(self.pool_id, member_id,
|
||||
**member_opts)
|
||||
# And make sure they stick
|
||||
self.assertFalse(member["admin_state_up"])
|
||||
self.assertEqual(10, member["weight"])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('101555d6-c472-45e4-b302-b2916ab6fad5')
|
||||
def test_update_member_missing_admin_state_up(self):
|
||||
"""Test that we can update a member with missing admin_state_up."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"weight": 10}
|
||||
member = self._update_member(self.pool_id, member_id,
|
||||
**member_opts)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(10, member["weight"])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('815c037b-7e3b-474d-a4f6-eec26b44d677')
|
||||
def test_update_member_missing_weight(self):
|
||||
"""Test that we can update a member with missing weight."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"admin_state_up": False}
|
||||
member = self._update_member(self.pool_id, member_id,
|
||||
**member_opts)
|
||||
self.assertFalse(member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('3ab3bb11-e287-4693-8ea0-5cfbb4cc2c85')
|
||||
def test_update_member_invalid_admin_state_up(self):
|
||||
"""Test that we can update a member with empty admin_state_up."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"weight": 10, "admin_state_up": "%^67"}
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('71979c3b-08d6-449b-8de2-1eefc9d0db0e')
|
||||
def test_update_member_invalid_weight(self):
|
||||
"""Test that we can update a member with an empty weight."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"admin_state_up": False, "weight": "*^$df"}
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('e1470212-0a36-4d8c-8e30-1f69a8d31ae1')
|
||||
def test_update_member_empty_admin_state_up(self):
|
||||
"""Test that we can update a member with empty admin_state_up."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"weight": 10, "admin_state_up": ""}
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('cd1e276c-b220-439d-a9dc-823a10d11b6a')
|
||||
def test_update_member_empty_weight(self):
|
||||
"""Test that we can update a member with an empty weight."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id,
|
||||
**member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
self.assertEqual(True, member["admin_state_up"])
|
||||
self.assertEqual(1, member["weight"])
|
||||
member_opts = {"admin_state_up": False, "weight": ""}
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('25779006-1e2c-4155-9126-49f45e7646a3')
|
||||
def test_raises_immutable_when_updating_immutable_attrs_on_member(self):
|
||||
"""Test failure on immutable attribute on member create."""
|
||||
member_opts = self.build_member_opts()
|
||||
member_id = self._create_member(self.pool_id,
|
||||
**member_opts)["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
member_opts = {"address": "127.0.0.69"}
|
||||
# The following code actually raises a 400 instead of a 422 as expected
|
||||
# Will need to consult with blogan as to what to fix
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a332ecda-bb18-4cc2-b847-c09a72d90fd1')
|
||||
def test_raises_exception_on_invalid_attr_on_create(self):
|
||||
"""Test failure on invalid attribute on member create."""
|
||||
member_opts = self.build_member_opts()
|
||||
member_opts["invalid_op"] = "should_break_request"
|
||||
self.assertRaises(ex.BadRequest, self._create_member,
|
||||
self.pool_id, **member_opts)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('bc4c3eb5-14d5-43dd-93cb-603801fa6f32')
|
||||
def test_raises_exception_on_invalid_attr_on_update(self):
|
||||
"""Test failure on invalid attribute on member update."""
|
||||
member_opts = self.build_member_opts()
|
||||
member = self._create_member(self.pool_id, **member_opts)
|
||||
member_id = member["id"]
|
||||
self.addCleanup(self._delete_member, self.pool_id, member_id)
|
||||
member_opts["invalid_op"] = "watch_this_break"
|
||||
self.assertRaises(ex.BadRequest, self._update_member,
|
||||
self.pool_id, member_id, **member_opts)
|
||||
|
||||
@classmethod
|
||||
def build_member_opts(cls, **kw):
|
||||
"""Build out default member dictionary """
|
||||
opts = {"address": kw.get("address", "127.0.0.1"),
|
||||
"tenant_id": kw.get("tenant_id", cls.tenant_id),
|
||||
"protocol_port": kw.get("protocol_port", 80),
|
||||
"subnet_id": kw.get("subnet_id", cls.subnet_id)}
|
||||
return opts
|
@ -1,114 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
PROTOCOL_PORT = 80
|
||||
|
||||
|
||||
class TestPools(base.BaseAdminTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Pools:
|
||||
|
||||
list pools
|
||||
create pool
|
||||
get pool
|
||||
update pool
|
||||
delete pool
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestPools, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'))
|
||||
|
||||
def increment_protocol_port(self):
|
||||
global PROTOCOL_PORT
|
||||
PROTOCOL_PORT += 1
|
||||
|
||||
def _prepare_and_create_pool(self, protocol=None, lb_algorithm=None,
|
||||
listener_id=None, **kwargs):
|
||||
self.increment_protocol_port()
|
||||
if not protocol:
|
||||
protocol = 'HTTP'
|
||||
if not lb_algorithm:
|
||||
lb_algorithm = 'ROUND_ROBIN'
|
||||
if not listener_id:
|
||||
listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=PROTOCOL_PORT, **kwargs)
|
||||
listener_id = listener.get('id')
|
||||
response = self._create_pool(protocol=protocol,
|
||||
lb_algorithm=lb_algorithm,
|
||||
listener_id=listener_id,
|
||||
**kwargs)
|
||||
self.addCleanup(self._delete_pool, response['id'])
|
||||
return response
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('71b9d3e1-3f13-4c84-a905-054c9cd3d4aa')
|
||||
def test_create_pool_using_empty_tenant_field(self):
|
||||
"""Test create pool with empty tenant field should fail
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1638148")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
tenant_id="",
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('f782967d-8dca-4d7b-b625-bfd811379b42')
|
||||
def test_create_pool_missing_tenant_id_for_other_tenant(self):
|
||||
"""Test create pool with a missing tenant id field.
|
||||
|
||||
Verify tenant_id does not match when creating pool vs.
|
||||
pool (admin client)
|
||||
"""
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_tenant = pool['tenant_id']
|
||||
self.assertNotEqual(pool_tenant, self.subnet['tenant_id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('140c4c95-3d12-47d7-9b20-cc3c60e24af9')
|
||||
def test_create_pool_missing_tenant_id_for_admin(self):
|
||||
"""Test create pool with a missing tenant id field.
|
||||
|
||||
Verify tenant_id matches when creating pool vs. pool (admin client)
|
||||
"""
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_tenant = pool['tenant_id']
|
||||
self.assertEqual(pool_tenant, pool.get('tenant_id'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('1cf07f5c-7609-4b64-b5b8-f27050860132')
|
||||
def test_create_pool_for_another_tenant(self):
|
||||
"""Test create pool for other tenant field"""
|
||||
tenant = 'deffb4d7c0584e89a8ec99551565713c'
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
tenant_id=tenant)
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_tenant = pool.get('tenant_id')
|
||||
self.assertEqual(pool_tenant, tenant)
|
@ -1,634 +0,0 @@
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api.lbaas import base
|
||||
|
||||
PROTOCOL_PORT = 80
|
||||
|
||||
|
||||
class TestPools(base.BaseTestCase):
|
||||
|
||||
"""Tests the following operations in the Neutron-LBaaS API
|
||||
|
||||
using the REST client for Pools:
|
||||
|
||||
list pools
|
||||
create pool
|
||||
get pool
|
||||
update pool
|
||||
delete pool
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestPools, cls).resource_setup()
|
||||
cls.load_balancer = cls._create_load_balancer(
|
||||
tenant_id=cls.subnet.get('tenant_id'),
|
||||
vip_subnet_id=cls.subnet.get('id'),
|
||||
wait=True)
|
||||
cls.listener = cls._create_listener(
|
||||
loadbalancer_id=cls.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=80)
|
||||
|
||||
def increment_protocol_port(self):
|
||||
global PROTOCOL_PORT
|
||||
PROTOCOL_PORT += 1
|
||||
|
||||
def _prepare_and_create_pool(self, protocol=None, lb_algorithm=None,
|
||||
listener_id=None, cleanup=True, **kwargs):
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.increment_protocol_port()
|
||||
if not protocol:
|
||||
protocol = 'HTTP'
|
||||
if not lb_algorithm:
|
||||
lb_algorithm = 'ROUND_ROBIN'
|
||||
if not listener_id:
|
||||
listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=PROTOCOL_PORT,
|
||||
wait=True)
|
||||
listener_id = listener.get('id')
|
||||
response = self._create_pool(protocol=protocol,
|
||||
lb_algorithm=lb_algorithm,
|
||||
listener_id=listener_id,
|
||||
wait=True,
|
||||
**kwargs)
|
||||
if cleanup:
|
||||
self.addCleanup(self._delete_pool, response['id'])
|
||||
return response
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('99154002-e598-4277-b6d8-bf0fe10f276f')
|
||||
def test_list_pools_empty(self):
|
||||
"""Test get pools when empty"""
|
||||
pools = self._list_pools()
|
||||
self.assertEmpty(pools)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('4f09b544-8e82-4313-b452-8fe3ca5ad14e')
|
||||
def test_list_pools_one(self):
|
||||
"""Test get pools with one pool"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
new_pool = self._show_pool(new_pool['id'])
|
||||
pools = self._list_pools()
|
||||
self.assertEqual(1, len(pools))
|
||||
self.assertIn(new_pool, pools)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7562b846-a685-49ea-9d41-afcaff418bae')
|
||||
def test_list_pools_two(self):
|
||||
"""Test get pools with two pools"""
|
||||
new_pool1 = self._prepare_and_create_pool()
|
||||
new_pool2 = self._prepare_and_create_pool()
|
||||
pools = self._list_pools()
|
||||
self.assertEqual(2, len(pools))
|
||||
self.assertIn(new_pool1, pools)
|
||||
self.assertIn(new_pool2, pools)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('0cf61c6a-efd5-4859-9d92-da204f5ec1ed')
|
||||
def test_get_pool(self):
|
||||
"""Test get pool"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7fc310a0-7640-4f7c-8cdb-53b6ae23bd52')
|
||||
def test_create_pool(self):
|
||||
"""Test create pool"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('5f414612-4f8c-4f48-ac99-286356870fae')
|
||||
def test_create_pool_missing_required_fields(self):
|
||||
"""Test create pool with a missing required fields"""
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('7fe53b0c-d7b8-4283-aeb3-eeeb3219e42f')
|
||||
def test_create_pool_missing_tenant_field(self):
|
||||
"""Test create pool with a missing required tenant field"""
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_tenant = pool['tenant_id']
|
||||
self.assertEqual(tenant_id, pool_tenant)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7d17e507-99c2-4e8f-a403-27b630b403a2')
|
||||
def test_create_pool_missing_protocol_field(self):
|
||||
"""Test create pool with a missing required protocol field"""
|
||||
self.increment_protocol_port()
|
||||
listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=PROTOCOL_PORT)
|
||||
self.addCleanup(self._delete_listener, listener['id'])
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
listener_id = listener.get('id')
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
listener_id=listener_id,
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('99051cc6-bf51-4af0-b530-edbfb7d4b7ab')
|
||||
def test_create_pool_missing_lb_algorithm_field(self):
|
||||
"""Test create pool with a missing required lb algorithm field"""
|
||||
self.increment_protocol_port()
|
||||
listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=PROTOCOL_PORT)
|
||||
self.addCleanup(self._delete_listener, listener['id'])
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
listener_id = listener.get('id')
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
listener_id=listener_id,
|
||||
protocol='HTTP')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d04b75fe-688b-4713-83d1-f0ac29005391')
|
||||
def test_create_pool_missing_listener_id_field(self):
|
||||
"""Test create pool with a missing required listener id field"""
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
protocol='HTTP')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('378c56b4-cf61-448b-8460-1ffb1a091ea5')
|
||||
def test_create_pool_missing_description_field(self):
|
||||
"""Test create pool with missing description field"""
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
desc = pool_initial.get('description')
|
||||
self.assertEqual(desc, "")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('f73ff259-7fbb-41ac-ab92-c6eef0213e20')
|
||||
def test_create_pool_missing_name_field(self):
|
||||
"""Test create pool with a missing name field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
name = pool_initial.get('name')
|
||||
self.assertEqual(name, "")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('37957c70-6979-4e15-a316-8c29cb7e724e')
|
||||
def test_create_pool_missing_admin_state_up_field(self):
|
||||
"""Test create pool with a missing admin_state_up field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
state = pool_initial.get('admin_state_up')
|
||||
self.assertEqual(state, True)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('d1e41b4b-fe79-4bec-bc94-5934995c6e05')
|
||||
def test_create_pool_missing_session_pers_field(self):
|
||||
"""Test create pool with a missing session_pers field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
sess = pool_initial.get('session_persistence')
|
||||
self.assertIsNone(sess)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('440b3975-b7c8-4cff-85a5-a0a02ad6b8f9')
|
||||
def test_create_pool_invalid_protocol(self):
|
||||
"""Test create pool with an invalid protocol"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='UDP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a0b322b1-629c-483c-9136-397fc9100e48')
|
||||
def test_create_pool_invalid_session_persistence_field(self):
|
||||
"""Test create pool with invalid session persistance field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
session_persistence={'type': 'HTTP'},
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('53cd9427-29fa-4a55-adb8-9cb6388b9548')
|
||||
def test_create_pool_invalid_algorithm(self):
|
||||
"""Test create pool with an invalid algorithm"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
lb_algorithm='LEAST_CON',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('26e6bb34-4b0f-4650-a5dc-87484fa55038')
|
||||
def test_create_pool_invalid_admin_state_up(self):
|
||||
"""Test create pool with an invalid admin state up field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
admin_state_up="$!1%9823",
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('8df02129-2b9c-4628-a390-805967107090')
|
||||
def test_create_pool_invalid_listener_field(self):
|
||||
"""Test create pool with invalid listener field"""
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
listener_id="$@5$%$7863")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('94949cd4-ebc1-4af5-a220-9ebb32772fbc')
|
||||
def test_create_pool_invalid_tenant_id_field(self):
|
||||
"""Test create pool with invalid tenant_id field"""
|
||||
self.increment_protocol_port()
|
||||
listener = self._create_listener(
|
||||
loadbalancer_id=self.load_balancer.get('id'),
|
||||
protocol='HTTP', protocol_port=PROTOCOL_PORT)
|
||||
self.addCleanup(self._delete_listener, listener['id'])
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
listener_id = listener.get('id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id="*&7653^%&",
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
listener_id=listener_id)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('e335db64-ad16-4e23-bd60-c72c37c7b188')
|
||||
def test_create_pool_incorrect_attribute(self):
|
||||
"""Test create a pool with an extra, incorrect field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
protocol_port=80,
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('390053c1-adc9-4b1a-8eb0-dbdb9085cf0f')
|
||||
def test_create_pool_empty_listener_field(self):
|
||||
"""Test create pool with empty listener field"""
|
||||
tenant_id = self.subnet.get('tenant_id')
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
tenant_id=tenant_id,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
listener_id="")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('8b25defa-8efc-47f5-a43d-3d299d7b9752')
|
||||
def test_create_pool_empty_description_field(self):
|
||||
"""Test create pool with empty description field"""
|
||||
new_pool = self._prepare_and_create_pool(description="")
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_desc = pool.get('description')
|
||||
self.assertEqual(pool_desc, '')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('c8cd496c-7698-4c0e-bbed-fe9ef6c910de')
|
||||
def test_create_pool_empty_name_field(self):
|
||||
"""Test create pool with empty name field"""
|
||||
new_pool = self._prepare_and_create_pool(name="")
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
pool_name = pool.get('name')
|
||||
self.assertEqual(pool_name, '')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b7997d71-84ea-43d2-8ce0-eea4156cc952')
|
||||
def test_create_pool_empty_protocol(self):
|
||||
"""Test create pool with an empty protocol"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol="",
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('bffe50bb-8be5-4ed9-aea6-a15b40342599')
|
||||
def test_create_pool_empty_session_persistence_field(self):
|
||||
"""Test create pool with empty session persistence field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
session_persistence="",
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('4cfd301a-baae-462d-8041-84c337e95d16')
|
||||
def test_create_pool_empty_algorithm(self):
|
||||
"""Test create pool with an empty algorithm"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
lb_algorithm="",
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('814de2e3-a536-4ab1-a80f-9506b11c7bc8')
|
||||
def test_create_pool_empty_admin_state_up(self):
|
||||
"""Test create pool with an invalid admin state up field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
admin_state_up="",
|
||||
lb_algorithm='ROUND_ROBIN')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('0f230e6d-057d-4da8-a42d-f32464ae1c47')
|
||||
def test_create_pool_empty_tenant_field(self):
|
||||
"""Test create pool with empty tenant field"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
tenant_id="",
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('4a0e711a-b4da-4226-b265-f87b04ee4977')
|
||||
def test_create_pool_for_other_tenant_field(self):
|
||||
"""Test create pool for other tenant field"""
|
||||
tenant = 'deffb4d7c0584e89a8ec99551565713c'
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
tenant_id=tenant,
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('cb564af8-89aa-40ca-850e-55418da0f235')
|
||||
def test_create_pool_invalid_name_field(self):
|
||||
"""known bug with
|
||||
|
||||
input more than 255 chars Test create pool with invalid name field
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'],
|
||||
name='n' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7f4472be-feb7-4ab7-9fb9-97e08f1fa787')
|
||||
def test_create_pool_invalid_desc_field(self):
|
||||
"""known bug with
|
||||
|
||||
input more than 255 chars Test create pool with invalid desc field
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._prepare_and_create_pool,
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'],
|
||||
description='d' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('b09b14dc-029d-4132-94dd-e713c9bfa0ee')
|
||||
def test_create_pool_with_session_persistence_unsupported_type(self):
|
||||
"""Test create a pool
|
||||
|
||||
with an incorrect type value for session persistence
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
session_persistence={'type': 'UNSUPPORTED'},
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('b5af574a-d05f-4db0-aece-58676cdbf440')
|
||||
def test_create_pool_with_session_persistence_http_cookie(self):
|
||||
"""Test create a pool with session_persistence type=HTTP_COOKIE"""
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
session_persistence={'type': 'HTTP_COOKIE'})
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('2d6b6667-e38b-4e7f-8443-8dc7ee63ea87')
|
||||
def test_create_pool_with_session_persistence_app_cookie(self):
|
||||
"""Test create a pool with session_persistence type=APP_COOKIE"""
|
||||
new_pool = self._prepare_and_create_pool(
|
||||
session_persistence={'type': 'APP_COOKIE',
|
||||
'cookie_name': 'sessionId'})
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('9ac450fc-24c5-4b5c-a781-b23e5713f172')
|
||||
def test_create_pool_with_session_persistence_redundant_cookie_name(self):
|
||||
"""Test create a pool
|
||||
|
||||
with session_persistence with cookie_name for type=HTTP_COOKIE
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
session_persistence={'type': 'HTTP_COOKIE',
|
||||
'cookie_name': 'sessionId'},
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('7783ebd0-5bd9-43f0-baf2-a43212ba2617')
|
||||
def test_create_pool_with_session_persistence_without_cookie_name(self):
|
||||
"""Test create a pool
|
||||
|
||||
with session_persistence without cookie_name for type=APP_COOKIE
|
||||
"""
|
||||
self.assertRaises(ex.BadRequest, self._create_pool,
|
||||
session_persistence={'type': 'APP_COOKIE'},
|
||||
protocol='HTTP',
|
||||
lb_algorithm='ROUND_ROBIN',
|
||||
listener_id=self.listener['id'])
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('767ed26e-7114-402a-bdee-443d52009a73')
|
||||
def test_update_pool(self):
|
||||
"""Test update pool"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
desc = 'testing update with new description'
|
||||
pool = self._update_pool(new_pool.get('id'),
|
||||
description=desc,
|
||||
wait=True)
|
||||
self.assertEqual(desc, pool.get('description'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('5cbc4dac-13fc-44de-b98f-41ca369a6e0f')
|
||||
def test_update_pool_missing_name(self):
|
||||
"""Test update pool with missing name"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
name = pool_initial.get('name')
|
||||
pool = self._update_pool(new_pool.get('id'))
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.assertEqual(name, pool.get('name'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('af9c2f8e-b0e3-455b-83f0-222f8d692185')
|
||||
def test_update_pool_missing_description(self):
|
||||
"""Test update pool with missing description"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
desc = pool_initial.get('description')
|
||||
pool = self._update_pool(new_pool.get('id'))
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.assertEqual(desc, pool.get('description'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('3b41e855-edca-42c1-a1c6-07421f87704d')
|
||||
def test_update_pool_missing_admin_state_up(self):
|
||||
"""Test update pool with missing admin state up field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
admin = pool_initial.get('admin_state_up')
|
||||
pool = self._update_pool(new_pool.get('id'))
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.assertEqual(admin, pool.get('admin_state_up'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('8b49ecc3-4694-4482-9b2d-dc928576e161')
|
||||
def test_update_pool_missing_session_persistence(self):
|
||||
"""Test update pool with missing session persistence"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool_initial = self._show_pool(new_pool.get('id'))
|
||||
sess_pers = pool_initial.get('session_persistence')
|
||||
pool = self._update_pool(new_pool.get('id'))
|
||||
self.assertAlmostEqual(sess_pers, pool.get('session_persistence'))
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('23a9dbaf-105b-450e-95cf-050203b28366')
|
||||
def test_update_pool_invalid_name(self):
|
||||
"""Test update pool with invalid name
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'), name='n' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('efeeb827-5cb0-4349-8272-b2dbcbf42d22')
|
||||
def test_update_pool_invalid_desc(self):
|
||||
"""Test update pool with invalid desc
|
||||
|
||||
Kilo: @decorators.skip_because(bug="1637877")
|
||||
"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'),
|
||||
description='d' * 256)
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a91c1380-0d36-43a1-bf64-8fe9078e2bbd')
|
||||
def test_update_pool_invalid_admin_state_up(self):
|
||||
"""Test update pool with an invalid admin_state_up"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'), admin_state_up='hello')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('5d45b0e3-7d7f-4523-8504-9ccfd6ecec81')
|
||||
def test_update_pool_invalid_session_persistence(self):
|
||||
"""Test update pool with an invalid session pers. field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'),
|
||||
session_persistence={'type': 'Hello'})
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('3ddec9b1-fc7a-4073-9451-e73316237763')
|
||||
def test_update_pool_empty_name(self):
|
||||
"""Test update pool with empty name"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool = self._update_pool(new_pool.get('id'), name="")
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.assertEqual(pool.get('name'), "")
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('171e1153-9898-467d-80ed-d6deed430342')
|
||||
def test_update_pool_empty_description(self):
|
||||
"""Test update pool with empty description"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
pool = self._update_pool(new_pool.get('id'),
|
||||
description="")
|
||||
self._wait_for_load_balancer_status(self.load_balancer.get('id'))
|
||||
self.assertEqual(pool.get('description'), "")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('397bd0ec-0e82-4421-a672-b7a2c4e84b56')
|
||||
def test_update_pool_empty_admin_state_up(self):
|
||||
"""Test update pool with empty admin state up"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'), admin_state_up="")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('f68a6ed5-4577-44f1-81c8-6dd30d8a874d')
|
||||
def test_update_pool_empty_session_persistence(self):
|
||||
"""Test update pool with empty session persistence field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'),
|
||||
session_persistence="")
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d8027ea2-6912-41f7-bf5a-f2eb3d0901b1')
|
||||
def test_update_pool_invalid_attribute(self):
|
||||
"""Test update pool with an invalid attribute"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'), lb_algorithm='ROUNDED')
|
||||
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('a58822ee-56fc-4b96-bb28-47cd07ae9cb8')
|
||||
def test_update_pool_incorrect_attribute(self):
|
||||
"""Test update a pool with an extra, incorrect field"""
|
||||
new_pool = self._prepare_and_create_pool()
|
||||
self.assertRaises(ex.BadRequest, self._update_pool,
|
||||
new_pool.get('id'), protocol='HTTPS')
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('4839f03e-2439-4619-8546-411ca883066d')
|
||||
def test_delete_pool(self):
|
||||
"""Test delete pool"""
|
||||
new_pool = self._prepare_and_create_pool(cleanup=False)
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
self._delete_pool(new_pool.get('id'))
|
||||
self.assertRaises(ex.NotFound, self._show_pool,
|
||||
new_pool.get('id'))
|
||||
|
||||
@decorators.attr(type='smoke')
|
||||
@decorators.idempotent_id('cd30962a-12ce-4ae9-89de-db007aebbd9f')
|
||||
def test_delete_invalid_pool(self):
|
||||
"""Test delete pool that doesn't exist"""
|
||||
new_pool = self._prepare_and_create_pool(cleanup=False)
|
||||
pool = self._show_pool(new_pool.get('id'))
|
||||
self.assertEqual(new_pool, pool)
|
||||
self._delete_pool(new_pool.get('id'))
|
||||
self.assertRaises(ex.NotFound, self._delete_pool,
|
||||
new_pool.get('id'))
|
@ -1,270 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api import base_provider as base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class AdminPolicyTest(base.BaseAdminNetworkTest):
|
||||
"""Provides organization to policy traffic using NSX security policy
|
||||
|
||||
When security-group-policy extension is enabled:
|
||||
|
||||
1. Only Admin can create secuiry-group-policy.
|
||||
2. Tenants can not create security-group.
|
||||
3. No security rules can be added to security-group-policy.
|
||||
4. Only Admin can update security-group-policy.
|
||||
|
||||
If tests failed, check vmware/nsx.ini and neutron/policy.json to make
|
||||
sure correct settings are being applied.
|
||||
|
||||
ATTENTIONS:
|
||||
if allow_tenant_rules_with_policy=True
|
||||
run test_tenant_create_security_group_if_allowed
|
||||
if allow_tenant_rules_with_policy=False
|
||||
run test_tenant_cannot_create_security_group (negative test)
|
||||
|
||||
WARNING: Tempest scenario tests, tenants will create security-groups,
|
||||
and failures should be expected. So when run scenario tests,
|
||||
set allow_tenant_rules_with_policy to True.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(AdminPolicyTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('security-group-policy', 'network'):
|
||||
msg = "Extension security-group-policy is not enabled."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(AdminPolicyTest, cls).setup_clients()
|
||||
cls.cmgr_pri = cls.get_client_manager('primary')
|
||||
cls.cmgr_alt = cls.get_client_manager('alt')
|
||||
cls.cmgr_adm = cls.get_client_manager('admin')
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(AdminPolicyTest, cls).resource_setup()
|
||||
cls.default_policy_id = CONF.nsxv.default_policy_id
|
||||
cls.alt_policy_id = CONF.nsxv.alt_policy_id
|
||||
if not (cls.default_policy_id and
|
||||
cls.default_policy_id.startswith("policy-")):
|
||||
msg = "default_policy_id is not defined in session nsxv"
|
||||
raise cls.skipException(msg)
|
||||
|
||||
def delete_security_group(self, sg_client, sg_id):
|
||||
sg_client.delete_security_group(sg_id)
|
||||
|
||||
def create_security_group(self, sg_client, sg_name=None, desc=None,
|
||||
tenant_id=None):
|
||||
name = sg_name or data_utils.rand_name('security-group')
|
||||
desc = desc or "OS security-group %s" % name
|
||||
sg_dict = dict(name=name, description=desc)
|
||||
if tenant_id:
|
||||
sg_dict['tenant_id'] = tenant_id
|
||||
sg = sg_client.create_security_group(**sg_dict)
|
||||
sg = sg.get('security_group', sg)
|
||||
return sg
|
||||
|
||||
def create_security_group_policy(self, cmgr=None, policy_id=None,
|
||||
tenant_id=None, provider=False):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
policy_id = policy_id or self.default_policy_id
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg_dict = dict(policy=policy_id,
|
||||
name=data_utils.rand_name('admin-policy'))
|
||||
if tenant_id:
|
||||
sg_dict['tenant_id'] = tenant_id
|
||||
if provider:
|
||||
sg_dict['provider'] = True
|
||||
sg = sg_client.create_security_group(**sg_dict)
|
||||
sg = sg.get('security_group', sg)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_security_group,
|
||||
sg_client, sg.get('id'))
|
||||
return sg
|
||||
|
||||
def update_security_group_policy(self, security_group_id,
|
||||
new_policy_id, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg = sg_client.update_security_group(security_group_id,
|
||||
policy=new_policy_id)
|
||||
return sg.get('security_group', sg)
|
||||
|
||||
def create_security_group_rule(self, security_group_id,
|
||||
cmgr=None, tenant_id=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sgr_client = cmgr.security_group_rules_client
|
||||
sgr_dict = dict(security_group_id=security_group_id,
|
||||
direction='ingress', protocol='icmp')
|
||||
if tenant_id:
|
||||
sgr_dict['tenant_id'] = tenant_id
|
||||
sgr = sgr_client.create_security_group_rule(**sgr_dict)
|
||||
return sgr.get('security_group_rule', sgr)
|
||||
|
||||
def get_default_security_group_policy(self, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg_list = sg_client.list_security_groups()
|
||||
# why list twice, see bug#1772424
|
||||
sg_list = sg_client.list_security_groups(name='default')
|
||||
sg_list = sg_list.get('security_groups', sg_list)
|
||||
return sg_list[0]
|
||||
|
||||
def show_security_group_policy(self, security_group_id, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg = sg_client.show_security_group(security_group_id)
|
||||
return sg.get('security_group', sg)
|
||||
|
||||
@decorators.idempotent_id('825d0270-6649-44f2-ac0c-a3b5566d0d2a')
|
||||
def test_admin_can_crud_policy(self):
|
||||
sg_desc = "crud security-group-policy"
|
||||
sg_client = self.cmgr_adm.security_groups_client
|
||||
sg = self.create_security_group_policy(self.cmgr_adm)
|
||||
sg_id = sg.get('id')
|
||||
self.assertEqual(self.default_policy_id, sg.get('policy'))
|
||||
sg_client.update_security_group(sg_id, description=sg_desc)
|
||||
sg_show = self.show_security_group_policy(sg_id)
|
||||
self.assertEqual(sg_desc, sg_show.get('description'))
|
||||
self.delete_security_group(sg_client, sg_id)
|
||||
sg_list = sg_client.list_security_groups(id=sg_id)
|
||||
sg_list = sg_list.get('security_groups', sg_list)
|
||||
self.assertEqual(len(sg_list), 0)
|
||||
|
||||
@decorators.idempotent_id('809d72be-c2d8-4e32-b538-09a5003630c0')
|
||||
def test_admin_can_create_policy_for_tenant(self):
|
||||
tenant_id = self.cmgr_alt.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(self.cmgr_adm,
|
||||
tenant_id=tenant_id)
|
||||
self.assertEqual(self.default_policy_id, sg.get('policy'))
|
||||
|
||||
@decorators.idempotent_id('1ab540b0-2a56-46cd-bbaa-607a655b4688')
|
||||
def test_admin_can_create_provider_policy(self):
|
||||
tenant_id = self.cmgr_pri.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(self.cmgr_adm,
|
||||
tenant_id=tenant_id,
|
||||
provider=True)
|
||||
self.assertEqual(self.default_policy_id, sg.get('policy'))
|
||||
self.assertEqual(sg.get('provider'), True)
|
||||
|
||||
@decorators.idempotent_id('1d31ea7a-37f1-40db-b917-4acfbf565ae2')
|
||||
def test_tenant_has_default_policy(self):
|
||||
sg = self.get_default_security_group_policy(self.cmgr_pri)
|
||||
self.assertEqual(self.default_policy_id, sg.get('policy'))
|
||||
|
||||
@testtools.skipIf(not CONF.nsxv.alt_policy_id.startswith('policy-'),
|
||||
"nsxv.alt_policy_id not defined.")
|
||||
@decorators.idempotent_id('6784cf25-6b50-4349-b96b-85076111dbf4')
|
||||
def test_admin_change_tenant_policy(self):
|
||||
tenant_id = self.cmgr_alt.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(tenant_id=tenant_id)
|
||||
sg_id = sg.get('id')
|
||||
self.update_security_group_policy(sg_id, self.alt_policy_id)
|
||||
sg = self.show_security_group_policy(sg_id, self.cmgr_alt)
|
||||
self.assertEqual(self.alt_policy_id, sg.get('policy'))
|
||||
|
||||
@testtools.skipIf(not CONF.nsxv.allow_tenant_rules_with_policy,
|
||||
"skip because tenant is not allowed to create SG.")
|
||||
@decorators.idempotent_id('4abf29bd-22ae-46b4-846b-e7c28f318159')
|
||||
def test_tenant_create_security_group_if_allowed(self):
|
||||
"""test if allow_tenant_rules_with_policy=True"""
|
||||
sg_client = self.cmgr_pri.security_groups_client
|
||||
sg_name = data_utils.rand_name('security-group')
|
||||
sg = self.create_security_group(sg_client, sg_name)
|
||||
self.assertEqual(sg.get('name'), sg_name)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('5099604c-637a-4b25-8756-c6fc0929f963')
|
||||
def test_add_rules_to_policy_disallowed(self):
|
||||
tenant_id = self.cmgr_pri.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(self.cmgr_adm,
|
||||
tenant_id=tenant_id)
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self.create_security_group_rule, sg.get('id'),
|
||||
cmgr=self.cmgr_adm, tenant_id=tenant_id)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('9a604036-ace6-4ced-92b8-be732eee310f')
|
||||
def test_cannot_create_policy_with_invalid_policy_id(self):
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self.create_security_group_policy,
|
||||
self.cmgr_adm, "invalid-policy-id")
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('4d383d3c-f1e6-47e3-906e-3c171146965a')
|
||||
def test_tenant_cannot_delete_its_policy(self):
|
||||
tenant_cmgr = self.cmgr_alt
|
||||
tenant_id = tenant_cmgr.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(cmgr=self.cmgr_adm,
|
||||
tenant_id=tenant_id)
|
||||
sg_id = sg.get('id')
|
||||
tenant_sg_client = tenant_cmgr.security_groups_client
|
||||
self.assertRaises(exceptions.Forbidden,
|
||||
self.delete_security_group,
|
||||
tenant_sg_client, sg_id)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('154985cd-26b2-468d-af6d-b6144ef2d378')
|
||||
def test_tenant_cannot_update_its_policy(self):
|
||||
tenant_cmgr = self.cmgr_alt
|
||||
tenant_id = tenant_cmgr.networks_client.tenant_id
|
||||
sg = self.create_security_group_policy(cmgr=self.cmgr_adm,
|
||||
tenant_id=tenant_id)
|
||||
sg_id = sg.get('id')
|
||||
self.assertRaises(exceptions.Forbidden,
|
||||
self.update_security_group_policy,
|
||||
sg_id, self.alt_policy_id, self.cmgr_alt)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('d6d8c918-d488-40c4-83dc-8ce1a565e54f')
|
||||
def test_tenant_cannot_create_policy(self):
|
||||
self.assertRaises(exceptions.Forbidden,
|
||||
self.create_security_group_policy,
|
||||
self.cmgr_pri)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@testtools.skipIf(CONF.nsxv.allow_tenant_rules_with_policy,
|
||||
"skip because tenant is allowed to create SG.")
|
||||
@decorators.idempotent_id('82aa02ee-8008-47a9-90ea-ba7840bfb932')
|
||||
def test_tenant_cannot_create_security_group(self):
|
||||
"""Only valid if allow_tenant_rules_with_policy=True
|
||||
|
||||
If test fail, check nsx.ini and vmware_nsx_tempest/config.py
|
||||
to make sure they are the same value.
|
||||
|
||||
Exception is BadRequest, not Forbideen as the message is
|
||||
edited first before integration check.
|
||||
|
||||
counter part test is:
|
||||
test_tenant_create_security_group_if_allowed()
|
||||
"""
|
||||
sg_client = self.cmgr_pri.security_groups_client
|
||||
sg_name = data_utils.rand_name('security-group')
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self.create_security_group,
|
||||
sg_client, sg_name)
|
@ -1,129 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.api import base_provider as base
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class DnsSearchDomainTest(base.BaseAdminNetworkTest):
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(DnsSearchDomainTest, cls).resource_setup()
|
||||
cls.dns_search_domain = CONF.network.dns_search_domain
|
||||
network_name = data_utils.rand_name('dns-search')
|
||||
resp = cls.create_network(client=cls.networks_client,
|
||||
name=network_name)
|
||||
cls.project_network = resp.get('network', resp)
|
||||
# addCleanup() only available at instance, not at class
|
||||
resp = cls.create_subnet(cls.project_network,
|
||||
name=network_name,
|
||||
client=cls.subnets_client,
|
||||
dns_search_domain=cls.dns_search_domain)
|
||||
cls.tenant_subnet = resp.get('subnet', resp)
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
# we need to cleanup resouces created at class methods
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.networks_client.delete_network,
|
||||
cls.project_network['id'])
|
||||
super(DnsSearchDomainTest, cls).resource_cleanup()
|
||||
|
||||
def create_networks(self, network_name):
|
||||
resp = self.create_network(client=self.networks_client,
|
||||
name=network_name)
|
||||
network = resp.get('network', resp)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.networks_client.delete_network,
|
||||
network['id'])
|
||||
resp = self.create_subnet(network,
|
||||
name=network_name,
|
||||
client=self.subnets_client,
|
||||
dns_search_domain=self.dns_search_domain)
|
||||
subnet = resp.get('subnet', resp)
|
||||
return (network, subnet)
|
||||
|
||||
@decorators.idempotent_id('879d620c-535c-467f-9e62-f2bf3178b5b7')
|
||||
def test_dns_search_domain_crud_operations(self):
|
||||
"""perform CRUD operation on subnet with dns_search_domain."""
|
||||
network_name = data_utils.rand_name('crud-search-domain')
|
||||
network, subnet = self.create_networks(network_name)
|
||||
self.assertEqual('ACTIVE', network['status'])
|
||||
new_name = network_name + "-update"
|
||||
resp = self.update_subnet(
|
||||
subnet['id'], name=new_name,
|
||||
client=self.subnets_client,
|
||||
dns_search_domain=self.dns_search_domain)
|
||||
subnet = resp.get('subnet', resp)
|
||||
self.assertEqual(subnet['name'], new_name)
|
||||
self.assertEqual(subnet['dns_search_domain'],
|
||||
self.dns_search_domain)
|
||||
subnet_list = self.list_subnets(client=self.subnets_client,
|
||||
name=new_name)['subnets']
|
||||
self.assertEqual(1, len(subnet_list))
|
||||
self.delete_subnet(subnet['id'])
|
||||
subnet_list = self.list_subnets(client=self.subnets_client,
|
||||
name=new_name)['subnets']
|
||||
self.assertEqual(0, len(subnet_list))
|
||||
|
||||
@decorators.idempotent_id('40facdd9-40c0-48a1-bff1-57ba0ed0dc49')
|
||||
def test_list_search_domain(self):
|
||||
subnet_list = self.list_subnets(client=self.subnets_client,
|
||||
subnet_id=self.tenant_subnet['id'])
|
||||
self.assertEqual(1, len(subnet_list))
|
||||
|
||||
@decorators.idempotent_id('8d023934-b0c8-4588-b48b-17db047a4d8b')
|
||||
def test_show_search_domain(self):
|
||||
resp = self.show_subnet(self.tenant_subnet['id'],
|
||||
client=self.subnets_client)
|
||||
subnet = resp.get('subnet', resp)
|
||||
self.assertEqual(self.dns_search_domain,
|
||||
subnet['dns_search_domain'])
|
||||
|
||||
@decorators.idempotent_id('2b5990bf-d904-4e18-b197-93f3c061c260')
|
||||
def test_update_subnet_search_domain_field(self):
|
||||
"""attach 2nd subnet to network and update its dns_search_domain."""
|
||||
subnet_name = data_utils.rand_name('upd-search-domain')
|
||||
# 2nd subnet attached to a network, make sure to use different cidr
|
||||
resp = self.create_subnet(self.project_network,
|
||||
name=subnet_name,
|
||||
cidr_offset=1,
|
||||
client=self.subnets_client)
|
||||
subnet = resp.get('subnet', resp)
|
||||
self.assertNotIn('dns_search_domain', subnet)
|
||||
resp = self.update_subnet(
|
||||
subnet['id'],
|
||||
client=self.subnets_client,
|
||||
dns_search_domain=self.dns_search_domain)
|
||||
subnet = resp.get('subnet', resp)
|
||||
self.assertEqual(subnet['dns_search_domain'],
|
||||
self.dns_search_domain)
|
||||
# no method to remove dns_search_domain attribute
|
||||
# set to '' to clear search domain
|
||||
resp = self.update_subnet(
|
||||
subnet['id'],
|
||||
client=self.subnets_client,
|
||||
dns_search_domain='')
|
||||
subnet = resp.get('subnet', resp)
|
||||
self.assertEqual(subnet['dns_search_domain'], '')
|
||||
self.delete_subnet(subnet['id'],
|
||||
client=self.subnets_client)
|
@ -1,56 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class DnsSearchDoaminsNegativeTest(base.BaseAdminNetworkTest):
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(DnsSearchDoaminsNegativeTest, cls).skip_checks()
|
||||
|
||||
def create_network_with_bad_dns_search_domain(
|
||||
self, dns_search_domain="vmware@com"):
|
||||
networks_client = self.networks_client
|
||||
subnets_client = self.subnets_client
|
||||
network_name = data_utils.rand_name('dns-sear-negative')
|
||||
resp = networks_client.create_network(name=network_name)
|
||||
network = resp.get('network', resp)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
network['id'])
|
||||
subnet_cfg = {
|
||||
'client': subnets_client,
|
||||
'name': network_name,
|
||||
'dns_search_domain': dns_search_domain}
|
||||
# should trigger exception of BadRequest with message:
|
||||
# Invalid input for dns_search_domain: ...
|
||||
resp = self.create_subnet(network, **subnet_cfg)
|
||||
subnet = resp.get('subnet', resp)
|
||||
return (network, subnet)
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('11bdc214-10d7-4926-8f49-2da3d8719143')
|
||||
def test_create_dns_search_domain_negative(self):
|
||||
self.assertRaises(exceptions.BadRequest,
|
||||
self.create_network_with_bad_dns_search_domain)
|
@ -1,116 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
import test_subnets as SNET
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class FlatNetworksTestJSON(SNET.SubnetTestJSON):
|
||||
_interface = 'json'
|
||||
_provider_network_body = {
|
||||
'name': data_utils.rand_name('FLAT-network'),
|
||||
'provider:network_type': 'flat'}
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(FlatNetworksTestJSON, cls).resource_setup()
|
||||
|
||||
def _create_network(self, _auto_clean_up=True, network_name=None,
|
||||
**kwargs):
|
||||
network_name = network_name or data_utils.rand_name('flat-netwk')
|
||||
# self.create_network expect network_name
|
||||
# self.admin_client.create_network()
|
||||
# and self.client.create_network() expect name
|
||||
post_body = {'name': network_name,
|
||||
'provider:network_type': 'flat'}
|
||||
post_body.update(kwargs)
|
||||
LOG.debug("create FLAT network: %s", str(post_body))
|
||||
body = self.admin_networks_client.create_network(**post_body)
|
||||
network = body['network']
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(self._try_delete_network, network['id'])
|
||||
return network
|
||||
|
||||
@decorators.idempotent_id('dc2f2f46-0577-4e2a-b35d-3c8c8bbce5bf')
|
||||
def test_create_network(self):
|
||||
# Create a network as an admin user specifying the
|
||||
# flat network type attribute
|
||||
network = self._create_network()
|
||||
# Verifies router:network_type parameter
|
||||
self.assertIsNotNone(network['id'])
|
||||
self.assertEqual(network.get('provider:network_type'), 'flat')
|
||||
|
||||
@decorators.idempotent_id('777fc335-b26c-42ea-9759-c71dff2ce1c6')
|
||||
def test_update_network(self):
|
||||
# Update flat network as an admin user specifying the
|
||||
# flat network attribute
|
||||
network = self._create_network(shared=True, _auto_clean_up=False)
|
||||
self.assertEqual(network.get('shared'), True)
|
||||
new_name = network['name'] + "-updated"
|
||||
update_body = {'shared': False, 'name': new_name}
|
||||
body = self.update_network(network['id'], **update_body)
|
||||
updated_network = body['network']
|
||||
# Verify that name and shared parameters were updated
|
||||
self.assertEqual(updated_network['shared'], False)
|
||||
self.assertEqual(updated_network['name'], new_name)
|
||||
# get flat network attributes and verify them
|
||||
body = self.show_network(network['id'])
|
||||
updated_network = body['network']
|
||||
# Verify that name and shared parameters were updated
|
||||
self.assertEqual(updated_network['shared'], False)
|
||||
self.assertEqual(updated_network['name'], new_name)
|
||||
self.assertEqual(updated_network['status'], network['status'])
|
||||
self.assertEqual(updated_network['subnets'], network['subnets'])
|
||||
self._delete_network(network['id'])
|
||||
|
||||
@decorators.idempotent_id('1dfc1c11-e838-464c-85b2-ed5e4c477c64')
|
||||
def test_list_networks(self):
|
||||
# Create flat network
|
||||
network = self._create_network(shared=True)
|
||||
# List networks as a normal user and confirm it is available
|
||||
body = self.list_networks(client=self.networks_client)
|
||||
network_list = [net['id'] for net in body['networks']]
|
||||
self.assertIn(network['id'], network_list)
|
||||
update_body = {'shared': False}
|
||||
body = self.update_network(network['id'], **update_body)
|
||||
# List networks as a normal user and confirm it is not available
|
||||
body = self.list_networks(client=self.networks_client)
|
||||
network_list = [net['id'] for net in body['networks']]
|
||||
self.assertNotIn(network['id'], network_list)
|
||||
|
||||
@decorators.idempotent_id('b5649fe2-a214-4105-8053-1825a877c45b')
|
||||
def test_show_network_attributes(self):
|
||||
# Create flat network
|
||||
network = self._create_network(shared=True)
|
||||
# Show a flat network as a normal user and confirm the
|
||||
# flat network attribute is returned.
|
||||
body = self.show_network(network['id'], client=self.networks_client)
|
||||
show_net = body['network']
|
||||
self.assertEqual(network['name'], show_net['name'])
|
||||
self.assertEqual(network['id'], show_net['id'])
|
||||
# provider attributes are for admin only
|
||||
body = self.show_network(network['id'])
|
||||
show_net = body['network']
|
||||
net_attr_list = show_net.keys()
|
||||
for attr in ('admin_state_up', 'port_security_enabled', 'shared',
|
||||
'status', 'subnets', 'tenant_id', 'router:external',
|
||||
'provider:network_type', 'provider:physical_network',
|
||||
'provider:segmentation_id'):
|
||||
self.assertIn(attr, net_attr_list)
|
@ -1,186 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# Copyright 2015 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.services import base_l2gw
|
||||
from vmware_nsx_tempest.services import l2_gateway_client as L2GW
|
||||
|
||||
CONF = config.CONF
|
||||
L2GW_RID = 'l2_gateway'
|
||||
L2GW_RIDs = 'l2_gateways'
|
||||
MSG_DIFF = "l2gw %s=%s is not the same as requested=%s"
|
||||
|
||||
|
||||
class L2GatewayTest(base.BaseAdminNetworkTest):
|
||||
"""Test l2-gateway operations:
|
||||
|
||||
l2-gateway-create
|
||||
l2-gateway-show
|
||||
l2-gateway-update
|
||||
l2-gateway-list
|
||||
l2-gateway-delete
|
||||
|
||||
over single device/interface/vlan
|
||||
over single device/interface/multiple-vlans
|
||||
over single device/multiple-interfaces/multiple-vlans
|
||||
over multiple-device/multiple-interfaces/multiple-vlans
|
||||
"""
|
||||
|
||||
credentials = ['primary', 'admin']
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(L2GatewayTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('l2-gateway', 'network'):
|
||||
msg = "l2-gateway extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
# if CONF attr device_on_vlan not defined, SKIP entire test suite
|
||||
cls.getattr_or_skip_test("device_one_vlan")
|
||||
|
||||
@classmethod
|
||||
def getattr_or_skip_test(cls, l2gw_attr_name):
|
||||
attr_value = getattr(CONF.l2gw, l2gw_attr_name, None)
|
||||
if attr_value:
|
||||
return attr_value
|
||||
msg = "CONF session:l2gw attr:%s is not defined." % (l2gw_attr_name)
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(L2GatewayTest, cls).setup_clients()
|
||||
cls.l2gw_created = {}
|
||||
l2gw_mgr = cls.os_adm
|
||||
cls.l2gw_client = L2GW.get_client(l2gw_mgr)
|
||||
cls.l2gw_list_0 = cls.l2gw_client.list_l2_gateways()[L2GW_RIDs]
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(L2GatewayTest, cls).resource_setup()
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
for _id in cls.l2gw_created.keys():
|
||||
try:
|
||||
cls.l2gw_client.delete_l2_gateway(_id)
|
||||
except Exception:
|
||||
# log it please
|
||||
pass
|
||||
|
||||
def get_segmentation_id(self, _l2gw, d_idx=0, i_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
_seg = _dev['interfaces'][i_idx].get('segmentation_id', [])
|
||||
return sorted(_seg)
|
||||
|
||||
def pop_segmentation_id(self, _l2gw, d_idx=0, i_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
_seg = _dev['interfaces'][i_idx].pop('segmentation_id', [])
|
||||
return sorted(_seg)
|
||||
|
||||
def get_interfaces(self, _l2gw, d_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
return sorted(_dev)
|
||||
|
||||
def do_csuld_single_device_interface_vlan(self, _name, _devices):
|
||||
_vlan_id_list = self.get_segmentation_id(_devices, 0, 0)
|
||||
_res_new = self.l2gw_client.create_l2_gateway(
|
||||
name=_name, **_devices)[L2GW_RID]
|
||||
self.l2gw_created[_res_new['id']] = _res_new
|
||||
self.assertEqual(_name, _res_new['name'],
|
||||
MSG_DIFF % ('name', _res_new['name'], _name))
|
||||
# w/wo vlan provided, need to check it is assigned/not-assigned
|
||||
_seg_list = self.get_segmentation_id(_res_new, 0, 0)
|
||||
self.assertEqual(0, cmp(_vlan_id_list, _seg_list),
|
||||
MSG_DIFF % ('vlan', _seg_list, _vlan_id_list))
|
||||
_res_show = self.l2gw_client.show_l2_gateway(
|
||||
_res_new['id'])[L2GW_RID]
|
||||
_if_created = _res_new['devices'][0]['interfaces']
|
||||
_if_shown = _res_show['devices'][0]['interfaces']
|
||||
self.assertEqual(0, cmp(_if_created, _if_shown),
|
||||
MSG_DIFF % ('interfaces', _if_created, _if_shown))
|
||||
_name2 = _name + "-day2"
|
||||
_res_upd = self.l2gw_client.update_l2_gateway(
|
||||
_res_new['id'], name=_name2)[L2GW_RID]
|
||||
_res_lst = self.l2gw_client.list_l2_gateways(
|
||||
name=_name2)[L2GW_RIDs][0]
|
||||
self.assertEqual(_name2 == _res_upd['name'],
|
||||
_name2 == _res_lst['name'],
|
||||
MSG_DIFF % ('name', _res_new['name'], _name2))
|
||||
self.l2gw_client.delete_l2_gateway(_res_new['id'])
|
||||
_res_lst = self.l2gw_client.list_l2_gateways(name=_name2)[L2GW_RIDs]
|
||||
self.l2gw_created.pop(_res_new['id'])
|
||||
self.assertEmpty(_res_lst,
|
||||
"l2gw name=%s, id=%s not deleted." %
|
||||
(_name2, _res_new['id']))
|
||||
|
||||
@decorators.idempotent_id('8b45a9a5-468b-4317-983d-7cceda367074')
|
||||
def test_csuld_single_device_interface_without_vlan(self):
|
||||
"""Single device/interface/vlan
|
||||
|
||||
Create l2gw with one and only one VLAN. In this case,
|
||||
l2-gateway-connnection does not need to specify VLAN.
|
||||
"""
|
||||
|
||||
dev_profile = self.getattr_or_skip_test("device_one_vlan")
|
||||
_name = data_utils.rand_name('l2gw-1v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
self.pop_segmentation_id(_devices, 0, 0)
|
||||
self.do_csuld_single_device_interface_vlan(_name, _devices)
|
||||
|
||||
@decorators.idempotent_id('af57cf56-a169-4d88-b32e-7f49365ce407')
|
||||
def test_csuld_single_device_interface_vlan(self):
|
||||
"""Single device/interface/vlan
|
||||
|
||||
Create l2gw without specifying LAN. In this case,
|
||||
l2-gateway-connnection need to specify VLAN.
|
||||
"""
|
||||
|
||||
dev_profile = self.getattr_or_skip_test("device_one_vlan")
|
||||
_name = data_utils.rand_name('l2gw-1v2')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
self.do_csuld_single_device_interface_vlan(_name, _devices)
|
||||
|
||||
@decorators.idempotent_id('cb59145e-3d2b-46b7-8f7b-f30f794a4d51')
|
||||
@decorators.skip_because(bug="1559913")
|
||||
def test_csuld_single_device_interface_mvlan(self):
|
||||
dev_profile = self.getattr_or_skip_test("device_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gw-2v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
self.do_csuld_single_device_interface_vlan(_name, _devices)
|
||||
|
||||
@decorators.skip_because(bug="1559913")
|
||||
@decorators.idempotent_id('5522bdfe-ebe8-4eea-81b4-f4075bb608cf')
|
||||
def test_csuld_single_device_minterface_mvlan_type1(self):
|
||||
# NSX-v does not support multiple interfaces
|
||||
dev_profile = self.getattr_or_skip_test(
|
||||
"multiple_interfaces_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gw-m2v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
self.do_csuld_single_device_interface_vlan(_name, _devices)
|
||||
|
||||
@decorators.skip_because(bug="1559913")
|
||||
@decorators.idempotent_id('5bec26e0-855f-4537-b31b-31663a820ddb')
|
||||
def test_csuld_single_device_minterface_mvlan_type2(self):
|
||||
# NSX-v does not support multiple interfaces
|
||||
dev_profile = self.getattr_or_skip_test(
|
||||
"multiple_interfaces_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gw-m2v2')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
self.do_csuld_single_device_interface_vlan(_name, _devices)
|
@ -1,273 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# Copyright 2015 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest import test
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from vmware_nsx_tempest.services import base_l2gw
|
||||
from vmware_nsx_tempest.services import l2_gateway_client as L2GW
|
||||
from vmware_nsx_tempest.services import \
|
||||
l2_gateway_connection_client as L2GWC
|
||||
|
||||
CONF = config.CONF
|
||||
L2GW_RID = 'l2_gateway'
|
||||
L2GW_RIDs = 'l2_gateways'
|
||||
L2GWC_RID = 'l2_gateway_connection'
|
||||
L2GWC_RIDs = 'l2_gateway_connections'
|
||||
MSG_DIFF = "l2gw %s=%s is not the same as requested=%s"
|
||||
|
||||
|
||||
class L2GatewayConnectionTest(base.BaseAdminNetworkTest):
|
||||
"""Test l2-gateway-connection operations:
|
||||
|
||||
l2-gateway-connection-create
|
||||
l2-gateway-connection-show
|
||||
l2-gateway-connection-update (no case)
|
||||
l2-gateway-connection-list
|
||||
l2-gateway-connection-delete
|
||||
|
||||
over single device/interface/vlan
|
||||
over single device/interface/multiple-vlans
|
||||
over single device/multiple-interfaces/multiple-vlans
|
||||
over multiple-device/multiple-interfaces/multiple-vlans
|
||||
"""
|
||||
|
||||
credentials = ['primary', 'admin']
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(L2GatewayConnectionTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('l2-gateway', 'network'):
|
||||
msg = "l2-gateway extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
if not test.is_extension_enabled('l2-gateway-connection',
|
||||
'network'):
|
||||
msg = "l2-gateway-connection extension is not enabled"
|
||||
raise cls.skipException(msg)
|
||||
# skip test if CONF session:l2gw does not have the following opts
|
||||
cls.getattr_or_skip_test("device_one_vlan")
|
||||
cls.getattr_or_skip_test("vlan_subnet_ipv4_dict")
|
||||
|
||||
@classmethod
|
||||
def getattr_or_skip_test(cls, l2gw_attr_name):
|
||||
attr_value = getattr(CONF.l2gw, l2gw_attr_name, None)
|
||||
if attr_value:
|
||||
return attr_value
|
||||
msg = "CONF session:l2gw attr:%s is not defined." % (l2gw_attr_name)
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(L2GatewayConnectionTest, cls).setup_clients()
|
||||
cls.l2gw_created = {}
|
||||
cls.l2gwc_created = {}
|
||||
l2gw_mgr = cls.os_adm
|
||||
cls.l2gw_client = L2GW.get_client(l2gw_mgr)
|
||||
cls.l2gwc_client = L2GWC.get_client(l2gw_mgr)
|
||||
cls.l2gw_list_0 = cls.l2gw_client.list_l2_gateways()[L2GW_RIDs]
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(L2GatewayConnectionTest, cls).resource_setup()
|
||||
# create primary tenant's VLAN network
|
||||
_subnet = cls.getattr_or_skip_test("vlan_subnet_ipv4_dict")
|
||||
for _x in ('mask_bits',):
|
||||
if _x in _subnet:
|
||||
_subnet[_x] = int(_subnet[_x])
|
||||
# cidr must be presented & in IPNetwork structure
|
||||
_subnet['cidr'] = netaddr.IPNetwork(_subnet['cidr'])
|
||||
_start = _subnet.pop('start', None)
|
||||
_end = _subnet.pop('end', None)
|
||||
if _start and _end:
|
||||
_subnet['allocation_pools'] = [{'start': _start, 'end': _end}]
|
||||
cls.network = cls.create_network()
|
||||
# baseAdminNetworkTest does not derive ip_version, mask_bits from cidr
|
||||
_subnet['ip_version'] = 4
|
||||
if 'mask_bits' not in _subnet:
|
||||
_subnet['mask_bits'] = _subnet['cidr'].prefixlen
|
||||
cls.subnet = cls.create_subnet(cls.network, **_subnet)
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
cls.l2gw_cleanup()
|
||||
if hasattr(cls, 'network'):
|
||||
cls.networks_client.delete_network(cls.network['id'])
|
||||
|
||||
@classmethod
|
||||
def l2gw_cleanup(cls):
|
||||
"""
|
||||
Delete created L2GWs and L2GWCs.
|
||||
"""
|
||||
for _id in cls.l2gwc_created.keys():
|
||||
try:
|
||||
cls.l2gwc_client.delete_l2_gateway_connection(_id)
|
||||
except Exception:
|
||||
# log it please
|
||||
pass
|
||||
for _id in cls.l2gw_created.keys():
|
||||
try:
|
||||
cls.l2gw_client.delete_l2_gateway(_id)
|
||||
except Exception:
|
||||
# log it please
|
||||
pass
|
||||
|
||||
@classmethod
|
||||
def get_ipaddress_from_tempest_conf(cls, ip_version=4):
|
||||
"""Return first subnet gateway for configured CIDR."""
|
||||
if ip_version == 4:
|
||||
cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
|
||||
elif ip_version == 6:
|
||||
cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
|
||||
return netaddr.IPAddress(cidr)
|
||||
|
||||
def get_segmentation_id(self, _l2gw, d_idx=0, i_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
_seg = _dev['interfaces'][i_idx].get('segmentation_id', [])
|
||||
return sorted(_seg)
|
||||
|
||||
def get_interfaces(self, _l2gw, d_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
return sorted(_dev)
|
||||
|
||||
def pop_segmentation_id(self, _l2gw, d_idx=0, i_idx=0):
|
||||
_dev = _l2gw['devices'][d_idx]
|
||||
_seg = _dev['interfaces'][i_idx].pop('segmentation_id', [])
|
||||
return sorted(_seg)
|
||||
|
||||
def create_l2gw_switch(self, _name, _devices):
|
||||
_vlan_id_list = self.get_segmentation_id(_devices)
|
||||
_res_new = self.l2gw_client.create_l2_gateway(
|
||||
name=_name, **_devices)[L2GW_RID]
|
||||
self.l2gw_created[_res_new['id']] = _res_new
|
||||
_res_show = self.l2gw_client.show_l2_gateway(
|
||||
_res_new['id'])[L2GW_RID]
|
||||
return (_res_show, _vlan_id_list)
|
||||
|
||||
def create_l2gw_connection(self, _l2gw, network_id=None, **kwargs):
|
||||
network_id = network_id or self.network['id']
|
||||
_seg_id = kwargs.pop('default_segmentation_id',
|
||||
kwargs.pop('segmentation_id', None))
|
||||
cr_body = {'l2_gateway_id': _l2gw['id'], 'network_id': network_id}
|
||||
if _seg_id:
|
||||
cr_body['segmentation_id'] = _seg_id
|
||||
_res_new = self.l2gwc_client.create_l2_gateway_connection(
|
||||
**cr_body)[L2GWC_RID]
|
||||
self.l2gwc_created[_res_new['id']] = _res_new
|
||||
_res_show = self.l2gwc_client.show_l2_gateway_connection(
|
||||
_res_new['id'])[L2GWC_RID]
|
||||
return (_res_show, _seg_id)
|
||||
|
||||
def do_suld_l2gw_connection(self, _res_new):
|
||||
_res_show = self.l2gwc_client.show_l2_gateway_connection(
|
||||
_res_new['id'])[L2GWC_RID]
|
||||
for _k in ('l2_gateway_id', 'network_id'):
|
||||
self.assertEqual(_res_show[_k], _res_new[_k])
|
||||
_res_lst = self.l2gwc_client.list_l2_gateway_connections(
|
||||
l2_gateway_id=_res_new['l2_gateway_id'],
|
||||
network_id=_res_new['network_id'])[L2GWC_RIDs][0]
|
||||
self.assertEqual(_res_show['l2_gateway_id'], _res_lst['l2_gateway_id'])
|
||||
self.l2gwc_client.delete_l2_gateway_connection(_res_new['id'])
|
||||
_res_lst = self.l2gwc_client.list_l2_gateway_connections(
|
||||
l2_gateway_id=_res_new['l2_gateway_id'],
|
||||
network_id=_res_new['network_id'])[L2GWC_RIDs]
|
||||
self.l2gwc_created.pop(_res_new['id'])
|
||||
self.assertEmpty(_res_lst,
|
||||
"l2gwc id=%s not deleted." % (_res_new['id']))
|
||||
|
||||
@decorators.idempotent_id('6628c662-b997-46cd-8266-77f329bda062')
|
||||
def test_csuld_single_device_interface_without_vlan(self):
|
||||
"""Single device/interface/vlan
|
||||
|
||||
Create l2gw with one and only one VLAN. In this case,
|
||||
l2-gateway-connnection does not need to specify VLAN.
|
||||
"""
|
||||
|
||||
dev_profile = self.getattr_or_skip_test("device_one_vlan")
|
||||
_name = data_utils.rand_name('l2gwc-1v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
_vlan_id_list = self.pop_segmentation_id(_devices)
|
||||
(_gw, _seg_list) = self.create_l2gw_switch(_name, _devices)
|
||||
(_res_new, _seg_id) = self.create_l2gw_connection(
|
||||
_gw, segmentation_id=_vlan_id_list[0])
|
||||
_seg_new = str(_res_new.get('segmentation_id'))
|
||||
self.assertEqual(_seg_new, str(_seg_id))
|
||||
self.do_suld_l2gw_connection(_res_new)
|
||||
self.addCleanup(self.l2gw_cleanup)
|
||||
|
||||
@decorators.idempotent_id('222104e3-1260-42c1-bdf6-536c1141387c')
|
||||
def test_csuld_single_device_interface_vlan(self):
|
||||
"""Single device/interface/vlan
|
||||
|
||||
Create l2gw without specifying LAN. In this case,
|
||||
l2-gateway-connnection need to specify VLAN.
|
||||
"""
|
||||
|
||||
dev_profile = self.getattr_or_skip_test("device_one_vlan")
|
||||
_name = data_utils.rand_name('l2gwc-1v2')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
(_gw, _seg_list) = self.create_l2gw_switch(_name, _devices)
|
||||
(_res_new, _seg_id) = self.create_l2gw_connection(_gw)
|
||||
_seg_new = _res_new.get('segmentation_id', None)
|
||||
# vlan specified @l2-gateway, so it is empty @l2-gateway-connection
|
||||
self.assertEmpty(_seg_new)
|
||||
self.do_suld_l2gw_connection(_res_new)
|
||||
self.addCleanup(self.l2gw_cleanup)
|
||||
|
||||
@decorators.skip_because(bug="1559913")
|
||||
@decorators.idempotent_id('1875eca7-fde9-49ba-be21-47a8cc41f2e5')
|
||||
def test_csuld_single_device_interface_mvlan_type2(self):
|
||||
dev_profile = self.getattr_or_skip_test("device_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gwc-2v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
_vlan_id_list = self.get_segmentation_id(_devices)
|
||||
(_gw, _seg_list) = self.create_l2gw_switch(_name, _devices)
|
||||
(_res_new, _seg_id_list) = self.create_l2gw_connection(_gw)
|
||||
_seg_id_list = _res_new.get('segmentation_id')
|
||||
self.assertEqaul(0, cmp(_vlan_id_list, _seg_id_list),
|
||||
MSG_DIFF % ('vlan', _vlan_id_list, _seg_id_list))
|
||||
self.do_suld_l2gw_connection(_res_new)
|
||||
self.addCleanup(self.l2gw_cleanup)
|
||||
|
||||
@decorators.skip_because(bug="1559913")
|
||||
@decorators.idempotent_id('53755cb0-fdca-4ee7-8e43-a9b8a9d6d90a')
|
||||
def test_csuld_single_device_minterface_mvlan_type1(self):
|
||||
# NSX-v does not support multiple interfaces
|
||||
dev_profile = self.getattr_or_skip_test(
|
||||
"multiple_interfaces_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gwc-m2v1')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
_gw = self.create_l2gw_switch(_name, _devices)
|
||||
(_res_new, _seg_id) = self.create_l2gw_connection(_gw)
|
||||
self.do_suld_l2gw_connection(_res_new)
|
||||
self.addCleanup(self.l2gw_cleanup)
|
||||
|
||||
@decorators.skip_because(bug="1559913")
|
||||
@decorators.idempotent_id('723b0b78-35d7-4774-89c1-ec73797a1fe3')
|
||||
def test_csuld_single_device_minterface_mvlan_type2(self):
|
||||
dev_profile = self.getattr_or_skip_test(
|
||||
"multiple_interfaces_multiple_vlans")
|
||||
_name = data_utils.rand_name('l2gwc-m2v2')
|
||||
_devices = base_l2gw.get_l2gw_body(dev_profile)
|
||||
_gw = self.create_l2gw_switch(_name, _devices)
|
||||
(_res_new, _seg_id) = self.create_l2gw_connection(_gw)
|
||||
self.do_suld_l2gw_connection(_res_new)
|
||||
self.addCleanup(self.l2gw_cleanup)
|
@ -1,306 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
import base_provider as base
|
||||
import six
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.services import nsxv_client
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class MultipleTransportZonesTest(base.BaseAdminNetworkTest):
|
||||
"""Validate that NSX-v plugin can support multiple transport zones.
|
||||
|
||||
The test environment must at least have 1 additional TZ created.
|
||||
The default number of TZs used to test, include the default TZ is 3.
|
||||
However, all MTZ tests can run with 2 TZs in the testbed.
|
||||
"""
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(MultipleTransportZonesTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('provider', 'network'):
|
||||
msg = "provider extension is not enabled"
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(MultipleTransportZonesTest, cls).setup_clients()
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(MultipleTransportZonesTest, cls).resource_setup()
|
||||
manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
|
||||
CONF.nsxv.manager_uri).group(0)
|
||||
cls.vsm = nsxv_client.VSMClient(
|
||||
manager_ip, CONF.nsxv.user, CONF.nsxv.password)
|
||||
cls.nsxv_scope_ids = cls.get_all_scope_id_list(with_default_scope=True)
|
||||
if len(cls.nsxv_scope_ids) < 2:
|
||||
msg = "Only one transport zone deployed. Need at least 2."
|
||||
raise cls.skipException(msg)
|
||||
cls.provider_network_type = getattr(CONF.nsxv,
|
||||
"provider_network_type",
|
||||
'vxlan')
|
||||
cls.MAX_MTZ = CONF.nsxv.max_mtz
|
||||
|
||||
@classmethod
|
||||
def create_project_network_subnet(cls, name_prefix='mtz-project'):
|
||||
network_name = data_utils.rand_name(name_prefix)
|
||||
resp = cls.create_network(client=cls.networks_client,
|
||||
name=network_name)
|
||||
network = resp.get('network', resp)
|
||||
cls.tenant_net = [None, network]
|
||||
resp = cls.create_subnet(network,
|
||||
name=network_name,
|
||||
client=cls.subnets_client)
|
||||
subnet = resp.get('subnet', resp)
|
||||
return (network['id'], (None, network, subnet))
|
||||
|
||||
@classmethod
|
||||
def get_all_scope_id_list(cls, with_default_scope=False):
|
||||
"""return all scope IDs w/wo the default scope defined in NSX."""
|
||||
scopes = cls.vsm.get_all_vdn_scopes()
|
||||
scope_id_list = [x['objectId'] for x in scopes]
|
||||
if with_default_scope:
|
||||
return scope_id_list
|
||||
try:
|
||||
scope_id_list.remove(CONF.nsxv.vdn_scope_id)
|
||||
except Exception:
|
||||
pass
|
||||
return scope_id_list
|
||||
|
||||
def create_network_subnet(self, scope_id, cidr=None, cidr_offset=0):
|
||||
network_name = data_utils.rand_name('mtz-network-')
|
||||
create_kwargs = {'provider:network_type': self.provider_network_type,
|
||||
'provider:physical_network': scope_id}
|
||||
resp = self.create_network(network_name, **create_kwargs)
|
||||
network = resp.get('network', resp)
|
||||
net_id = network['id']
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_network, net_id)
|
||||
self.assertEqual(scope_id,
|
||||
network['provider:physical_network'])
|
||||
resp = self.create_subnet(network,
|
||||
name=network_name,
|
||||
cidr=cidr,
|
||||
cidr_offset=cidr_offset)
|
||||
subnet = resp.get('subnet', resp)
|
||||
resp = self.show_network(net_id)
|
||||
s_network = resp.get('network', resp)
|
||||
net_subnets = s_network['subnets']
|
||||
self.assertIn(subnet['id'], net_subnets)
|
||||
lswitch_list = self.vsm.get_all_logical_switches(scope_id)
|
||||
lswitch_list = [x for x in lswitch_list if x['name'] == net_id]
|
||||
msg = ("network=%s is not configured by specified vdn_scope_id=%s"
|
||||
% (net_id, scope_id))
|
||||
self.assertTrue(len(lswitch_list) == 1, msg=msg)
|
||||
return (net_id, s_network, subnet)
|
||||
|
||||
def delete_networks(self, nets):
|
||||
for net_id in six.iterkeys(nets):
|
||||
self.delete_network(net_id)
|
||||
|
||||
def check_update_network(self, network):
|
||||
new_name = network['name'] + "-2nd"
|
||||
self.update_network(network['id'], name=new_name)
|
||||
resp = self.show_network(network['id'])
|
||||
s_network = resp.get('network', resp)
|
||||
self.assertEqual(new_name, s_network['name'])
|
||||
|
||||
def check_update_subnet(self, subnet):
|
||||
new_name = subnet['name'] + "-2nd"
|
||||
self.update_subnet(subnet['id'], name=new_name)
|
||||
resp = self.show_subnet(subnet['id'])['subnet']
|
||||
s_subnet = resp.get('subnet', resp)
|
||||
self.assertEqual(new_name, s_subnet['name'])
|
||||
|
||||
def create_show_update_delete_mtz_network_subnet(self, s_id):
|
||||
net_id, network, subnet = self.create_network_subnet(s_id)
|
||||
self.check_update_network(network)
|
||||
self.check_update_subnet(subnet)
|
||||
self.delete_network(net_id)
|
||||
|
||||
def create_router_by_type(self, router_type, name=None, **kwargs):
|
||||
routers_client = self.admin_manager.routers_client
|
||||
router_name = name or data_utils.rand_name('mtz-')
|
||||
create_kwargs = dict(name=router_name, external_gateway_info={
|
||||
"network_id": CONF.network.public_network_id})
|
||||
if router_type in ('shared', 'exclusive'):
|
||||
create_kwargs['router_type'] = router_type
|
||||
elif router_type in ('distributed'):
|
||||
create_kwargs['distributed'] = True
|
||||
kwargs.update(create_kwargs)
|
||||
router = routers_client.create_router(**kwargs)
|
||||
router = router['router'] if 'router' in router else router
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.delete_router, router['id'])
|
||||
self.assertEqual(router['name'], router_name)
|
||||
return (routers_client, router)
|
||||
|
||||
def create_router_and_add_interfaces(self, router_type, nets):
|
||||
(routers_client, router) = self.create_router_by_type(router_type)
|
||||
if router_type == 'exclusive':
|
||||
router_nsxv_name = '%s-%s' % (router['name'], router['id'])
|
||||
exc_edge = self.vsm.get_edge(router_nsxv_name)
|
||||
self.assertTrue(exc_edge is not None)
|
||||
self.assertEqual(exc_edge['edgeType'], 'gatewayServices')
|
||||
for net_id, (s_id, network, subnet) in six.iteritems(nets):
|
||||
# register to cleanup before adding interfaces so interfaces
|
||||
# and router can be deleted if test is aborted.
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.remove_router_interface,
|
||||
router['id'], subnet_id=subnet['id'])
|
||||
routers_client.add_router_interface(
|
||||
router['id'], subnet_id=subnet['id'])
|
||||
return router
|
||||
|
||||
def clear_router_gateway_and_interfaces(self, router, nets):
|
||||
routers_client = self.admin_manager.routers_client
|
||||
routers_client.update_router(router['id'],
|
||||
external_gateway_info=dict())
|
||||
for net_id, (s_id, network, subnet) in six.iteritems(nets):
|
||||
try:
|
||||
routers_client.remove_router_interface(
|
||||
router['id'], subnet_id=subnet['id'])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _test_router_with_multiple_mtz_networks(self, router_type):
|
||||
"""test router attached with multiple TZs."""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
|
||||
nets = {}
|
||||
for cidr_step in range(0, self.MAX_MTZ):
|
||||
s_id = scope_id_list[cidr_step % len(scope_id_list)]
|
||||
net_id, network, subnet = self.create_network_subnet(
|
||||
s_id, cidr_offset=(cidr_step + 2))
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
router = self.create_router_and_add_interfaces(router_type, nets)
|
||||
self.clear_router_gateway_and_interfaces(router, nets)
|
||||
|
||||
def _test_router_with_network_and_mtz_networks(self, router_type):
|
||||
"""test router attached with multiple TZs and one tenant network."""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
|
||||
nets = {}
|
||||
net_id, net_info = self.create_project_network_subnet('mtz-tenant')
|
||||
nets[net_id] = net_info
|
||||
for cidr_step in range(0, self.MAX_MTZ):
|
||||
s_id = scope_id_list[cidr_step % len(scope_id_list)]
|
||||
net_id, network, subnet = self.create_network_subnet(
|
||||
s_id, cidr_offset=(cidr_step + 2))
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
router = self.create_router_and_add_interfaces(router_type, nets)
|
||||
self.clear_router_gateway_and_interfaces(router, nets)
|
||||
|
||||
@decorators.idempotent_id('39bc7909-912c-4e16-8246-773ae6a40ba4')
|
||||
def test_mtz_network_crud_operations(self):
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=False)
|
||||
s_id = scope_id_list[0]
|
||||
self.create_show_update_delete_mtz_network_subnet(s_id)
|
||||
|
||||
@decorators.idempotent_id('4e1717d6-df39-4539-99da-df23814cfe14')
|
||||
def test_mtz_overlay_network(self):
|
||||
"""overlay subnets with the same TZ"""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
|
||||
s_id = scope_id_list[0]
|
||||
nets = {}
|
||||
for cidr_step in range(1, self.MAX_MTZ):
|
||||
net_id, network, subnet = self.create_network_subnet(s_id)
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
self.delete_networks(nets)
|
||||
|
||||
@decorators.idempotent_id('6ecf67fc-4396-41d9-9d84-9d8c936dcb8f')
|
||||
def test_multiple_mtz_overlay_network(self):
|
||||
"""overlay subnets from multiple TZs."""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
|
||||
nets = {}
|
||||
cidr_step = 0
|
||||
for s_id in scope_id_list:
|
||||
net_id, network, subnet = self.create_network_subnet(s_id)
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
net_id, network, subnet = self.create_network_subnet(s_id)
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
cidr_step += 1
|
||||
if cidr_step < self.MAX_MTZ:
|
||||
break
|
||||
self.delete_networks(nets)
|
||||
|
||||
@decorators.idempotent_id('e7e0fc6c-41fd-44bc-b9b1-4501ce618738')
|
||||
def test_mtz_non_overlay_network(self):
|
||||
"""non-overlay subnets from one TZ."""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=False)
|
||||
s_id = scope_id_list[0]
|
||||
nets = {}
|
||||
for cidr_step in range(0, self.MAX_MTZ):
|
||||
net_id, network, subnet = self.create_network_subnet(
|
||||
s_id, cidr_offset=(cidr_step + 1))
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
self.delete_networks(nets)
|
||||
|
||||
@decorators.idempotent_id('b1cb5815-6380-421f-beef-ae3cb148cef4')
|
||||
def test_multiple_mtz_non_overlay_network(self):
|
||||
"""non-overlay subnets from multiple TZs."""
|
||||
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
|
||||
nets = {}
|
||||
for cidr_step in range(0, self.MAX_MTZ):
|
||||
s_id = scope_id_list[cidr_step % len(scope_id_list)]
|
||||
net_id, network, subnet = self.create_network_subnet(
|
||||
s_id, cidr_offset=cidr_step)
|
||||
nets[net_id] = (s_id, network, subnet)
|
||||
self.delete_networks(nets)
|
||||
|
||||
@decorators.idempotent_id('006a1a4b-4b63-4663-8baa-affe5df62b11')
|
||||
def test_shared_router_with_multiple_mtz_networks(self):
|
||||
"""shared router attached with multiple TZs."""
|
||||
self._test_router_with_multiple_mtz_networks(
|
||||
router_type='shared')
|
||||
|
||||
@decorators.idempotent_id('b160d1dc-0332-4d1a-b2a0-c11f57fe4dd9')
|
||||
def test_exclusive_router_with_multiple_mtz_networks(self):
|
||||
"""exclusive router attached with multiple TZs."""
|
||||
self._test_router_with_multiple_mtz_networks(
|
||||
router_type='exclusive')
|
||||
|
||||
@decorators.skip_because(bug="1592174")
|
||||
@decorators.idempotent_id('2c46290c-8a08-4037-aada-f96fd34b3260')
|
||||
def test_distributed_router_with_multiple_mtz_networks(self):
|
||||
"""exclusive router attached with multiple TZs."""
|
||||
self._test_router_with_multiple_mtz_networks(
|
||||
router_type='distributed')
|
||||
|
||||
@decorators.idempotent_id('be8f7320-2246-43f3-a826-768f763c9bd0')
|
||||
def test_shared_router_with_network_and_mtz_networks(self):
|
||||
"""router attached with multiple TZs and one tenant network."""
|
||||
self._test_router_with_network_and_mtz_networks(
|
||||
router_type='shared')
|
||||
|
||||
@decorators.idempotent_id('3cb27410-67e2-4e82-95c7-3dbbe9a8c64b')
|
||||
def test_exclusive_router_with_network_and_mtz_networks(self):
|
||||
"""router attached with multiple TZs and one tenant network."""
|
||||
self._test_router_with_network_and_mtz_networks(
|
||||
router_type='exclusive')
|
||||
|
||||
@decorators.skip_because(bug="1592174")
|
||||
@decorators.idempotent_id('e7c066d5-c2f1-41e7-bc86-9b6295461903')
|
||||
def test_distributed_router_with_network_and_mtz_networks(self):
|
||||
"""router attached with multiple TZs and one tenant network."""
|
||||
self._test_router_with_network_and_mtz_networks(
|
||||
router_type='distributed')
|
@ -1,60 +0,0 @@
|
||||
# Copyright 2016 OpenStack Foundation
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class MultipleTransportZonesNegativeTest(base.BaseAdminNetworkTest):
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(MultipleTransportZonesNegativeTest, cls).skip_checks()
|
||||
if not hasattr(CONF.nsxv, 'vdn_scope_id'):
|
||||
msg = "Testbed Network & Security vdn_scope_id not specified."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
def create_mtz_networks(self, networks_client=None, scope_id=None):
|
||||
networks_client = networks_client or self.admin_networks_client
|
||||
scope_id = scope_id or CONF.nsxv.vdn_scope_id
|
||||
network_name = data_utils.rand_name('mtz-negative-')
|
||||
create_kwargs = {'provider:network_type': 'vxlan',
|
||||
'provider:physical_network': scope_id}
|
||||
resp = networks_client.create_network(name=network_name,
|
||||
**create_kwargs)
|
||||
network = resp['network'] if 'network' in resp else resp
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
network['id'])
|
||||
return network
|
||||
|
||||
@decorators.attr(type=['negative'])
|
||||
@decorators.idempotent_id('8aff7abc-eacd-409c-8278-4cb7bde6da84')
|
||||
def test_create_mtz_networks(self):
|
||||
# Multiple Transport Zone use provier network to implement
|
||||
# its TZ allocation.
|
||||
# Only admin client can create MTZ networks.
|
||||
# non-admin client can not create mtz network
|
||||
self.assertRaises(lib_exc.Forbidden,
|
||||
self.create_mtz_networks,
|
||||
networks_client=self.networks_client)
|
@ -1,266 +0,0 @@
|
||||
# Copyright 2017 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base_provider as base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as ex
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PortTypeTest(base.BaseAdminNetworkTest):
|
||||
"""NSX-V OpenStack port types test
|
||||
|
||||
Positive
|
||||
- Create direct port
|
||||
- Enable port direct vnic-type
|
||||
- Delete direct port
|
||||
- List ports with direct port
|
||||
- Create, update, delete direct port
|
||||
Negative
|
||||
- Create direct port without flat network with port configs
|
||||
- Create direct port with flat network without port configs
|
||||
- Update direct port with flat network without port configs
|
||||
- Update direct port without flat network with port configs
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(PortTypeTest, cls).setup_clients()
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(PortTypeTest, cls).resource_setup()
|
||||
|
||||
def _create_flat_network(self, _auto_clean_up=True, network_name=None,
|
||||
**kwargs):
|
||||
network_name = network_name or data_utils.rand_name('flat-net')
|
||||
post_body = {'name': network_name,
|
||||
'provider:network_type': 'flat'}
|
||||
post_body.update(kwargs)
|
||||
LOG.debug("create FLAT network: %s", str(post_body))
|
||||
body = self.admin_networks_client.create_network(**post_body)
|
||||
network = body['network']
|
||||
self.networks.append(network)
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_network, network['id'])
|
||||
return network
|
||||
|
||||
def _create_direct_port(self, network_id, _auto_clean_up=True,
|
||||
port_name=None, **kwargs):
|
||||
dir_port_name = port_name or data_utils.rand_name('direct-port')
|
||||
post_body = {'name': dir_port_name,
|
||||
'port_security_enabled': 'False',
|
||||
'security_groups': [],
|
||||
'binding:vnic_type': 'direct'}
|
||||
post_body.update(kwargs)
|
||||
LOG.debug("create DIRECT port: %s", str(post_body))
|
||||
body = self.create_port(network_id=network_id, **post_body)
|
||||
dir_port = body['port']
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_port, dir_port['id'])
|
||||
return dir_port
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('ebb15f36-79bd-4461-91b5-84a57616730c')
|
||||
def test_create_direct_port(self):
|
||||
"""
|
||||
Test create a direct openstack port. After creation, check
|
||||
OpenStack for the port vnic-type.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
dir_port = self._create_direct_port(network_id=test_flat_net['id'])
|
||||
self.assertEqual(dir_port['binding:vnic_type'], 'direct',
|
||||
"Created port type is not DIRECT")
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('2eaa0014-3265-479c-9012-c110df566ef1')
|
||||
def test_enable_port_direct(self):
|
||||
"""
|
||||
Test updating a port to be a direct openstack port.
|
||||
After updating, check nsx_v backend for the port type.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
test_port_name = data_utils.rand_name('test-port-')
|
||||
orig_post = {'name': test_port_name,
|
||||
'port_security_enabled': 'False',
|
||||
'security_groups': []}
|
||||
LOG.debug("create NORMAL port: %s", str(orig_post))
|
||||
test_port = self.create_port(network_id=test_flat_net['id'],
|
||||
**orig_post)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_port, test_port['port']['id'])
|
||||
post_body = {'binding:vnic_type': 'direct'}
|
||||
LOG.debug("update port to be DIRECT: %s", str(orig_post))
|
||||
self.assertEqual(test_port['port']['binding:vnic_type'], 'normal',
|
||||
"Port vnic-type is not NORMAL")
|
||||
updated_port = self.update_port(test_port['port']['id'], **post_body)
|
||||
self.assertEqual(updated_port['port']['binding:vnic_type'], 'direct',
|
||||
"Port vnic-type was not updated to DIRECT")
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('d77125af-7e8f-4dcf-a3a4-7956b3eaa2d2')
|
||||
def test_delete_direct_port(self):
|
||||
"""
|
||||
Test create, then delete a direct openstack port.
|
||||
Verify port type and port delete.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
dir_port = self._create_direct_port(network_id=test_flat_net['id'])
|
||||
self.assertEqual(dir_port['binding:vnic_type'], 'direct',
|
||||
"Port type is not DIRECT")
|
||||
self.assertFalse(self.delete_port(dir_port['id']),
|
||||
"Delete of Direct port was not successful")
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('b69f5ff1-7e86-4790-9392-434cd9ab808f')
|
||||
def test_list_direct_ports(self):
|
||||
"""
|
||||
Create one each of a normal and direct port.
|
||||
Verify that both ports are included in port-list output.
|
||||
"""
|
||||
test_list_ports = []
|
||||
test_flat_net = self._create_flat_network()
|
||||
dir_port = self._create_direct_port(network_id=test_flat_net['id'])
|
||||
test_list_ports.append(dir_port)
|
||||
vanilla_port_name = data_utils.rand_name('vanilla-port-')
|
||||
vanilla_post = {'name': vanilla_port_name}
|
||||
body = self.create_port(network_id=test_flat_net['id'],
|
||||
**vanilla_post)
|
||||
test_port = body['port']
|
||||
test_list_ports.append(test_port)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_port, test_port['id'])
|
||||
body = self.admin_ports_client.list_ports(
|
||||
network_id=test_flat_net['id'])
|
||||
ports_list = body['ports']
|
||||
pids_list = [p['id'] for p in ports_list]
|
||||
ports_not_listed = []
|
||||
for port in test_list_ports:
|
||||
if port['id'] not in pids_list:
|
||||
ports_not_listed.append(port['id'])
|
||||
self.assertEmpty(ports_not_listed, "These ports not listed: %s"
|
||||
% ports_not_listed)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('9b7ec966-f4e4-4087-9789-96a3aa669fa2')
|
||||
def test_create_update_delete_direct_port(self):
|
||||
"""
|
||||
Create, update, delete direct port. Verify port type and update
|
||||
operation.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
dir_port = self._create_direct_port(network_id=test_flat_net['id'])
|
||||
self.assertEqual(dir_port['binding:vnic_type'], 'direct',
|
||||
"Port VNIC_TYPE should be set to DIRECT")
|
||||
updated_port_name = data_utils.rand_name('update-port-')
|
||||
updated_post = {'name': updated_port_name}
|
||||
updated_port = self.update_port(dir_port['id'], **updated_post)['port']
|
||||
self.assertEqual(updated_port['binding:vnic_type'], 'direct',
|
||||
"VNIC_TYPE is not correct type, should be DIRECT")
|
||||
self.assertEqual(updated_port['name'], updated_port_name,
|
||||
"Port name should be updated to %s"
|
||||
% updated_port_name)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('e661ba70-0ab4-4f91-8d84-c5c102ec5793')
|
||||
def test_create_direct_port_without_flat_network_negative(self):
|
||||
"""
|
||||
Create a network. Create a direct openstack port.
|
||||
Creation should fail on a bad request since flat net prereq is not met
|
||||
"""
|
||||
net_name = data_utils.rand_name('test-net')
|
||||
net_body = self.create_network(name=net_name)
|
||||
test_net = net_body['network']
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_network, test_net['id'])
|
||||
self.assertRaises(ex.BadRequest, self._create_direct_port,
|
||||
network_id=test_net['id'])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('ee87287f-4ec6-4502-9bc1-855fa7c93e90')
|
||||
def test_create_direct_port_w_flat_net_wout_port_settings_negative(self):
|
||||
"""
|
||||
Create a flat network. Create a direct openstack port without required
|
||||
port settings.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
test_port_name = data_utils.rand_name('test-port-')
|
||||
orig_post = {'name': test_port_name, 'binding:vnic_type': 'direct'}
|
||||
LOG.debug("create DIRECT port: %s", str(orig_post))
|
||||
self.assertRaises(ex.BadRequest,
|
||||
self.create_port, network_id=test_flat_net['id'],
|
||||
**orig_post)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('03e0065e-6d76-45d5-9192-ce89853dfa9e')
|
||||
def test_update_direct_port_w_flat_net_wout_port_configs_negative(self):
|
||||
"""
|
||||
Create a flat network. Create an openstack port with vnic-type normal.
|
||||
Update port to set vnic-type to direct, without required port settings.
|
||||
Update should fail on a bad request since prereq is not met.
|
||||
"""
|
||||
test_flat_net = self._create_flat_network()
|
||||
test_port_name = data_utils.rand_name('test-port-')
|
||||
orig_post = {'name': test_port_name}
|
||||
LOG.debug("create NORMAL port: %s", str(orig_post))
|
||||
test_port = self.create_port(network_id=test_flat_net['id'],
|
||||
**orig_post)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_port, test_port['port']['id'])
|
||||
post_body = {'binding:vnic_type': 'direct'}
|
||||
LOG.debug("update port to be DIRECT: %s", str(orig_post))
|
||||
self.assertEqual(test_port['port']['binding:vnic_type'], 'normal',
|
||||
"Orig port should be vnic-type NORMAL")
|
||||
self.assertRaises(ex.BadRequest, self.update_port,
|
||||
test_port['port']['id'], **post_body)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.attr(type='negative')
|
||||
@decorators.idempotent_id('d3e75ed7-f3e5-4395-9ab0-063e7a8c141c')
|
||||
def test_update_direct_port_wout_flat_net_with_port_configs_negative(self):
|
||||
"""
|
||||
Create a network. Create a normal openstack port. Update port to direct
|
||||
vnic-type. Update should fail since flat net prereq is not met
|
||||
"""
|
||||
net_name = data_utils.rand_name('test-net')
|
||||
net_body = self.create_network(name=net_name)
|
||||
test_net = net_body['network']
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_network, test_net['id'])
|
||||
test_port_name = data_utils.rand_name('test-port-')
|
||||
orig_post = {'name': test_port_name}
|
||||
LOG.debug("create NORMAL port: %s", str(orig_post))
|
||||
test_port = self.create_port(network_id=test_net['id'],
|
||||
**orig_post)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_port, test_port['port']['id'])
|
||||
post_body = {'port_security_enabled': 'False',
|
||||
'security_groups': [],
|
||||
'binding:vnic_type': 'direct'}
|
||||
self.assertRaises(ex.BadRequest, self.update_port,
|
||||
test_port['port']['id'], **post_body)
|
@ -1,331 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import re
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.services import nsxv_client
|
||||
from vmware_nsx_tempest.tests.nsxv.api import base_provider as base
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ProviderSecGroup(base.BaseAdminNetworkTest):
|
||||
"""Test Provider Security Group
|
||||
|
||||
1. Only Admin can create provider security group.
|
||||
2. Tenants can not create provider security-group.
|
||||
3. Check Provider sec group at beckend in firewall section
|
||||
4. Check the priority of provider sec groups at beckend
|
||||
5. Check non-admin tenant can't create provider security group
|
||||
6. Check multiple rules under provider sec group
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(ProviderSecGroup, cls).skip_checks()
|
||||
if not test.is_extension_enabled('provider-security-group', 'network'):
|
||||
msg = "Extension provider-security-group is not enabled."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(ProviderSecGroup, cls).setup_clients()
|
||||
cls.cmgr_pri = cls.get_client_manager('primary')
|
||||
cls.cmgr_alt = cls.get_client_manager('alt')
|
||||
cls.cmgr_adm = cls.get_client_manager('admin')
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(ProviderSecGroup, cls).resource_setup()
|
||||
manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
|
||||
CONF.nsxv.manager_uri).group(0)
|
||||
cls.vsm = nsxv_client.VSMClient(
|
||||
manager_ip, CONF.nsxv.user, CONF.nsxv.password)
|
||||
|
||||
def delete_security_group(self, sg_client, sg_id):
|
||||
sg_client.delete_security_group(sg_id)
|
||||
|
||||
def create_security_provider_group(self, cmgr=None,
|
||||
project_id=None, provider=False):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg_dict = dict(name=data_utils.rand_name('provider-sec-group'))
|
||||
if project_id:
|
||||
sg_dict['tenant_id'] = project_id
|
||||
if provider:
|
||||
sg_dict['provider'] = True
|
||||
sg = sg_client.create_security_group(**sg_dict)
|
||||
sg = sg.get('security_group', sg)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_security_group,
|
||||
sg_client, sg.get('id'))
|
||||
return sg
|
||||
|
||||
def update_security_provider_group(self, security_group_id,
|
||||
new_policy_id, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg = sg_client.update_security_group(security_group_id,
|
||||
policy=new_policy_id)
|
||||
return sg.get('security_group', sg)
|
||||
|
||||
def create_security_group_rule(self, security_group_id,
|
||||
cmgr=None, project_id=None,
|
||||
protocol=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sgr_client = cmgr.security_group_rules_client
|
||||
sgr_dict = dict(security_group_id=security_group_id,
|
||||
direction='ingress', protocol=protocol)
|
||||
if project_id:
|
||||
sgr_dict['tenant_id'] = project_id
|
||||
sgr = sgr_client.create_security_group_rule(**sgr_dict)
|
||||
return sgr.get('security_group_rule', sgr)
|
||||
|
||||
def show_security_provider_group(self, security_group_id, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg = sg_client.show_security_group(security_group_id)
|
||||
return sg.get('security_group', sg)
|
||||
|
||||
def get_default_security_group_policy(self, cmgr=None):
|
||||
cmgr = cmgr or self.cmgr_adm
|
||||
sg_client = cmgr.security_groups_client
|
||||
sg_list = sg_client.list_security_groups()
|
||||
# why list twice, see bug#1772424
|
||||
sg_list = sg_client.list_security_groups(name='default')
|
||||
sg_list = sg_list.get('security_groups', sg_list)
|
||||
return sg_list[0]
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('5480d96e-287b-4e59-9ee3-d1bcf451dfc9')
|
||||
def test_provider_security_group_crud(self):
|
||||
sg_desc = "crud provider-security-group"
|
||||
sg_client = self.cmgr_adm.security_groups_client
|
||||
sg = self.create_security_provider_group(self.cmgr_adm, provider=True)
|
||||
sg_id = sg.get('id')
|
||||
show_sec_group = sg_client.show_security_group(sg_id)
|
||||
self.assertEqual(True, show_sec_group['security_group']['provider'])
|
||||
sg_show = sg_client.update_security_group(sg_id, description=sg_desc)
|
||||
self.assertEqual(sg_desc, sg_show['security_group'].get('description'))
|
||||
self.delete_security_group(sg_client, sg_id)
|
||||
sg_list = sg_client.list_security_groups(id=sg_id)
|
||||
sg_list = sg_list.get('security_groups', sg_list)
|
||||
self.assertEqual(len(sg_list), 0)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('6e48a2ed-8035-4986-a5c6-903c49ae26a2')
|
||||
def test_admin_can_create_provider_security_group_for_tenant(self):
|
||||
project_id = self.cmgr_alt.networks_client.tenant_id
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
provider=True)
|
||||
self.assertEqual(True, sg.get('provider'))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('95ce76a4-a125-411b-95d7-7a66addf0efc')
|
||||
def test_tenant_provider_sec_group_with_no_rules(self):
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
provider=True)
|
||||
self.assertEmpty(sg.get('security_group_rules'))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('5e6237ca-033a-4bee-b5fb-8f225ed00b0c')
|
||||
def test_admin_can_create_security_group_rule(self):
|
||||
sg_client = self.cmgr_adm.security_groups_client
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
provider=True)
|
||||
sg_id = sg.get('id')
|
||||
self.create_security_group_rule(sg_id, cmgr=self.cmgr_adm,
|
||||
protocol='icmp')
|
||||
show_sec_group = sg_client.show_security_group(sg_id)
|
||||
self.assertEqual('ingress',
|
||||
show_sec_group['security_group']
|
||||
['security_group_rules']
|
||||
[0]['direction'])
|
||||
self.assertEqual('icmp',
|
||||
show_sec_group['security_group']
|
||||
['security_group_rules']
|
||||
[0]['protocol'])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('18737e13-4bca-4f62-b993-f021795a7dbf')
|
||||
def test_provider_security_group_rule_at_beckend(self):
|
||||
sg = self.create_security_provider_group(self.cmgr_adm, provider=True)
|
||||
sg_id = sg.get('id')
|
||||
sg_name = sg.get('name')
|
||||
sg_rule = self.create_security_group_rule(sg_id, cmgr=self.cmgr_adm,
|
||||
protocol='icmp')
|
||||
sg_rule.get('id')
|
||||
firewall_section = self.vsm.get_firewall()
|
||||
for i in (0, len(firewall_section)):
|
||||
if (sg_name in firewall_section['layer3Sections']
|
||||
['layer3Sections'][i]['name']):
|
||||
for rule in firewall_section.\
|
||||
get('layer3Sections')['layer3Sections'][i]['rules']:
|
||||
for rule_proto in rule['services']['serviceList']:
|
||||
self.assertIn('ICMP', rule_proto['protocolName'])
|
||||
self.assertIn('deny', rule['action'], "Provider "
|
||||
"security Group applied")
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('b179a32b-e012-43ec-9d2d-f9e5801c97c6')
|
||||
def test_provider_security_group_predence_at_beckend(self):
|
||||
sg = self.create_security_provider_group(self.cmgr_adm, provider=True)
|
||||
sg_name = sg.get('name')
|
||||
firewall_section = self.vsm.get_firewall()
|
||||
count = 0
|
||||
for i in (0, len(firewall_section)):
|
||||
if (count == 0):
|
||||
self.assertIn(sg_name, firewall_section['layer3Sections']
|
||||
['layer3Sections'][i]['name'],
|
||||
"Provider security Group applied at the beckend"
|
||||
" and has higher predence over default sec "
|
||||
"group")
|
||||
self.assertEqual(0, count)
|
||||
break
|
||||
count += count
|
||||
self.assertEqual(0, count)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('38b21ea8-7822-4b1a-b923-cd00fa57ca4d')
|
||||
def test_provider_security_group_at_port_level(self):
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
provider=True)
|
||||
sg_id = sg.get('id')
|
||||
net_client = self.cmgr_adm.networks_client
|
||||
body = {'name': 'provider-network'}
|
||||
network = net_client.create_network(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"allocation_pools": [{"start": "2.0.0.2",
|
||||
"end": "2.0.0.254"}],
|
||||
"ip_version": 4, "cidr": "2.0.0.0/24"}
|
||||
subnet_client = self.cmgr_adm.subnets_client
|
||||
subnet_client.create_subnet(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"admin_state_up": 'true'}
|
||||
port_client = self.cmgr_adm.ports_client
|
||||
port_id = port_client.create_port(**body)
|
||||
ss = port_client.show_port(port_id['port']['id'])
|
||||
self.assertEqual([sg_id], ss['port']['provider_security_groups'])
|
||||
body = {"id": port_id}
|
||||
port_client.delete_port(port_id['port']['id'])
|
||||
net_client.delete_network(network['network']['id'])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('b1e904fb-a70a-400e-a757-d772aab152eb')
|
||||
def test_provider_sec_group_with_multiple_rules(self):
|
||||
project_id = self.cmgr_adm.networks_client.tenant_id
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
project_id=project_id)
|
||||
sg_rule1 = self.create_security_group_rule(sg.get('id'),
|
||||
cmgr=self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
protocol='icmp')
|
||||
sg_rule1_id = sg_rule1.get('id')
|
||||
sg_rule2 = self.create_security_group_rule(sg.get('id'),
|
||||
cmgr=self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
protocol='tcp')
|
||||
sg_rule2_id = sg_rule2.get('id')
|
||||
self.assertNotEqual(sg_rule1_id, sg_rule2_id)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('edd94f8c-53b7-4286-9350-0ddc0af3213b')
|
||||
def test_clear_provider_sec_group_from_port(self):
|
||||
project_id = self.cmgr_adm.networks_client.tenant_id
|
||||
self.create_security_provider_group(self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
provider=True)
|
||||
net_client = self.cmgr_adm.networks_client
|
||||
body = {'name': 'provider-network'}
|
||||
network = net_client.create_network(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"allocation_pools": [{"start": "2.0.0.2",
|
||||
"end": "2.0.0.254"}],
|
||||
"ip_version": 4, "cidr": "2.0.0.0/24"}
|
||||
subnet_client = self.cmgr_adm.subnets_client
|
||||
subnet_client.create_subnet(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"provider_security_groups": []}
|
||||
port_client = self.cmgr_adm.ports_client
|
||||
port_id = port_client.create_port(**body)
|
||||
ss = port_client.show_port(port_id['port']['id'])
|
||||
self.assertEmpty(ss['port']['provider_security_groups'])
|
||||
port_client.delete_port(port_id['port']['id'])
|
||||
net_client.delete_network(network['network']['id'])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('6c1e6728-b84a-47f9-9021-ff3e3f88a933')
|
||||
def test_tenant_cannot_delete_admin_provider_security_group(self):
|
||||
project_id = self.cmgr_adm.networks_client.tenant_id
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
provider=True)
|
||||
sg_id = sg.get('id')
|
||||
sg_client = self.cmgr_alt.security_groups_client
|
||||
try:
|
||||
self.delete_security_group(sg_client, sg_id)
|
||||
except Exception:
|
||||
LOG.debug("Non Admin tenant can't see admin"
|
||||
"provider security group")
|
||||
pass
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('94e06ee2-82ed-4203-ac9b-421a298bdba4')
|
||||
def test_tenant_cannot_create_provider_sec_group(self):
|
||||
project_id = self.cmgr_alt.networks_client.tenant_id
|
||||
self.assertRaises(exceptions.Forbidden,
|
||||
self.create_security_provider_group,
|
||||
self.cmgr_alt, project_id=project_id,
|
||||
provider=True)
|
||||
LOG.debug("Non-Admin Tenant cannot create provider sec group")
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('01f00a36-7576-40e0-a397-b43860a9c122')
|
||||
def test_update_port_with_psg(self):
|
||||
net_client = self.cmgr_adm.networks_client
|
||||
body = {'name': 'provider-network'}
|
||||
network = net_client.create_network(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"allocation_pools": [{"start": "2.0.0.2",
|
||||
"end": "2.0.0.254"}],
|
||||
"ip_version": 4, "cidr": "2.0.0.0/24"}
|
||||
subnet_client = self.cmgr_adm.subnets_client
|
||||
subnet_client.create_subnet(**body)
|
||||
body = {"network_id": network['network']['id'],
|
||||
"provider_security_groups": []}
|
||||
port_client = self.cmgr_adm.ports_client
|
||||
port_id = port_client.create_port(**body)
|
||||
ss = port_client.show_port(port_id['port']['id'])
|
||||
self.assertEmpty(ss['port']['provider_security_groups'])
|
||||
project_id = self.cmgr_adm.networks_client.tenant_id
|
||||
sg = self.create_security_provider_group(self.cmgr_adm,
|
||||
project_id=project_id,
|
||||
provider=True)
|
||||
sg_id = sg.get('id')
|
||||
body = {"provider_security_groups": ["%s" % sg_id]}
|
||||
port_client.update_port(port_id['port']['id'], **body)
|
||||
ss = port_client.show_port(port_id['port']['id'])
|
||||
self.assertIsNotNone(ss['port']['provider_security_groups'])
|
||||
port_client.delete_port(port_id['port']['id'])
|
||||
net_client.delete_network(network['network']['id'])
|
@ -1,206 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib.services import network as net_clients
|
||||
from tempest import test
|
||||
from vmware_nsx_tempest.services import nsxv_client
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
ROUTER_SIZE = ('compact', 'large', 'xlarge', 'quadlarge')
|
||||
|
||||
|
||||
class RouterSizeBaseTest(base.BaseAdminNetworkTest):
|
||||
"""Base class to test creating routers with different router sizes:
|
||||
|
||||
NSX-v allows exclusive router to be created with one of ROUTER_SIZE.
|
||||
Starts with VIO-3.0 it can update its router_size after created.
|
||||
|
||||
tempest internally uses urllib3 and by default it will retry very 60
|
||||
seconds. However this retry mechanism causes bug#1716696.
|
||||
|
||||
A better solution is to change request's retry-time so it will not
|
||||
cause neutront keep creating routers while router was not created
|
||||
in time.
|
||||
|
||||
Methods should be used to change retry-time are:
|
||||
|
||||
create_exclusive_router & change_router_size
|
||||
|
||||
The retry-time is http_timeout in request.__init__() and is
|
||||
defined by CONF.nsxv.create_router_http_timeout.
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(RouterSizeBaseTest, cls).skip_checks()
|
||||
if not test.is_extension_enabled('nsxv-router-type', 'network'):
|
||||
msg = "router-type extension is not enabled"
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(RouterSizeBaseTest, cls).setup_clients()
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(RouterSizeBaseTest, cls).resource_setup()
|
||||
cls.tenant_cidr = (CONF.network.project_network_cidr
|
||||
if cls._ip_version == 4 else
|
||||
CONF.network.project_network_v6_cidr)
|
||||
manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
|
||||
CONF.nsxv.manager_uri).group(0)
|
||||
cls.vsm = nsxv_client.VSMClient(
|
||||
manager_ip, CONF.nsxv.user, CONF.nsxv.password)
|
||||
|
||||
def setUp(self):
|
||||
super(RouterSizeBaseTest, self).setUp()
|
||||
params = {'build_interval': self.routers_client.build_interval,
|
||||
'build_timeout': self.routers_client.build_timeout}
|
||||
http_timeout = CONF.nsxv.create_router_http_timeout
|
||||
self.router_sizes_client = net_clients.RoutersClient(
|
||||
self.routers_client.auth_provider,
|
||||
self.routers_client.service,
|
||||
self.routers_client.region,
|
||||
self.routers_client.endpoint_type,
|
||||
http_timeout=http_timeout,
|
||||
**params)
|
||||
|
||||
def create_exclusive_router(self, router_size):
|
||||
name = data_utils.rand_name('rtr1-%s' % router_size)
|
||||
LOG.debug("create router with size=%s", router_size)
|
||||
ext_gw_info = dict(
|
||||
network_id=CONF.network.public_network_id)
|
||||
rtr_cfg = dict(
|
||||
name=name, admin_state_up=False,
|
||||
external_gateway_info=ext_gw_info,
|
||||
router_type='exclusive',
|
||||
router_size=router_size)
|
||||
router = self.router_sizes_client.create_router(**rtr_cfg)
|
||||
router = router.get('router', router)
|
||||
self.routers.append(router)
|
||||
self.assertEqual(router['name'], name)
|
||||
self.check_router_nsx_name(router, router_size)
|
||||
return router
|
||||
|
||||
def change_router_size(self, router, new_router_size):
|
||||
LOG.debug("update router to size=%s", new_router_size)
|
||||
update_router = self.router_sizes_client.update_router(
|
||||
router['id'], router_size=new_router_size)['router']
|
||||
self.assertEqual(update_router['router_size'], new_router_size)
|
||||
self.check_router_nsx_name(update_router, new_router_size)
|
||||
return router
|
||||
|
||||
def check_router_nsx_name(self, router, router_size=None):
|
||||
router_nsxv_name = self.get_router_nsx_name(router)
|
||||
exc_edge = self.vsm.get_edge(router_nsxv_name)
|
||||
self.assertTrue(exc_edge is not None)
|
||||
self.assertEqual(exc_edge['edgeType'], 'gatewayServices')
|
||||
if router_size:
|
||||
edge_type = exc_edge['appliancesSummary']['applianceSize']
|
||||
LOG.debug("check router size at backend is %s", router_size)
|
||||
self.assertEqual(edge_type, router_size)
|
||||
return router_nsxv_name
|
||||
|
||||
def get_router_nsx_name(self, router):
|
||||
router_nsxv_name = '%s-%s' % (router['name'], router['id'])
|
||||
return router_nsxv_name
|
||||
|
||||
def do_create_update_delete_router_with_size(self,
|
||||
router_size,
|
||||
del_waitfor=10.0,
|
||||
del_interval=1.5):
|
||||
router = self.create_exclusive_router(router_size)
|
||||
updated_name = 'updated-' + router['name']
|
||||
update_router = self.router_sizes_client.update_router(
|
||||
router['id'], name=updated_name)['router']
|
||||
self.assertEqual(update_router['name'], updated_name)
|
||||
# change router name, the backend also change
|
||||
router = self.router_sizes_client.show_router(
|
||||
router['id'])['router']
|
||||
nsxv_edge_name = self.check_router_nsx_name(router, router_size)
|
||||
# Delete the exclusive router and verify it has been deleted
|
||||
# from nsxv backend
|
||||
self.router_sizes_client.delete_router(router['id'])
|
||||
list_body = self.router_sizes_client.list_routers()
|
||||
routers_list = [r['id'] for r in list_body['routers']]
|
||||
self.assertNotIn(router['id'], routers_list)
|
||||
wait_till = time.time() + del_waitfor
|
||||
while (time.time() < wait_till):
|
||||
try:
|
||||
self.assertIsNone(self.vsm.get_edge(nsxv_edge_name))
|
||||
return
|
||||
except Exception:
|
||||
time.sleep(del_interval)
|
||||
# last try. Fail if nesx_edge still exists
|
||||
fail_msg = ("%s router nsxv_edge[%s] still exists after %s seconds." %
|
||||
(router_size, nsxv_edge_name, del_waitfor))
|
||||
self.assertEqual(self.vsm.get_edge(nsxv_edge_name), None, fail_msg)
|
||||
|
||||
def do_router_size_change_test(self, router_size, new_router_size_list):
|
||||
router = self.create_exclusive_router(router_size)
|
||||
for new_router_size in new_router_size_list:
|
||||
self.change_router_size(router, new_router_size)
|
||||
|
||||
|
||||
class CompactRouterTest(RouterSizeBaseTest):
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('d75fbcd5-c8cb-49ea-a868-ada12fd8c87f')
|
||||
def test_create_update_delete_compact_router(self):
|
||||
self.do_create_update_delete_router_with_size('compact')
|
||||
|
||||
|
||||
class LargeRouterTest(RouterSizeBaseTest):
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('da00c74f-81e6-4ef9-8aca-8e0345b376e9')
|
||||
def test_create_update_delete_large_router(self):
|
||||
self.do_create_update_delete_router_with_size('large', 20.0)
|
||||
|
||||
|
||||
class XlargeRouterTest(RouterSizeBaseTest):
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('091dad07-6044-4ca3-b16c-54a3ef92254b')
|
||||
def test_create_update_delete_xlarge_router(self):
|
||||
self.do_create_update_delete_router_with_size('xlarge', 20.0)
|
||||
|
||||
|
||||
class QuadlargeRouterTest(RouterSizeBaseTest):
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('0f69bf8a-4b06-47ac-a3f7-eedba95fd395')
|
||||
def test_create_update_delete_quadlarge_router(self):
|
||||
self.do_create_update_delete_router_with_size('quadlarge', 30.0)
|
||||
|
||||
|
||||
class RouterSizeChangeTest(RouterSizeBaseTest):
|
||||
@decorators.idempotent_id('3201b0a9-702c-46cf-8512-f166a6ea5109')
|
||||
def test_router_size_1sizeup_change(self):
|
||||
self.do_router_size_change_test(
|
||||
'compact',
|
||||
('large', 'xlarge', 'quadlarge'))
|
||||
|
||||
@decorators.idempotent_id('c7ee9f78-4938-4bdd-b39c-1d736d41a84b')
|
||||
def test_router_size_outofseq_change(self):
|
||||
self.do_router_size_change_test(
|
||||
"large",
|
||||
('quadlarge', 'compact', 'xlarge', 'large'))
|
@ -1,151 +0,0 @@
|
||||
# Copyright 2015 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest import test
|
||||
from vmware_nsx_tempest.services import nsxv_client
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class ExcRouterTest(base.BaseAdminNetworkTest):
    """Tests for the NSX-v 'exclusive' router type.

    An exclusive router maps 1:1 to an NSX-v service edge. All tests
    are skipped when the 'nsxv-router-type' network extension is not
    enabled.
    """

    @classmethod
    def skip_checks(cls):
        super(ExcRouterTest, cls).skip_checks()
        if not test.is_extension_enabled('nsxv-router-type', 'network'):
            msg = "router-type extension is not enabled"
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        """Cache the tenant CIDR and build a VSM client for the backend."""
        super(ExcRouterTest, cls).resource_setup()
        cls.tenant_cidr = (CONF.network.project_network_cidr
                           if cls._ip_version == 4 else
                           CONF.network.project_network_v6_cidr)
        # The VSM client talks directly to the NSX-v manager; pull the
        # manager's IP address out of the configured manager URI.
        manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
                               CONF.nsxv.manager_uri).group(0)
        cls.vsm = nsxv_client.VSMClient(
            manager_ip, CONF.nsxv.user, CONF.nsxv.password)

    def _delete_router(self, router):
        """Detach all router interfaces, then delete the router itself."""
        body = self.ports_client.list_ports(device_id=router['id'])
        interfaces = body['ports']
        for interface in interfaces:
            test_utils.call_and_ignore_notfound_exc(
                self.routers_client.remove_router_interface, router['id'],
                subnet_id=interface['fixed_ips'][0]['subnet_id'])
        self.routers_client.delete_router(router['id'])

    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('ac1639a0-2a8d-4c68-bccd-54849fd45f86')
    def test_create_exc_router(self):
        """Create an exclusive router.

        After creation, check that the NSX-v backend created a service
        edge for the exclusive router.
        """
        name = data_utils.rand_name('router-')
        router = self.routers_client.create_router(
            name=name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False, router_type='exclusive')
        self.addCleanup(self._delete_router, router['router'])
        # Backend edges are named '<router-name>-<router-id>'.
        router_nsxv_name = '%s-%s' % (router['router']['name'],
                                      router['router']['id'])
        self.assertEqual(router['router']['name'], name)
        exc_edge = self.vsm.get_edge(router_nsxv_name)
        self.assertIsNotNone(exc_edge)
        self.assertEqual(exc_edge['edgeType'], 'gatewayServices')

    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c4b94988-0bc7-11e5-9203-0050568833db')
    def test_update_exc_router(self):
        """Update the name of an exclusive router."""
        name = data_utils.rand_name('router-')
        router = self.routers_client.create_router(
            name=name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False, router_type='exclusive')
        self.addCleanup(self._delete_router, router['router'])
        self.assertEqual(router['router']['name'], name)
        updated_name = 'updated' + name
        update_body = self.routers_client.update_router(
            router['router']['id'], name=updated_name)
        self.assertEqual(update_body['router']['name'], updated_name)

    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('a0ff5afa-0bcc-11e5-9203-0050568833db')
    def test_list_show_exc_router(self):
        """List and show an exclusive router."""
        name = data_utils.rand_name('router-')
        router = self.routers_client.create_router(
            name=name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False, router_type='exclusive')
        self.addCleanup(self._delete_router, router['router'])
        self.assertEqual(router['router']['name'], name)
        # Show details of exclusive router
        show_body = self.routers_client.show_router(router['router']['id'])
        self.assertEqual(show_body['router']['name'], name)
        self.assertEqual(show_body['router']['admin_state_up'], False)
        # List routers and verify the created router is in the list
        list_body = self.routers_client.list_routers()
        routers_list = [r['id'] for r in list_body['routers']]
        self.assertIn(router['router']['id'], routers_list)

    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('adef8d1e-0bce-11e5-9203-0050568833db')
    def test_delete_exc_router(self):
        """Create, update, and delete an exclusive router."""
        name = data_utils.rand_name('router-')
        router = self.routers_client.create_router(
            name=name, external_gateway_info={
                "network_id": CONF.network.public_network_id},
            admin_state_up=False, router_type='exclusive')
        self.assertEqual(router['router']['name'], name)
        # Update the name of the exclusive router
        updated_name = 'updated' + name
        update_body = self.routers_client.update_router(
            router['router']['id'], name=updated_name)
        self.assertEqual(update_body['router']['name'], updated_name)
        # Delete the exclusive router and verify it has been deleted
        # from nsxv backend
        self.routers_client.delete_router(router['router']['id'])
        list_body = self.routers_client.list_routers()
        routers_list = [r['id'] for r in list_body['routers']]
        self.assertNotIn(router['router']['id'], routers_list)
        nsxv_edge_name = "%s-%s" % (name, router['router']['id'])
        self.assertIsNone(self.vsm.get_edge(nsxv_edge_name))
|
@ -1,263 +0,0 @@
|
||||
# Copyright 2015 GlobalLogic. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
from oslo_log import log as logging
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
from tempest import test
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class SubnetPoolsTestJSON(base.BaseNetworkTest):
    """Tests the following operations in the subnetpools API:

    1. Create, update, delete, list and show subnet pool.
    2. Check shared subnetpool created by admin only.
    3. Check non-admin tenant can't delete shared pool created by admin.
    4. Create subnetpool with quota limit for subnet and check subnet
       exhaust.
    5. Create subnets from subnetpool till the time no more ip left in
       subnetpool.

    v2.0 of the Neutron API is assumed. It is assumed that
    subnet_allocation options mentioned in the [network-feature-enabled]
    section and default_network option mentioned in the [network] section
    of etc/tempest.conf are set.
    """

    @classmethod
    def skip_checks(cls):
        super(SubnetPoolsTestJSON, cls).skip_checks()
        if not test.is_extension_enabled('subnet_allocation', 'network'):
            msg = "subnet_allocation extension not enabled."
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        super(SubnetPoolsTestJSON, cls).setup_clients()
        cls.cmgr_pri = cls.get_client_manager('primary')
        cls.cmgr_alt = cls.get_client_manager('alt')
        cls.cmgr_adm = cls.get_client_manager('admin')

    def clean_subnet(self, subnet_client, subnet_id):
        """Schedule best-effort deletion of *subnet_id* at teardown."""
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnet_client.delete_subnet,
                        subnet_id)

    def _create_subnet_pool(self, client, name, default_quota=None,
                            shared='false'):
        """Create a subnet pool from the configured default prefix.

        Deletion of the pool is scheduled automatically at teardown.
        Returns the raw API response body.
        """
        prefix = CONF.network.default_network
        subnetpool_client = client.subnetpools_client
        if default_quota is None:
            body = subnetpool_client.create_subnetpool(name=name,
                                                       prefixes=prefix,
                                                       shared=shared)
        else:
            body = subnetpool_client.create_subnetpool(
                name=name, prefixes=prefix, shared=shared,
                default_quota=default_quota)
        subnetpool_id = body["subnetpool"]["id"]
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        subnetpool_client.delete_subnetpool,
                        subnetpool_id)
        return body

    def _create_network_topo(self, subnetpool_id, prefixlen=26):
        """Create a network plus one subnet allocated from the pool.

        The subnet is carved out of *subnetpool_id* with the requested
        *prefixlen*. Cleanup for both resources is scheduled.
        """
        net_client = self.cmgr_adm.networks_client
        body = {'name': 'provider-network'}
        network = net_client.create_network(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        net_client.delete_network,
                        network['network']['id'])
        # Honor the prefixlen argument (it used to be hard-coded to 28,
        # silently ignoring what callers asked for).
        body = {"network_id": network['network']['id'],
                "ip_version": 4, "subnetpool_id": subnetpool_id,
                "prefixlen": prefixlen}
        subnet_client = self.cmgr_adm.subnets_client
        subnet = subnet_client.create_subnet(**body)
        self.clean_subnet(subnet_client, subnet['subnet']['id'])
        network_topo = dict(network=network, subnet=subnet)
        return network_topo

    @decorators.idempotent_id('b39c237a-a1e8-4372-8f97-7fc9ff3660e3')
    def test_subnetpools_crud_operations(self):
        """Create, show, update and delete a subnet pool."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_client = self.cmgr_adm.subnetpools_client
        subnetpool_id = body["subnetpool"]["id"]
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # get detail about subnet pool
        body = subnetpool_client.show_subnetpool(subnetpool_id)
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # update the subnet pool
        subnetpool_name = data_utils.rand_name('subnetpools_update')
        body = subnetpool_client.update_subnetpool(subnetpool_id,
                                                   name=subnetpool_name)
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # delete subnet pool
        body = subnetpool_client.delete_subnetpool(subnetpool_id)
        self.assertRaises(lib_exc.NotFound,
                          subnetpool_client.show_subnetpool,
                          subnetpool_id)

    @decorators.idempotent_id('de7b8aa9-0a94-4159-b3b8-4d41ae8348b1')
    def test_subnetpools_shared_operations(self):
        """A shared pool is visible to other tenants; a private one is not."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name,
                                        shared='true')
        subnetpool_id = body["subnetpool"]["id"]
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        # shared pool is visible from the alt tenant
        subnetpool_alt_client = self.cmgr_alt.subnetpools_client
        body = subnetpool_alt_client.show_subnetpool(subnetpool_id)
        self.assertIn(subnetpool_id, body['subnetpool']['id'])
        # a non-shared pool of the alt tenant is hidden from primary
        body = self._create_subnet_pool(self.cmgr_alt, subnetpool_name,
                                        shared='false')
        subnetpool_alt_id = body["subnetpool"]["id"]
        subnetpool_pri_client = self.cmgr_pri.subnetpools_client
        self.assertRaises(lib_exc.NotFound,
                          subnetpool_pri_client.show_subnetpool,
                          subnetpool_alt_id)

    @decorators.idempotent_id('7eaf09a1-c0d4-403d-b6ef-f9d173b61219')
    def test_shared_subnetpool_created_by_admin_only(self):
        """Only admin may create a shared subnet pool."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name,
                                        shared='true')
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        subnetpool_alt_client = self.cmgr_alt.subnetpools_client
        prefix = CONF.network.default_network
        # PolicyNotAuthorized disallowed by policy
        self.assertRaises(lib_exc.Forbidden,
                          subnetpool_alt_client.create_subnetpool,
                          name=subnetpool_name, prefixes=prefix,
                          shared='true')

    @decorators.idempotent_id('99c3f9dc-64e2-4868-bfed-0838345e4684')
    def test_shared_subnetpool_not_deleted_by_non_admin(self):
        """A non-admin tenant cannot delete an admin's shared pool."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name,
                                        shared='true')
        subnetpool_id = body["subnetpool"]["id"]
        self.assertEqual(subnetpool_name, body["subnetpool"]["name"])
        subnetpool_alt_client = self.cmgr_alt.subnetpools_client
        # PolicyNotAuthorized disallowed by policy
        self.assertRaises(lib_exc.NotFound,
                          subnetpool_alt_client.delete_subnetpool,
                          subnetpool_id)

    @decorators.idempotent_id('76988ed9-6eed-491c-89a5-ba4be430c7e2')
    def test_subnetpools_with_quota_limit_subnets(self):
        """Exceeding the pool's default_quota yields a Conflict."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name,
                                        default_quota=70, shared='true')
        subnetpool_id = body["subnetpool"]["id"]
        network_topo = self._create_network_topo(subnetpool_id, prefixlen=28)
        subnet_client = self.cmgr_adm.subnets_client
        body = {"network_id": network_topo['network']['network']['id'],
                "ip_version": 4, "subnetpool_id": subnetpool_id,
                "prefixlen": 26, "enable_dhcp": 'false'}
        # "Per-tenant subnet pool prefix quota exceeded"
        self.assertRaises(lib_exc.Conflict,
                          subnet_client.create_subnet, **body)

    @decorators.idempotent_id('bfc82211-20ae-4e3d-878d-f567bcefcec6')
    def test_subnetpools_with_overlapping_subnets(self):
        """Two pool-allocated subnets plus a port on the same network."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_id = body["subnetpool"]["id"]
        network_topo = self._create_network_topo(subnetpool_id, prefixlen=28)
        subnet_client = self.cmgr_adm.subnets_client
        body = {"network_id": network_topo['network']['network']['id'],
                "ip_version": 4, "subnetpool_id": subnetpool_id,
                "prefixlen": 28, "enable_dhcp": 'false'}
        subnet = subnet_client.create_subnet(**body)
        self.clean_subnet(subnet_client, subnet['subnet']['id'])
        body = {"network_id": network_topo['network']['network']['id'],
                "admin_state_up": 'true'}
        port_client = self.cmgr_adm.ports_client
        port_id = port_client.create_port(**body)
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        port_client.delete_port,
                        port_id['port']['id'])

    @decorators.idempotent_id('e6828de5-8b81-4e38-8c6f-5821ec75230f')
    def test_multiple_subnets_from_multi_nets_under_same_pool(self):
        """One pool can serve subnets on multiple networks."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_id = body["subnetpool"]["id"]
        # Create subnet1 , subnet2
        self._create_network_topo(subnetpool_id, prefixlen=28)
        self._create_network_topo(subnetpool_id, prefixlen=28)

    @decorators.idempotent_id('7ecbc5c5-2c63-42e8-8120-3cf2c7e5b292')
    def test_multiple_subnets_from_multi_pools_under_same_net(self):
        """A network may not mix subnets from different pools."""
        # create subnetpool1
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_id = body["subnetpool"]["id"]
        # create subnet1
        network_topo = self._create_network_topo(subnetpool_id, prefixlen=28)
        # create subnetpool2
        subnet_client = self.cmgr_adm.subnets_client
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_id1 = body["subnetpool"]["id"]
        body = {"network_id": network_topo['network']['network']['id'],
                "ip_version": 4, "subnetpool_id": subnetpool_id1,
                "prefixlen": 28, "enable_dhcp": 'false'}
        # create subnet2
        self.assertRaises(lib_exc.BadRequest,
                          subnet_client.create_subnet,
                          **body)

    @decorators.idempotent_id('82412f8d-df29-4a23-b6c7-2c6d1035cf0b')
    def test_subnetpools_with_overlapping_multi_subnets(self):
        """Allocate subnets until the pool's address space is exhausted."""
        subnetpool_name = data_utils.rand_name('subnetpools')
        body = self._create_subnet_pool(self.cmgr_adm, subnetpool_name)
        subnetpool_id = body["subnetpool"]["id"]
        prefix = CONF.network.default_network
        prefixlen = 26
        # create network and subnet1
        network_topo = self._create_network_topo(subnetpool_id,
                                                 prefixlen=prefixlen)
        subnet_client = self.cmgr_adm.subnets_client
        body = {"network_id": network_topo['network']['network']['id'],
                "ip_version": 4, "subnetpool_id": subnetpool_id,
                "prefixlen": prefixlen, "enable_dhcp": 'false'}
        actual_netmask = int(prefix[0].split('/')[1])
        no_of_ips = 2 ** (32 - actual_netmask)
        no_of_ips_per_prefix = 2 ** (32 - prefixlen)
        # Floor division is required: '/' yields a float on Python 3,
        # which would make range() below raise TypeError.
        no_of_subnets = no_of_ips // no_of_ips_per_prefix
        for subnet_num in range(1, no_of_subnets + 1):
            try:
                # keep allocating until the pool runs dry
                subnet = subnet_client.create_subnet(**body)
                self.clean_subnet(subnet_client, subnet['subnet']['id'])
            except lib_exc.ServerFault:
                LOG.info("Failed to allocate subnet: Insufficient "
                         "prefix space to allocate subnet size")
|
@ -1,496 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import base_provider as base
|
||||
from tempest.common import custom_matchers
|
||||
from tempest import config
|
||||
|
||||
import netaddr
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SubnetTestJSON(base.BaseAdminNetworkTest):
|
||||
_provider_network_body = {}
|
||||
|
||||
"""
|
||||
[NOTE: This module copied/modified from api/network/test_networks.py
|
||||
to create provider networks/subnets tests]
|
||||
|
||||
Tests the following operations in the Neutron API using the REST client for
|
||||
Neutron:
|
||||
|
||||
create a network for a tenant
|
||||
list tenant's networks
|
||||
show a tenant network details
|
||||
create a subnet for a tenant
|
||||
list tenant's subnets
|
||||
show a tenant subnet details
|
||||
network update
|
||||
subnet update
|
||||
delete a network also deletes its subnets
|
||||
|
||||
All subnet tests are run once with ipv4 and once with ipv6.
|
||||
|
||||
v2.0 of the Neutron API is assumed. It is also assumed that the following
|
||||
options are defined in the [network] section of etc/tempest.conf:
|
||||
|
||||
project_network_cidr with a block of cidr's from which smaller blocks
|
||||
can be allocated for tenant ipv4 subnets
|
||||
|
||||
project_network_v6_cidr is the equivalent for ipv6 subnets
|
||||
|
||||
project_network_mask_bits with the mask bits to be used to partition
|
||||
the block defined by project_network_cidr
|
||||
|
||||
project_network_v6_mask_bits is the equivalent for ipv6 subnets
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(SubnetTestJSON, cls).resource_setup()
|
||||
for k, v in cls._provider_network_body.items():
|
||||
if not v:
|
||||
cls._provider_network_body.pop(k)
|
||||
body = cls.create_network(client=cls.admin_networks_client,
|
||||
**cls._provider_network_body)
|
||||
cls.network = body['network']
|
||||
cls.name = cls.network['name']
|
||||
cls.subnet = cls._create_subnet_with_last_subnet_block(cls.network)
|
||||
cls.cidr = cls.subnet['cidr']
|
||||
cls._subnet_data = {6: {'gateway':
|
||||
str(cls._get_gateway_from_tempest_conf(6)),
|
||||
'allocation_pools':
|
||||
cls._get_allocation_pools_from_gateway(6),
|
||||
'dns_nameservers': ['2001:4860:4860::8844',
|
||||
'2001:4860:4860::8888'],
|
||||
'host_routes': [{'destination': '2001::/64',
|
||||
'nexthop': '2003::1'}],
|
||||
'new_host_routes': [{'destination':
|
||||
'2001::/64',
|
||||
'nexthop': '2005::1'}],
|
||||
'new_dns_nameservers':
|
||||
['2001:4860:4860::7744',
|
||||
'2001:4860:4860::7888']},
|
||||
4: {'gateway':
|
||||
str(cls._get_gateway_from_tempest_conf(4)),
|
||||
'allocation_pools':
|
||||
cls._get_allocation_pools_from_gateway(4),
|
||||
'dns_nameservers': ['8.8.4.4', '8.8.8.8'],
|
||||
'host_routes': [{'destination': '10.20.0.0/32',
|
||||
'nexthop': '10.100.1.1'}],
|
||||
'new_host_routes': [{'destination':
|
||||
'10.20.0.0/32',
|
||||
'nexthop':
|
||||
'10.100.1.2'}],
|
||||
'new_dns_nameservers': ['7.8.8.8', '7.8.4.4']}}
|
||||
|
||||
    @classmethod
    def _create_subnet_with_last_subnet_block(cls, network, ip_version=4):
        """Create a subnet using the LAST CIDR block of the tenant range.

        The tenant CIDR from tempest.conf is partitioned into blocks of
        ``mask_bits`` size and the final block is used, reducing the
        chance of colliding with subnets created by other tests.
        """
        if ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            mask_bits = CONF.network.project_network_mask_bits
        elif ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
            mask_bits = CONF.network.project_network_v6_mask_bits

        subnet_cidr = list(cidr.subnet(mask_bits))[-1]
        # NOTE(review): IPAddress() is handed an IPNetwork here; this
        # appears to rely on netaddr coercing the network to its base
        # address -- confirm; 'subnet_cidr.network + 1' would be explicit.
        gateway_ip = str(netaddr.IPAddress(subnet_cidr) + 1)
        body = cls.create_subnet(network, gateway=gateway_ip,
                                 cidr=subnet_cidr, mask_bits=mask_bits)
        return body['subnet']
|
||||
|
||||
    @classmethod
    def _get_gateway_from_tempest_conf(cls, ip_version):
        """Return first subnet gateway for configured CIDR.

        Only ip_version 4 and 6 are handled; any other value would hit
        an UnboundLocalError on 'cidr' below.
        """
        if ip_version == 4:
            cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
            mask_bits = CONF.network.project_network_mask_bits
        elif ip_version == 6:
            cidr = netaddr.IPNetwork(CONF.network.project_network_v6_cidr)
            mask_bits = CONF.network.project_network_v6_mask_bits

        if mask_bits >= cidr.prefixlen:
            # CIDR cannot be subdivided further; gateway is .1 of it
            return netaddr.IPAddress(cidr) + 1
        else:
            # gateway is .1 of the first mask_bits-sized sub-block
            for subnet in cidr.subnet(mask_bits):
                return netaddr.IPAddress(subnet) + 1
|
||||
|
||||
@classmethod
|
||||
def _get_allocation_pools_from_gateway(cls, ip_version):
|
||||
"""Return allocation range for subnet of given gateway."""
|
||||
gateway = cls._get_gateway_from_tempest_conf(ip_version)
|
||||
return [{'start': str(gateway + 2), 'end': str(gateway + 3)}]
|
||||
|
||||
def subnet_dict(self, include_keys):
|
||||
"""Return a subnet dict which has include_keys and their corresponding
|
||||
value from self._subnet_data
|
||||
"""
|
||||
return dict((key, self._subnet_data[self._ip_version][key])
|
||||
for key in include_keys)
|
||||
|
||||
def _create_network(self, _auto_clean_up=True, network_name=None,
|
||||
**kwargs):
|
||||
network_name = network_name or data_utils.rand_name('adm-netwk')
|
||||
post_body = {'name': network_name}
|
||||
post_body.update(kwargs)
|
||||
LOG.debug("create ADM network: %s", str(post_body))
|
||||
body = self.create_network(client=self.admin_networks_client,
|
||||
**post_body)
|
||||
network = body['network']
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(self._try_delete_network, network['id'])
|
||||
return network
|
||||
|
||||
    # when you call _delete_network() you mean it is part of the test,
    # so exceptions are NOT swallowed (contrast _try_delete_network)
    def _delete_network(self, net_id):
        """Delete *net_id* as part of the test proper; errors propagate."""
        self._remove_network_from_book(net_id)
        return self.delete_network(net_id)
|
||||
|
||||
def _remove_network_from_book(self, net_id):
|
||||
for idx, netwk_info in zip(range(0, len(self.admin_netwk_info)),
|
||||
self.admin_netwk_info):
|
||||
net_client, network = netwk_info
|
||||
if network['id'] == net_id:
|
||||
self.admin_netwk_info.pop(idx)
|
||||
return
|
||||
|
||||
# call _try_delete_network() for teardown purpose, so pass exception
|
||||
def _try_delete_network(self, net_id):
|
||||
# delete network, if it exists
|
||||
self._remove_network_from_book(net_id)
|
||||
try:
|
||||
self.delete_network(net_id)
|
||||
# if network is not found, this means it was deleted in the test
|
||||
except exceptions.NotFound:
|
||||
pass
|
||||
|
||||
# by default, subnet will be deleted when its network is deleted
|
||||
def _create_subnet(self, network, gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0,
|
||||
_auto_clean_up=False, **kwargs):
|
||||
body = self.create_subnet(network,
|
||||
gateway=gateway,
|
||||
cidr=cidr,
|
||||
mask_bits=mask_bits,
|
||||
ip_version=ip_version,
|
||||
cidr_offset=cidr_offset,
|
||||
**kwargs)
|
||||
subnet = body['subnet']
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(self._try_delete_subnet, subnet['id'])
|
||||
return subnet
|
||||
|
||||
def _try_delete_subnet(self, net_id):
|
||||
# delete subnet, if it exists
|
||||
try:
|
||||
self.delete_subnet(net_id)
|
||||
# if network is not found, this means it was deleted in the test
|
||||
except exceptions.NotFound:
|
||||
pass
|
||||
|
||||
def _compare_resource_attrs(self, actual, expected):
|
||||
exclude_keys = set(actual).symmetric_difference(expected)
|
||||
self.assertThat(actual, custom_matchers.MatchesDictExceptForKeys(
|
||||
expected, exclude_keys))
|
||||
|
||||
def _create_verify_delete_subnet(self, cidr=None, mask_bits=None,
|
||||
**kwargs):
|
||||
network = self._create_network(_auto_clean_up=True)
|
||||
net_id = network['id']
|
||||
gateway = kwargs.pop('gateway', None)
|
||||
subnet = self._create_subnet(network, gateway, cidr, mask_bits,
|
||||
**kwargs)
|
||||
compare_args_full = dict(gateway_ip=gateway, cidr=cidr,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
compare_args = (dict((k, v)
|
||||
for k, v in six.iteritems(compare_args_full)
|
||||
if v is not None))
|
||||
|
||||
if 'dns_nameservers' in set(subnet).intersection(compare_args):
|
||||
self.assertEqual(sorted(compare_args['dns_nameservers']),
|
||||
sorted(subnet['dns_nameservers']))
|
||||
del subnet['dns_nameservers'], compare_args['dns_nameservers']
|
||||
|
||||
self._compare_resource_attrs(subnet, compare_args)
|
||||
self._delete_network(net_id)
|
||||
|
||||
    @decorators.idempotent_id('2ecbc3ab-93dd-44bf-a827-95beeb008e9a')
    def test_create_update_delete_network_subnet(self):
        """CRUD round-trip for an admin network and one of its subnets."""
        # Create a network
        network = self._create_network(_auto_clean_up=True)
        net_id = network['id']
        self.assertEqual('ACTIVE', network['status'])
        # Verify network update
        new_name = data_utils.rand_name('new-adm-netwk')
        body = self.update_network(net_id, name=new_name)
        updated_net = body['network']
        self.assertEqual(updated_net['name'], new_name)
        # Find a cidr that is not in use yet and create a subnet with it
        subnet = self._create_subnet(network)
        subnet_id = subnet['id']
        # Verify subnet update
        new_name = data_utils.rand_name('new-subnet')
        body = self.update_subnet(subnet_id, name=new_name)
        updated_subnet = body['subnet']
        self.assertEqual(updated_subnet['name'], new_name)
        self._delete_network(net_id)
|
||||
|
||||
    @decorators.idempotent_id('a2cf6398-aece-4256-88a6-0dfe8aa44975')
    def test_show_network(self):
        """Verify the details of a network."""
        body = self.show_network(self.network['id'])
        network = body['network']
        for key in ['id', 'name']:
            self.assertEqual(network[key], self.network[key])

    @decorators.idempotent_id('5b42067d-4b9d-4f04-bb6a-adb9756ebe0c')
    def test_show_network_fields(self):
        """Verify specific fields of a network."""
        fields = ['id', 'name']
        body = self.show_network(self.network['id'], fields=fields)
        network = body['network']
        # only the requested fields should come back
        self.assertEqual(sorted(network.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(network[field_name], self.network[field_name])

    @decorators.idempotent_id('324be3c2-457d-4e21-b0b3-5106bbbf1a28')
    def test_list_networks(self):
        """Verify the network exists in the list of all networks."""
        body = self.list_networks()
        networks = [network['id'] for network in body['networks']
                    if network['id'] == self.network['id']]
        self.assertNotEmpty(networks, "Created network not found in the list")

    @decorators.idempotent_id('3a934a8d-6b52-427e-af49-3dfdd224fdeb')
    def test_list_networks_fields(self):
        """Verify specific fields of the networks."""
        fields = ['id', 'name']
        body = self.list_networks(fields=fields)
        networks = body['networks']
        self.assertNotEmpty(networks, "Network list returned is empty")
        for network in networks:
            self.assertEqual(sorted(network.keys()), sorted(fields))
|
||||
|
||||
    @decorators.idempotent_id('5f6616c4-bfa7-4308-8eab-f45d75c94c6d')
    def test_show_subnet(self):
        """Verify the details of a subnet."""
        body = self.show_subnet(self.subnet['id'])
        subnet = body['subnet']
        self.assertNotEmpty(subnet, "Subnet returned has no fields")
        for key in ['id', 'cidr']:
            self.assertIn(key, subnet)
            self.assertEqual(subnet[key], self.subnet[key])

    @decorators.idempotent_id('2f326955-551e-4e9e-a4f6-e5db77c34c8d')
    def test_show_subnet_fields(self):
        """Verify specific fields of a subnet."""
        fields = ['id', 'network_id']
        body = self.show_subnet(self.subnet['id'], fields=fields)
        subnet = body['subnet']
        # only the requested fields should come back
        self.assertEqual(sorted(subnet.keys()), sorted(fields))
        for field_name in fields:
            self.assertEqual(subnet[field_name], self.subnet[field_name])

    @decorators.idempotent_id('66631557-2466-4827-bba6-d961b0242be3')
    def test_list_subnets(self):
        """Verify the subnet exists in the list of all subnets."""
        body = self.list_subnets()
        subnets = [subnet['id'] for subnet in body['subnets']
                   if subnet['id'] == self.subnet['id']]
        self.assertNotEmpty(subnets, "Created subnet not found in the list")

    @decorators.idempotent_id('3d5ea69b-f122-43e7-b7f4-c78586629eb8')
    def test_list_subnets_fields(self):
        """Verify specific fields of subnets."""
        fields = ['id', 'network_id']
        body = self.list_subnets(fields=fields)
        subnets = body['subnets']
        self.assertNotEmpty(subnets, "Subnet list returned is empty")
        for subnet in subnets:
            self.assertEqual(sorted(subnet.keys()), sorted(fields))
|
||||
|
||||
    @decorators.idempotent_id('e966bb2f-402c-49b7-8147-b275cee584c4')
    def test_delete_network_with_subnet(self):
        """Deleting a network must cascade-delete its subnets."""
        # Creates a network
        network = self._create_network(_auto_clean_up=True)
        net_id = network['id']

        # Find a cidr that is not in use yet and create a subnet with it
        subnet = self._create_subnet(network)
        subnet_id = subnet['id']

        # Delete network while the subnet still exists
        self._delete_network(net_id)

        # Verify that the subnet got automatically deleted.
        self.assertRaises(exceptions.NotFound,
                          self.show_subnet, subnet_id)
|
||||
|
||||
@decorators.idempotent_id('8aba0e1b-4b70-4181-a8a4-792c08db699d')
|
||||
def test_create_delete_subnet_without_gateway(self):
|
||||
self._create_verify_delete_subnet()
|
||||
|
||||
@decorators.idempotent_id('67364a4b-6725-4dbe-84cf-504bdb20ac06')
|
||||
def test_create_delete_subnet_with_gw(self):
|
||||
self._create_verify_delete_subnet(
|
||||
**self.subnet_dict(['gateway']))
|
||||
|
||||
@decorators.idempotent_id('f8f43e65-5090-4902-b5d2-2b610505cca6')
|
||||
def test_create_delete_subnet_with_allocation_pools(self):
|
||||
self._create_verify_delete_subnet(
|
||||
**self.subnet_dict(['allocation_pools']))
|
||||
|
||||
@decorators.idempotent_id('5b085669-97e6-48e0-b99e-315a9b4d8482')
|
||||
def test_create_delete_subnet_with_gw_and_allocation_pools(self):
|
||||
self._create_verify_delete_subnet(**self.subnet_dict(
|
||||
['gateway', 'allocation_pools']))
|
||||
|
||||
@decorators.skip_because(bug="1501827")
|
||||
@decorators.idempotent_id('3c4c36a1-684b-4e89-8e71-d528f19322a0')
|
||||
def test_create_delete_subnet_with_host_routes_and_dns_nameservers(self):
|
||||
self._create_verify_delete_subnet(
|
||||
**self.subnet_dict(['host_routes', 'dns_nameservers']))
|
||||
|
||||
    @decorators.idempotent_id('df518c87-b817-48b5-9365-bd1daaf68955')
    def test_create_delete_subnet_with_dns_nameservers(self):
        """Create/verify/delete a subnet with explicit DNS nameservers."""
        self._create_verify_delete_subnet(
            **self.subnet_dict(['dns_nameservers']))
    @decorators.idempotent_id('b6822feb-6760-4052-b550-f0fe8bac7451')
    def test_create_delete_subnet_with_dhcp_enabled(self):
        """Create/verify/delete a subnet with DHCP enabled."""
        self._create_verify_delete_subnet(enable_dhcp=True)
    @decorators.skip_because(bug="1501827")
    @decorators.idempotent_id('3c4c36a1-684a-4e89-8e71-d528f19324a0')
    def test_update_subnet_gw_dns_host_routes_dhcp(self):
        """Update gateway, DNS, host routes and DHCP on a subnet at once."""
        network = self._create_network(_auto_clean_up=True)
        subnet_attrs = ['gateway', 'host_routes',
                        'dns_nameservers', 'allocation_pools']
        subnet_dict = self.subnet_dict(subnet_attrs)
        subnet = self._create_subnet(network, **subnet_dict)
        subnet_id = subnet['id']
        # Pick a gateway guaranteed to differ from the one used at creation.
        new_gateway = str(netaddr.IPAddress(
            self._subnet_data[self._ip_version]['gateway']) + 1)
        # Verify subnet update
        new_host_routes = self._subnet_data[self._ip_version][
            'new_host_routes']

        new_dns_nameservers = self._subnet_data[self._ip_version][
            'new_dns_nameservers']
        kwargs = {'host_routes': new_host_routes,
                  'dns_nameservers': new_dns_nameservers,
                  'gateway_ip': new_gateway, 'enable_dhcp': True}

        new_name = "New_subnet"
        body = self.update_subnet(subnet_id, name=new_name, **kwargs)
        updated_subnet = body['subnet']
        kwargs['name'] = new_name
        # DNS servers may come back in any order; compare them sorted.
        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
                         sorted(kwargs['dns_nameservers']))
        del subnet['dns_nameservers'], kwargs['dns_nameservers']

        self._compare_resource_attrs(updated_subnet, kwargs)
        self._delete_network(network['id'])
    @decorators.idempotent_id('a5caa7d9-ab71-4278-a57c-d6631b7474f8')
    def test_update_subnet_gw_dns_dhcp(self):
        """Update gateway, DNS nameservers and DHCP on a subnet at once."""
        network = self._create_network(_auto_clean_up=True)
        subnet_attrs = ['gateway',
                        'dns_nameservers', 'allocation_pools']
        subnet_dict = self.subnet_dict(subnet_attrs)
        subnet = self._create_subnet(network, **subnet_dict)
        subnet_id = subnet['id']
        # Pick a gateway guaranteed to differ from the one used at creation.
        new_gateway = str(netaddr.IPAddress(
            self._subnet_data[self._ip_version]['gateway']) + 1)
        # Verify subnet update
        new_dns_nameservers = self._subnet_data[self._ip_version][
            'new_dns_nameservers']
        kwargs = {'dns_nameservers': new_dns_nameservers,
                  'gateway_ip': new_gateway, 'enable_dhcp': True}

        new_name = "New_subnet"
        body = self.update_subnet(subnet_id, name=new_name, **kwargs)
        updated_subnet = body['subnet']
        kwargs['name'] = new_name
        # DNS servers may come back in any order; compare them sorted.
        self.assertEqual(sorted(updated_subnet['dns_nameservers']),
                         sorted(kwargs['dns_nameservers']))
        del subnet['dns_nameservers'], kwargs['dns_nameservers']

        self._compare_resource_attrs(updated_subnet, kwargs)
        self._delete_network(network['id'])
    @decorators.skip_because(bug="1501827")
    @decorators.idempotent_id('a5caa7d5-ab71-4278-a57c-d6631b7474f8')
    def test_create_delete_subnet_all_attributes(self):
        """Create/verify/delete a subnet with gateway, routes, DNS and DHCP."""
        self._create_verify_delete_subnet(
            enable_dhcp=True,
            **self.subnet_dict(['gateway',
                                'host_routes',
                                'dns_nameservers']))
    @decorators.idempotent_id('a5caa7d9-ab71-4278-a57c-d6631b7474c8')
    def test_create_delete_subnet_with_gw_dns(self):
        """Create/verify/delete a DHCP subnet with gateway and DNS servers."""
        self._create_verify_delete_subnet(
            enable_dhcp=True,
            **self.subnet_dict(['gateway',
                                'dns_nameservers']))
    @decorators.idempotent_id('3c4c36a1-684b-4e89-8e71-d518f19324a0')
    def test_add_upd_del_multiple_overlapping_networks_subnet(self):
        """CRUD several networks whose subnets share the same CIDR."""
        r0, R1 = 0, 3  # (todo) get from CONF
        return self._add_upd_del_multiple_networks_subnet(
            r0, R1, "ovla-netwk")
    @decorators.idempotent_id('5267bf9d-de82-4af9-914a-8320e9f4c38c')
    def test_add_upd_del_multiple_nonoverlapping_networks_subnet(self):
        """CRUD several networks whose subnets use distinct CIDRs."""
        r0, R1 = 1, 4  # (todo) get from CONF
        return self._add_upd_del_multiple_networks_subnet(
            r0, R1, "noov-netwk", _step_cidr=2)
    def _add_upd_del_multiple_networks_subnet(self, r0, R1,
                                              name_prefix="m-network",
                                              _step_cidr=0):
        """Create, rename and delete networks with subnets for indexes [r0, R1).

        :param r0: first index (inclusive).
        :param R1: last index (exclusive).
        :param name_prefix: base string for generated network names.
        :param _step_cidr: CIDR offset multiplier; 0 makes all subnets use
            the same offset (overlapping case).
        """
        m_name = data_utils.rand_name(name_prefix)
        netwk = []
        for x in range(r0, R1):
            network = self._create_network(_auto_clean_up=True)
            net_id = network['id']
            self.assertEqual('ACTIVE', network['status'])
            new_name = m_name + "-%02d" % x
            body = self.update_network(net_id, name=new_name)
            network = body['network']
            cidr_offset = (x * _step_cidr) if _step_cidr > 0 else 0
            subnet = self._create_subnet(network, cidr_offset=cidr_offset)
            subnet_id = subnet['id']
            netwk.append([x, net_id, subnet_id])
        for x, net_id, subnet_id in netwk:
            # make sure subnet is updatable after creation
            new_name = m_name + "-%02d-snet" % x
            body = self.update_subnet(subnet_id, name=new_name)
            updated_subnet = body['subnet']
            self.assertEqual(updated_subnet['name'], new_name)
            self._delete_network(net_id)
|
@ -1,264 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
import base_provider as base
|
||||
from oslo_log import log as logging
|
||||
import six
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ProjectDeleteTest(base.BaseAdminNetworkTest):
    """Check Purge network resources using tenant-Id.

    Validate that network resources which are not in use should get
    deleted once neutron purge <tenant-id> is called.
    """
    @classmethod
    def skip_checks(cls):
        """Skip unless networks are reachable, admin credentials exist and
        the installed neutron CLI is new enough to support 'purge'."""
        super(ProjectDeleteTest, cls).skip_checks()
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            raise cls.skipException(msg)
        if not (CONF.auth.admin_username and CONF.auth.admin_password and
                CONF.auth.admin_project_name):
            msg = ('admin_username admin_password and admin_project_name\
                   should be provided in tempest.conf')
            raise cls.skipException(msg)
        # Ask the CLI for its version; 'neutron purge' needs client >= 4.1.2.
        process_obj = subprocess.Popen('neutron --version', shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
        neutron_version = process_obj.stdout.readlines()
        # NOTE(review): readlines() yields bytes on Python 3, so this
        # bytes-vs-str comparison would raise TypeError there, and plain
        # string comparison is not a reliable version compare -- confirm.
        if neutron_version[0] < '4.1.2':
            msg = ("Please update neutron verion,"
                   "run pip --upgrade pip and"
                   "pip install python-neutronclient upgrade")
            raise cls.skipException(msg)

    @classmethod
    def setup_clients(cls):
        # No clients beyond those of the admin base class are needed.
        super(ProjectDeleteTest, cls).setup_clients()

    @classmethod
    def resource_setup(cls):
        # No class-level resources beyond the base class are needed.
        super(ProjectDeleteTest, cls).resource_setup()

    @classmethod
    def create_tenant(self):
        # NOTE(review): only references the tenants client; it does not
        # actually create a tenant and appears unused -- confirm.
        self.admin_manager.tenants_client

    @classmethod
    def create_network_subnet(self, cidr=None, cidr_offset=0):
        """Create a network plus subnet; return (net_id, network, subnet)."""
        network_name = data_utils.rand_name('project-network-')
        resp = self.create_network(network_name)
        network = resp.get('network', resp)
        net_id = network['id']
        resp = self.create_subnet(network,
                                  name=network_name,
                                  cidr=cidr,
                                  cidr_offset=cidr_offset)
        subnet = resp.get('subnet', resp)
        # Re-read the network so the returned dict includes the subnet list.
        resp = self.show_network(net_id)
        s_network = resp.get('network', resp)
        return (net_id, s_network, subnet)

    def create_router_by_type(self, router_type, name=None, **kwargs):
        """Create a router of the given NSX type with an external gateway.

        Returns (routers_client, router).
        """
        routers_client = self.admin_manager.routers_client
        router_name = name or data_utils.rand_name('mtz-')
        create_kwargs = dict(name=router_name, external_gateway_info={
            "network_id": CONF.network.public_network_id})
        if router_type in ('shared', 'exclusive'):
            create_kwargs['router_type'] = router_type
        elif router_type in ('distributed'):
            # NOTE(review): 'in' on a plain string is a substring test; a
            # one-element tuple ('distributed',) was probably intended.
            create_kwargs['distributed'] = True
        kwargs.update(create_kwargs)
        router = routers_client.create_router(**kwargs)
        router = router['router'] if 'router' in router else router
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        routers_client.delete_router, router['id'])
        self.assertEqual(router['name'], router_name)
        return (routers_client, router)

    def create_router_and_add_interfaces(self, router_type, nets):
        """Create a router and attach it to every subnet in ``nets``."""
        (routers_client, router) = self.create_router_by_type(router_type)
        for net_id, (network, subnet) in six.iteritems(nets):
            # register to cleanup before adding interfaces so interfaces
            # and router can be deleted if test is aborted.
            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                            routers_client.remove_router_interface,
                            router['id'], subnet_id=subnet['id'])
            routers_client.add_router_interface(
                router['id'], subnet_id=subnet['id'])
        return router

    @decorators.idempotent_id('44e24f6b-9d9e-41a7-9d54-09d79b77dea5')
    def test_project_delete_purge_using_non_admin(self):
        """Purge as non-admin deletes nothing; as admin it completes."""
        nets = {}
        net_id, network, subnet = self.create_network_subnet(cidr_offset=0)
        nets[net_id] = (network, subnet)
        router_type = 'shared'
        self.create_router_and_add_interfaces(router_type, nets)
        # Point the CLI environment at the admin credentials first.
        uri = CONF.identity.uri
        os.environ['OS_AUTH_URL'] = uri
        os.environ['OS_REGION_NAME'] = 'nova'
        os.environ['OS_USERNAME'] = CONF.auth.admin_username
        os.environ['OS_TENANT_NAME'] = CONF.auth.admin_project_name
        os.environ['OS_PASSWORD'] = CONF.auth.admin_password
        name = data_utils.rand_name('tenant-delete-')
        tenant = self.admin_manager.tenants_client.create_tenant(name=name)
        username = name + 'user'
        kwargs = {'name': username, 'pass': 'password'}
        tenant_user = self.admin_manager.users_client.create_user(**kwargs)
        # Switch the CLI environment to the freshly created non-admin user.
        os.environ['OS_USERNAME'] = tenant_user['user']['username']
        os.environ['OS_TENANT_NAME'] = tenant['tenant']['name']
        os.environ['OS_PASSWORD'] = 'password'
        local_tenant_id = network['tenant_id']
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id,
                      tenant['tenant']['id'])).read().strip()
        # Non-admin purge produces no output: nothing was deleted.
        self.assertEqual(purge_output, '')
        # Back to admin credentials; this purge must succeed.
        os.environ['OS_USERNAME'] = CONF.auth.admin_username
        os.environ['OS_TENANT_NAME'] = CONF.auth.admin_project_name
        os.environ['OS_PASSWORD'] = CONF.auth.admin_password
        admin_tenant_id = os.popen(
            "openstack --insecure project list | grep admin | awk '{print $2}'")\
            .read()
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id, admin_tenant_id)).read().strip()
        self.assertIn('Purging resources: 100% complete', purge_output)

    @decorators.idempotent_id('77ec7045-f8f0-4aa1-8e1d-68c0647fda89')
    def test_project_delete_no_resource_for_deletion(self):
        """Purging a tenant with no resources reports nothing to delete."""
        name = data_utils.rand_name('tenant-delete-')
        network_client = self.admin_manager.networks_client
        create_kwargs = dict(name=name)
        network = network_client.create_network(**create_kwargs)
        network_client.delete_network(network['network']['id'])
        uri = CONF.identity.uri
        os.environ['OS_AUTH_URL'] = uri
        os.environ['OS_REGION_NAME'] = 'nova'
        os.environ['OS_USERNAME'] = CONF.auth.admin_username
        os.environ['OS_TENANT_NAME'] = CONF.auth.admin_project_name
        os.environ['OS_PASSWORD'] = CONF.auth.admin_password
        local_tenant_id = network['network']['tenant_id']
        admin_tenant_id = os.popen(
            "openstack --insecure project list | grep admin | awk '{print $2}'")\
            .read()
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id, admin_tenant_id)).read().strip()
        # Purge twice: the second run is guaranteed to find nothing left.
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id, admin_tenant_id)).read().strip()
        LOG.debug("create VLAN network: %s", (purge_output))
        check_output = 'Tenant has no supported resources'
        self.assertIn(check_output, purge_output)
        LOG.debug("Testcase run completed")

    @decorators.idempotent_id('38bf4e22-c67a-42db-9e9d-a087369207d4')
    def test_project_delete_with_all_resorces_deleted(self):
        """Purge deletes security groups, router, network and floating IP."""
        name = data_utils.rand_name('tenant-delete-')
        security_client = self.admin_manager.security_groups_client
        create_kwargs = dict(name=name)
        sec_group = security_client.create_security_group(**create_kwargs)
        network_name = name
        resp = self.create_network(network_name)
        network = resp.get('network', resp)
        routers_client = self.admin_manager.routers_client
        create_kwargs = dict(name=name)
        router = routers_client.create_router(**create_kwargs)
        floatingip_client = self.admin_manager.floating_ips_client
        create_kwargs = {'floating_network_id': CONF.network.public_network_id}
        floatingip = floatingip_client.create_floatingip(**create_kwargs)
        uri = CONF.identity.uri
        os.environ['OS_AUTH_URL'] = uri
        os.environ['OS_REGION_NAME'] = 'nova'
        os.environ['OS_USERNAME'] = CONF.auth.admin_username
        os.environ['OS_TENANT_NAME'] = CONF.auth.admin_project_name
        os.environ['OS_PASSWORD'] = CONF.auth.admin_password
        self.admin_networks_client
        local_tenant_id = network['tenant_id']
        admin_tenant_id = os.popen(
            "openstack --insecure project list | grep admin | awk '{print $2}'")\
            .read()
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id, admin_tenant_id)).read().strip()
        LOG.debug("create VLAN network: %s", (purge_output))
        check_output = ("Deleted 2 security_groups, 1 router, 1 network, "
                        "1 floatingip")
        self.assertIn(check_output, purge_output)
        # Verify every purged resource is really gone.
        list_of_sec_groups = security_client.list_security_groups()
        self.assertNotIn(sec_group['security_group']['id'], list_of_sec_groups)
        list_of_networks = self.admin_manager.networks_client.list_networks()
        self.assertNotIn(network['id'], list_of_networks)
        list_of_routers = routers_client.list_routers()
        self.assertNotIn(router['router']['id'], list_of_routers)
        list_of_floatingips = floatingip_client.list_floatingips()
        self.assertNotIn(floatingip['floatingip']['id'], list_of_floatingips)
        LOG.debug("Testcase run completed")

    @decorators.idempotent_id('d617d637-5b2d-4ac8-93ce-80060d495bb2')
    def test_project_delete_with_some_resources_left(self):
        """Purge reports resources it could not delete (an in-use port)."""
        network_name = data_utils.rand_name('tenant-delete-')
        resp = self.create_network(network_name)
        network = resp.get('network', resp)
        net_id = network['id']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_network, net_id)
        resp = self.create_subnet(network,
                                  name=network_name,
                                  cidr=None,
                                  cidr_offset=0)
        subnet = resp.get('subnet', resp)
        resp = self.show_network(net_id)
        s_network = resp.get('network', resp)
        net_subnets = s_network['subnets']
        self.assertIn(subnet['id'], net_subnets)
        uri = CONF.identity.uri
        os.environ['OS_AUTH_URL'] = uri
        os.environ['OS_REGION_NAME'] = 'nova'
        os.environ['OS_USERNAME'] = CONF.auth.admin_username
        os.environ['OS_TENANT_NAME'] = CONF.auth.admin_project_name
        os.environ['OS_PASSWORD'] = CONF.auth.admin_password
        self.admin_networks_client
        local_tenant_id = network['tenant_id']
        cmd = ("openstack --insecure project list |"
               " grep admin | awk '{print $2}'")
        admin_tenant_id = os.popen(cmd).read()
        purge_output =\
            os.popen('neutron --insecure purge %s --tenant-id=%s' %
                     (local_tenant_id, admin_tenant_id)).read().strip()
        check_output = 'Deleted 1 security_group, 1 network'
        self.assertIn(check_output, purge_output)
        check_output = 'The following resources could not be deleted: 1 port'
        self.assertIn(check_output, purge_output)
        list_of_subnets = self.admin_manager.subnets_client.list_subnets()
        self.assertNotIn(subnet['id'], list_of_subnets)
        list_of_networks = self.admin_manager.networks_client.list_networks()
        self.assertNotIn(network['id'], list_of_networks)
        LOG.debug("create VLAN network: %s", (purge_output))
@ -1,921 +0,0 @@
|
||||
# Copyright 2014 NEC Corporation. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import re
|
||||
import time
|
||||
|
||||
from neutron_lib import constants as nl_constants
|
||||
import six
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.common import constants
|
||||
from vmware_nsx_tempest.services import fwaas_client as FWAASC
|
||||
from vmware_nsx_tempest.services import nsxv_client
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class FWaaSTestJSON(base.BaseNetworkTest):
|
||||
|
||||
"""
|
||||
Tests the following operations in the Neutron API using the REST client for
|
||||
Neutron:
|
||||
|
||||
CRUD firewall rules
|
||||
CRUD firewall policies
|
||||
CRUD firewall rules
|
||||
Insert firewall rule to policy
|
||||
Remove firewall rule from policy
|
||||
Insert firewall rule after/before rule in policy
|
||||
Update firewall policy audited attribute
|
||||
Create exclusive router and attach to Firewall and check backend
|
||||
Create distributed router and attach to Firewall and check backend
|
||||
Create exclusive/distributed router and attach to Firewall and
|
||||
check backend
|
||||
"""
|
||||
    @classmethod
    def resource_setup(cls):
        """Set up the FWaaS v1 client, NSX-V backend client and fixtures."""
        super(FWaaSTestJSON, cls).resource_setup()
        cls.fwaasv1_client = FWAASC.get_client(cls.manager)
        if not test.is_extension_enabled('fwaas', 'network'):
            msg = "FWaaS Extension not enabled."
            raise cls.skipException(msg)
        # Extract the NSX manager IP from the configured manager URI.
        manager_ip = re.search(r"(\d{1,3}\.){3}\d{1,3}",
                               CONF.nsxv.manager_uri).group(0)
        cls.vsm = nsxv_client.VSMClient(
            manager_ip, CONF.nsxv.user, CONF.nsxv.password)

        # Shared rule/policy used by the read-only list/show tests.
        cls.fw_rule = cls.fwaasv1_client.create_firewall_rule(action="allow",
                                                              protocol="tcp")
        cls.fw_policy = cls.fwaasv1_client.create_firewall_policy()
    def create_firewall_rule(self, **kwargs):
        """Create a firewall rule with auto-cleanup; return the rule dict."""
        body = self.fwaasv1_client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            **kwargs)
        fw_rule = body['firewall_rule']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.fwaasv1_client.delete_firewall_rule,
                        fw_rule['id'])
        return fw_rule
    def create_firewall_policy(self, **kwargs):
        """Create a firewall policy with auto-cleanup; return the policy."""
        body = self.fwaasv1_client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"),
            **kwargs)
        fw_policy = body['firewall_policy']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.fwaasv1_client.delete_firewall_policy,
                        fw_policy['id'])
        return fw_policy
||||
    def delete_firewall_and_wait(self, firewall_id):
        """Delete a firewall and wait until it leaves PENDING_DELETE."""
        self.fwaasv1_client.delete_firewall(firewall_id)
        self._wait_firewall_while(firewall_id, [nl_constants.PENDING_DELETE],
                                  not_found_ok=True)
    def create_firewall(self, **kwargs):
        """Create a firewall with auto-cleanup; return the firewall dict."""
        body = self.fwaasv1_client.create_firewall(
            name=data_utils.rand_name("fw"),
            **kwargs)
        fw = body['firewall']
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        self.delete_firewall_and_wait,
                        fw['id'])
        return fw
||||
    def _wait_firewall_while(self, firewall_id, statuses, not_found_ok=False):
        """Poll the firewall until its status leaves ``statuses``.

        :param statuses: transient states to wait out.
        :param not_found_ok: treat NotFound as completion (delete case).
        :raises lib_exc.TimeoutException: when the client build timeout
            elapses before the firewall leaves the given states.
        """
        start = int(time.time())
        if not_found_ok:
            expected_exceptions = (lib_exc.NotFound)
        else:
            expected_exceptions = ()
        while True:
            try:
                fw = self.fwaasv1_client.show_firewall(firewall_id)
            except expected_exceptions:
                # Resource is gone and that is acceptable: done waiting.
                break
            status = fw['firewall']['status']
            if status not in statuses:
                break
            if int(time.time()) - start >= self.fwaasv1_client.build_timeout:
                msg = ("Firewall %(firewall)s failed to reach "
                       "non PENDING status (current %(status)s)") % {
                    "firewall": firewall_id,
                    "status": status,
                }
                raise lib_exc.TimeoutException(msg)
            time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
||||
    def _wait_firewall_ready(self, firewall_id):
        """Wait until the firewall leaves PENDING_CREATE/PENDING_UPDATE."""
        self._wait_firewall_while(firewall_id,
                                  [nl_constants.PENDING_CREATE,
                                   nl_constants.PENDING_UPDATE])
    def _try_delete_router(self, router):
        """Best-effort router delete used as a cleanup handler."""
        # delete router, if it exists
        try:
            self.delete_router(router)
        # if router is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass
||||
    def _try_delete_policy(self, policy_id):
        """Best-effort firewall-policy delete used as a cleanup handler."""
        # delete policy, if it exists
        try:
            self.fwaasv1_client.delete_firewall_policy(policy_id)
        # if policy is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass
||||
    def _try_delete_rule(self, rule_id):
        """Best-effort firewall-rule delete used as a cleanup handler."""
        # delete rule, if it exists
        try:
            self.fwaasv1_client.delete_firewall_rule(rule_id)
        # if rule is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass
||||
    def _try_delete_firewall(self, fw_id):
        """Best-effort firewall delete; then wait for the resource to go."""
        # delete firewall, if it exists
        try:
            self.fwaasv1_client.delete_firewall(fw_id)
        # if firewall is not found, this means it was deleted in the test
        except lib_exc.NotFound:
            pass
        self.fwaasv1_client.wait_for_resource_deletion(fw_id)
||||
    def _wait_until_ready(self, fw_id):
        """Wait until the firewall reaches ACTIVE or CREATED state.

        :raises lib_exc.TimeoutException: if neither state is reached
            within CONF.network.build_timeout.
        """
        target_states = ('ACTIVE', 'CREATED')

        def _wait():
            # Predicate polled by call_until_true.
            firewall = self.fwaasv1_client.show_firewall(fw_id)
            firewall = firewall['firewall']
            return firewall['status'] in target_states
        if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
                                          CONF.network.build_interval):
            m = ("Timed out waiting for firewall %s to reach %s state(s)" %
                 (fw_id, target_states))
            raise lib_exc.TimeoutException(m)
||||
    def _wait_until_deleted(self, fw_id):
        """Wait until the firewall is gone; ERROR state raises immediately.

        :raises lib_exc.DeleteErrorException: firewall went to ERROR.
        :raises lib_exc.TimeoutException: firewall still present after
            CONF.network.build_timeout.
        """
        def _wait():
            try:
                firewall = self.fwaasv1_client.show_firewall(fw_id)
            except lib_exc.NotFound:
                # Gone: polling succeeds.
                return True
            fw_status = firewall['firewall']['status']
            if fw_status == 'ERROR':
                raise lib_exc.DeleteErrorException(resource_id=fw_id)
            # Implicit None (falsy) keeps call_until_true polling.

        if not test_utils.call_until_true(_wait, CONF.network.build_timeout,
                                          CONF.network.build_interval):
            m = ("Timed out waiting for firewall %s deleted" % fw_id)
            raise lib_exc.TimeoutException(m)
||||
def _check_firewall_rule_exists_at_backend(self, rules,
|
||||
firewall_rule_name):
|
||||
for rule in rules:
|
||||
if rule['name'] in firewall_rule_name:
|
||||
self.assertIn(rule['name'], firewall_rule_name)
|
||||
return True
|
||||
return False
|
||||
|
||||
def _create_firewall_rule_name(self, body):
|
||||
firewall_rule_name = body['firewall_rule']['name']
|
||||
firewall_rule_name = "Fwaas-" + firewall_rule_name
|
||||
return firewall_rule_name
|
||||
|
||||
    def _create_firewall_advanced_topo(self, router_type):
        """Create a firewall with NO_OF_ENTRIES alternating tcp/udp rules.

        :param router_type: 'shared', 'exclusive' or 'distributed'.
        :returns: dict with router, firewall_name, fw_policy_id,
            firewall_id and the backend rule count before creation.
        """
        fw_rule_id_list = []
        router = self.create_router_by_type(router_type)
        self.addCleanup(self._try_delete_router, router)
        # Locate the NSX edge backing the new router by name.
        edges = self.vsm.get_all_edges()
        for key in edges:
            if router['name'] in key['name']:
                edge_id = key['id']
                break
        rules = self.vsm.get_edge_firewall_rules(edge_id)
        rules_before = len(rules)
        for rule_id in range(0, constants.NO_OF_ENTRIES):
            # Alternate protocols: even indexes tcp, odd indexes udp.
            if rule_id % 2 == 0:
                action = "allow"
                protocol = "tcp"
            else:
                action = "allow"
                protocol = "udp"
            firewall_rule = self.fwaasv1_client.create_firewall_rule(
                name=data_utils.rand_name("fw-rule"),
                action=action,
                protocol=protocol)
            fw_rule_id = firewall_rule['firewall_rule']['id']
            # NOTE(review): firewall_name keeps only the LAST rule's backend
            # name once the loop finishes -- confirm that is intended.
            firewall_name = self._create_firewall_rule_name(firewall_rule)
            self.addCleanup(self._try_delete_rule, fw_rule_id)
            fw_rule_id_list.append(fw_rule_id)
        # Update firewall policy
        body = self.fwaasv1_client.create_firewall_policy(
            name=data_utils.rand_name("fw-policy"))
        fw_policy_id = body['firewall_policy']['id']
        self.addCleanup(self._try_delete_policy, fw_policy_id)
        # Insert rule to firewall policy
        for fw_rule_id in fw_rule_id_list:
            self.fwaasv1_client.insert_firewall_rule_in_policy(
                fw_policy_id, fw_rule_id, '', '')
        firewall_1 = self.fwaasv1_client.create_firewall(
            name=data_utils.rand_name("firewall"),
            firewall_policy_id=fw_policy_id,
            router_ids=[router['id']])
        created_firewall = firewall_1['firewall']
        self.addCleanup(self._try_delete_firewall, created_firewall['id'])
        # Wait for the firewall resource to become ready
        self._wait_until_ready(created_firewall['id'])
        firewall_topo = dict(router=router, firewall_name=firewall_name,
                             fw_policy_id=fw_policy_id,
                             firewall_id=created_firewall['id'],
                             rules_before=rules_before)
        return firewall_topo
||||
    def _create_firewall_basic_topo(self, router_type, policy=None):
        """Create router + one tcp allow rule + (new or given) policy + fw.

        :param router_type: 'shared', 'exclusive' or 'distributed'.
        :param policy: existing policy id to reuse instead of creating one.
        :returns: dict with router, firewall_name, fw_policy_id,
            fw_rule_id1 and firewall_id.
        """
        router = self.create_router_by_type(router_type)
        self.addCleanup(self._try_delete_router, router)
        body = self.fwaasv1_client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="tcp")
        fw_rule_id1 = body['firewall_rule']['id']
        firewall_name = self._create_firewall_rule_name(body)
        self.addCleanup(self._try_delete_rule, fw_rule_id1)
        # Create firewall policy
        if not policy:
            body = self.fwaasv1_client.create_firewall_policy(
                name=data_utils.rand_name("fw-policy"))
            fw_policy_id = body['firewall_policy']['id']
            self.addCleanup(self._try_delete_policy, fw_policy_id)
            # Insert rule to firewall policy
            self.fwaasv1_client.insert_firewall_rule_in_policy(
                fw_policy_id, fw_rule_id1, '', '')
        else:
            fw_policy_id = policy
        # Create firewall
        firewall_1 = self.fwaasv1_client.create_firewall(
            name=data_utils.rand_name("firewall"),
            firewall_policy_id=fw_policy_id,
            router_ids=[router['id']])
        created_firewall = firewall_1['firewall']
        self.addCleanup(self._try_delete_firewall, created_firewall['id'])
        # Wait for the firewall resource to become ready
        self._wait_until_ready(created_firewall['id'])
        firewall_topo = dict(router=router, firewall_name=firewall_name,
                             fw_policy_id=fw_policy_id,
                             fw_rule_id1=fw_rule_id1,
                             firewall_id=created_firewall['id'])
        return firewall_topo
||||
def _get_list_fw_rule_ids(self, fw_policy_id):
|
||||
fw_policy = self.fwaasv1_client.show_firewall_policy(
|
||||
fw_policy_id)
|
||||
return [ruleid for ruleid in fw_policy['firewall_policy']
|
||||
['firewall_rules']]
|
||||
|
||||
    def create_router_by_type(self, router_type, name=None, **kwargs):
        """Create an NSX router of the given type with an external gateway."""
        routers_client = self.manager.routers_client
        router_name = name or data_utils.rand_name('mtz-')
        create_kwargs = dict(name=router_name, external_gateway_info={
            "network_id": CONF.network.public_network_id})
        if router_type in ('shared', 'exclusive'):
            create_kwargs['router_type'] = router_type
        elif router_type in ('distributed'):
            # NOTE(review): 'in' on a plain string is a substring test; a
            # one-element tuple ('distributed',) was probably intended.
            create_kwargs['distributed'] = True
        kwargs.update(create_kwargs)
        router = routers_client.create_router(**kwargs)
        router = router['router'] if 'router' in router else router
        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                        routers_client.delete_router, router['id'])
        self.assertEqual(router['name'], router_name)
        return router
||||
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('c72197f1-b5c6-453f-952e-007acea6df86')
    def test_list_firewall_rules(self):
        """The shared rule from resource_setup is the first one listed."""
        # List firewall rules
        fw_rules = self.fwaasv1_client.list_firewall_rules()
        fw_rules = fw_rules['firewall_rules']
        self.assertEqual(self.fw_rule['firewall_rule']['id'],
                         fw_rules[0]['id'])
        self.assertEqual(self.fw_rule['firewall_rule']['name'],
                         fw_rules[0]['name'])
        self.assertEqual(self.fw_rule['firewall_rule']['action'],
                         fw_rules[0]['action'])
        self.assertEqual(self.fw_rule['firewall_rule']['protocol'],
                         fw_rules[0]['protocol'])
        self.assertEqual(self.fw_rule['firewall_rule']['enabled'],
                         fw_rules[0]['enabled'])
||||
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('ef92ba0d-f7c2-46cb-ad4b-21c62cfa85a0')
    def test_create_update_delete_firewall_rule(self):
        """Full CRUD lifecycle of a single firewall rule."""
        # Create firewall rule
        body = self.fwaasv1_client.create_firewall_rule(
            name=data_utils.rand_name("fw-rule"),
            action="allow",
            protocol="tcp")
        fw_rule_id = body['firewall_rule']['id']
        self.addCleanup(self._try_delete_rule, fw_rule_id)

        # Update firewall rule
        body = self.fwaasv1_client.update_firewall_rule(fw_rule_id,
                                                        action="deny")
        self.assertEqual("deny", body["firewall_rule"]['action'])

        # Delete firewall rule
        self.fwaasv1_client.delete_firewall_rule(fw_rule_id)
        # Confirm deletion
        fw_rules = self.fwaasv1_client.list_firewall_rules()
        self.assertNotIn(fw_rule_id,
                         [m['id'] for m in fw_rules['firewall_rules']])
||||
    @decorators.attr(type='nsxv')
    @decorators.idempotent_id('264e8b67-a1ef-4ba1-8757-808b249a5320')
    def test_show_firewall_rule(self):
        """show_firewall_rule returns every field set at creation."""
        # show a created firewall rule
        fw_rule = self.fwaasv1_client.show_firewall_rule(
            self.fw_rule['firewall_rule']['id'])
        for key, value in six.iteritems(fw_rule['firewall_rule']):
            self.assertEqual(self.fw_rule['firewall_rule'][key], value)
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('029cd998-9dd4-4a0a-b79d-8bafd8223bda')
|
||||
def test_list_firewall_policies(self):
|
||||
fw_policies = self.fwaasv1_client.list_firewall_policies()
|
||||
fw_policies = fw_policies['firewall_policies']
|
||||
self.assertEqual(self.fw_policy['firewall_policy']['id'],
|
||||
fw_policies[0]['id'])
|
||||
self.assertEqual(self.fw_policy['firewall_policy']['name'],
|
||||
fw_policies[0]['name'])
|
||||
self.assertEqual(self.fw_policy['firewall_policy']['firewall_rules'],
|
||||
fw_policies[0]['firewall_rules'])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('28c261c8-4fb3-4630-8a9b-707c93536a54')
|
||||
def test_create_update_delete_firewall_policy(self):
|
||||
# Create firewall policy
|
||||
body = self.fwaasv1_client.create_firewall_policy(
|
||||
name=data_utils.rand_name("fw-policy"))
|
||||
fw_policy_id = body['firewall_policy']['id']
|
||||
self.addCleanup(self._try_delete_policy, fw_policy_id)
|
||||
|
||||
# Update firewall policy
|
||||
body = self.fwaasv1_client.update_firewall_policy(
|
||||
fw_policy_id,
|
||||
name="updated_policy")
|
||||
updated_fw_policy = body["firewall_policy"]
|
||||
self.assertEqual("updated_policy", updated_fw_policy['name'])
|
||||
|
||||
# Delete firewall policy
|
||||
self.fwaasv1_client.delete_firewall_policy(fw_policy_id)
|
||||
# Confirm deletion
|
||||
fw_policies = self.fwaasv1_client.list_firewall_policies()
|
||||
fw_policies = fw_policies['firewall_policies']
|
||||
self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('8bc7ad6d-4163-4def-9e1d-b9d24d9e8bf8')
|
||||
def test_show_firewall_policy(self):
|
||||
# show a created firewall policy
|
||||
fw_policy = self.fwaasv1_client.show_firewall_policy(
|
||||
self.fw_policy['firewall_policy']['id'])
|
||||
fw_policy = fw_policy['firewall_policy']
|
||||
for key, value in six.iteritems(fw_policy):
|
||||
self.assertEqual(self.fw_policy['firewall_policy'][key], value)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('0c320840-f3e4-4960-987d-a6f06d327fe1')
|
||||
def test_create_show_delete_firewall(self):
|
||||
# Create tenant network resources required for an ACTIVE firewall
|
||||
network = self.create_network()
|
||||
subnet = self.create_subnet(network)
|
||||
router = self.create_router_by_type('exclusive')
|
||||
self.addCleanup(self._try_delete_router, router)
|
||||
self.routers_client.add_router_interface(router['id'],
|
||||
subnet_id=subnet['id'])
|
||||
# Create firewall
|
||||
body = self.fwaasv1_client.create_firewall(
|
||||
name=data_utils.rand_name("firewall"),
|
||||
firewall_policy_id=self.fw_policy['firewall_policy']['id'])
|
||||
created_firewall = body['firewall']
|
||||
firewall_id = created_firewall['id']
|
||||
self.addCleanup(self._try_delete_firewall, firewall_id)
|
||||
# Wait for the firewall resource to become ready
|
||||
self._wait_until_ready(firewall_id)
|
||||
# show a created firewall
|
||||
firewall = self.fwaasv1_client.show_firewall(firewall_id)
|
||||
firewall = firewall['firewall']
|
||||
for key, value in six.iteritems(firewall):
|
||||
if key == 'status':
|
||||
continue
|
||||
self.assertEqual(created_firewall[key], value)
|
||||
# list firewall
|
||||
firewalls = self.fwaasv1_client.list_firewalls()
|
||||
firewalls = firewalls['firewalls']
|
||||
# Delete firewall
|
||||
self.fwaasv1_client.delete_firewall(firewall_id)
|
||||
# Wait for the firewall resource to be deleted
|
||||
self._wait_until_deleted(firewall_id)
|
||||
# Confirm deletion
|
||||
firewalls = self.fwaasv1_client.list_firewalls()['firewalls']
|
||||
self.assertNotIn(firewall_id, [m['id'] for m in firewalls])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('d9b23b3a-66ea-4591-9f8f-fa5a71fe0645')
|
||||
def test_firewall_insertion_mode_add_remove_mix_router(self):
|
||||
# Create legacy routers
|
||||
router1 = self.create_router_by_type('exclusive')
|
||||
self.addCleanup(self._try_delete_router, router1)
|
||||
router2 = self.create_router_by_type('distributed')
|
||||
self.addCleanup(self._try_delete_router, router2)
|
||||
|
||||
# Create firewall on a router1
|
||||
body = self.fwaasv1_client.create_firewall(
|
||||
name=data_utils.rand_name("firewall"),
|
||||
firewall_policy_id=self.fw_policy['firewall_policy']['id'],
|
||||
router_ids=[router1['id']])
|
||||
created_firewall = body['firewall']
|
||||
firewall_id = created_firewall['id']
|
||||
self.addCleanup(self._try_delete_firewall, firewall_id)
|
||||
self.assertEqual([router1['id']], created_firewall['router_ids'])
|
||||
# Legacy routers are scheduled on L3 agents on network plug events
|
||||
# Hence firewall resource will not became ready at this stage
|
||||
network = self.create_network()
|
||||
subnet = self.create_subnet(network)
|
||||
self.routers_client.add_router_interface(router1['id'],
|
||||
subnet_id=subnet['id'])
|
||||
# Wait for the firewall resource to become ready
|
||||
self._wait_until_ready(firewall_id)
|
||||
# Add router2 to the firewall
|
||||
body = self.fwaasv1_client.update_firewall(
|
||||
firewall_id, router_ids=[router1['id'], router2['id']])
|
||||
updated_firewall = body['firewall']
|
||||
self.assertIn(router2['id'], updated_firewall['router_ids'])
|
||||
self.assertEqual(2, len(updated_firewall['router_ids']))
|
||||
# Wait for the firewall resource to become ready
|
||||
self._wait_until_ready(firewall_id)
|
||||
# Remove router1 from the firewall
|
||||
body = self.fwaasv1_client.update_firewall(
|
||||
firewall_id, router_ids=[router2['id']])
|
||||
updated_firewall = body['firewall']
|
||||
self.assertNotIn(router1['id'], updated_firewall['router_ids'])
|
||||
self.assertEqual(1, len(updated_firewall['router_ids']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('964e0254-e7f2-4bbe-a4c8-db09da8d79ee')
|
||||
def test_firewall_insertion_mode_add_remove_router(self):
|
||||
# Create legacy routers
|
||||
router1 = self.create_router_by_type('exclusive')
|
||||
self.addCleanup(self._try_delete_router, router1)
|
||||
router2 = self.create_router_by_type('exclusive')
|
||||
self.addCleanup(self._try_delete_router, router2)
|
||||
|
||||
# Create firewall on a router1
|
||||
body = self.fwaasv1_client.create_firewall(
|
||||
name=data_utils.rand_name("firewall"),
|
||||
firewall_policy_id=self.fw_policy['firewall_policy']['id'],
|
||||
router_ids=[router1['id']])
|
||||
created_firewall = body['firewall']
|
||||
firewall_id = created_firewall['id']
|
||||
self.addCleanup(self._try_delete_firewall, firewall_id)
|
||||
|
||||
self.assertEqual([router1['id']], created_firewall['router_ids'])
|
||||
|
||||
# Legacy routers are scheduled on L3 agents on network plug events
|
||||
# Hence firewall resource will not became ready at this stage
|
||||
network = self.create_network()
|
||||
subnet = self.create_subnet(network)
|
||||
self.routers_client.add_router_interface(router1['id'],
|
||||
subnet_id=subnet['id'])
|
||||
# Wait for the firewall resource to become ready
|
||||
self._wait_until_ready(firewall_id)
|
||||
|
||||
# Add router2 to the firewall
|
||||
body = self.fwaasv1_client.update_firewall(
|
||||
firewall_id, router_ids=[router1['id'], router2['id']])
|
||||
updated_firewall = body['firewall']
|
||||
self.assertIn(router2['id'], updated_firewall['router_ids'])
|
||||
self.assertEqual(2, len(updated_firewall['router_ids']))
|
||||
|
||||
# Wait for the firewall resource to become ready
|
||||
self._wait_until_ready(firewall_id)
|
||||
|
||||
# Remove router1 from the firewall
|
||||
body = self.fwaasv1_client.update_firewall(
|
||||
firewall_id, router_ids=[router2['id']])
|
||||
updated_firewall = body['firewall']
|
||||
self.assertNotIn(router1['id'], updated_firewall['router_ids'])
|
||||
self.assertEqual(1, len(updated_firewall['router_ids']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('662b252f-fa1b-49fe-8599-a37feab9fae8')
|
||||
def test_firewall_insertion_one_policy_two_router_backend(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
edge_id_excl = 0
|
||||
edge_id_dist = 0
|
||||
firewall_topo1 = self._create_firewall_basic_topo('exclusive')
|
||||
firewall_topo2 = \
|
||||
self._create_firewall_basic_topo('distributed',
|
||||
firewall_topo1['fw_policy_id'])
|
||||
edges = self.vsm.get_all_edges()
|
||||
firewall_topo2['router']['name'] += '-plr'
|
||||
for key in edges:
|
||||
if firewall_topo1['router']['name'] in key['name']:
|
||||
edge_id_excl = key['id']
|
||||
if firewall_topo2['router']['name'] in key['name']:
|
||||
edge_id_dist = key['id']
|
||||
if edge_id_excl and edge_id_dist:
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id_excl)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo1['firewall_name']))
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id_dist)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo1['firewall_name']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('00330ef3-0a2e-4556-84d1-448d09c5ca2e')
|
||||
def test_firewall_insertion_two_policy_two_router_backend(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
edge_id_excl = 0
|
||||
edge_id_dist = 0
|
||||
firewall_topo1 = self._create_firewall_basic_topo('exclusive')
|
||||
firewall_topo2 = self._create_firewall_basic_topo('distributed')
|
||||
edges = self.vsm.get_all_edges()
|
||||
firewall_topo2['router']['name'] += '-plr'
|
||||
for key in edges:
|
||||
if firewall_topo1['router']['name'] in key['name']:
|
||||
edge_id_excl = key['id']
|
||||
if firewall_topo2['router']['name'] in key['name']:
|
||||
edge_id_dist = key['id']
|
||||
if edge_id_excl and edge_id_dist:
|
||||
break
|
||||
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id_excl)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo1['firewall_name']))
|
||||
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id_dist)
|
||||
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo2['firewall_name']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('8092bd48-e4c1-4709-8a3b-70e7bf6a78c9')
|
||||
def test_firewall_insertion_mode_two_firewall_rules_check_backend(self):
|
||||
rule_no = 1
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
# Create second firewall rule
|
||||
firewall_rule_2 = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="deny",
|
||||
protocol="icmp")
|
||||
fw_rule_id2 = firewall_rule_2['firewall_rule']['id']
|
||||
firewall_rule_name_2 = \
|
||||
"Fwaas-" + firewall_rule_2['firewall_rule']['name']
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id2)
|
||||
self.addCleanup(self._try_delete_policy, firewall_topo['fw_policy_id'])
|
||||
self.addCleanup(self._try_delete_firewall,
|
||||
firewall_topo['firewall_id'])
|
||||
# Insert rule-2 to firewall policy
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
firewall_topo['fw_policy_id'], fw_rule_id2, '',
|
||||
firewall_topo['fw_rule_id1'])
|
||||
self._wait_firewall_ready(firewall_topo['firewall_id'])
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
for rule in rules:
|
||||
if rule['name'] in ('VSERule', 'MDServiceIP', 'MDInterEdgeNet'):
|
||||
continue
|
||||
if rule_no == 1:
|
||||
self.assertIn(rule['name'], firewall_rule_name_2,
|
||||
"Rule exists at position 1")
|
||||
rule_no += rule_no
|
||||
continue
|
||||
if rule_no == 2:
|
||||
self.assertIn(rule['name'], firewall_topo['firewall_name'],
|
||||
"Rule exists at position 2")
|
||||
break
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('da65de07-a60f-404d-ad1d-2d2c71a3b6a5')
|
||||
def test_firewall_add_delete_between_routers(self):
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
router = self.create_router_by_type('exclusive')
|
||||
self.addCleanup(self._try_delete_router, router)
|
||||
self.fwaasv1_client.update_firewall(
|
||||
firewall_topo['firewall_id'],
|
||||
router_ids=[router['id']])
|
||||
self._wait_firewall_ready(firewall_topo['firewall_id'])
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if router['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules,
|
||||
firewall_topo['firewall_name']))
|
||||
self.fwaasv1_client.update_firewall(
|
||||
firewall_topo['firewall_id'],
|
||||
router_ids=[router['id'], firewall_topo['router']['id']])
|
||||
self._wait_firewall_ready(firewall_topo['firewall_id'])
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('c60ceff5-d51f-451d-b6e6-cb983d16ab6b')
|
||||
def test_firewall_insertion_with_multiple_rules_check_backend(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
firewall_rule_2 = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="allow",
|
||||
protocol="tcp")
|
||||
fw_rule_id2 = firewall_rule_2['firewall_rule']['id']
|
||||
firewall_name_2 = self._create_firewall_rule_name(firewall_rule_2)
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id2)
|
||||
# Update firewall policy
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
firewall_topo['fw_policy_id'], fw_rule_id2,
|
||||
firewall_topo['fw_rule_id1'], '')
|
||||
self._wait_firewall_ready(firewall_topo['firewall_id'])
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_name_2))
|
||||
firewall_rule_3 = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="allow",
|
||||
protocol="tcp")
|
||||
fw_rule_id3 = firewall_rule_3['firewall_rule']['id']
|
||||
firewall_name_3 = self._create_firewall_rule_name(firewall_rule_3)
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id3)
|
||||
# Update firewall policy
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
firewall_topo['fw_policy_id'], fw_rule_id3, fw_rule_id2, '')
|
||||
self._wait_firewall_ready(firewall_topo['firewall_id'])
|
||||
self.addCleanup(self._try_delete_policy, firewall_topo['fw_policy_id'])
|
||||
self.addCleanup(self._try_delete_firewall,
|
||||
firewall_topo['firewall_id'])
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_name_3))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('a1734149-9c4b-46d3-86c8-d61f57458095')
|
||||
def test_firewall_add_remove_rule_check_backend(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
self.fwaasv1_client.remove_firewall_rule_from_policy(
|
||||
firewall_topo['fw_policy_id'], firewall_topo['fw_rule_id1'])
|
||||
self.delete_firewall_and_wait(firewall_topo['firewall_id'])
|
||||
time.sleep(constants.NSX_BACKEND_SMALL_TIME_INTERVAL)
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
False, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('e1111959-c36a-41d6-86ee-ea6c0b927eb3')
|
||||
def test_firewall_insertion_mode_one_firewall_rule_check_backend(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, firewall_topo['firewall_name']))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('e434b3c9-1148-499a-bb52-b094cdb0a186')
|
||||
def test_firewall_insertion_mode_one_firewall_per_router(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
# Try to create firewall with the same router
|
||||
self.assertRaisesRegexp(
|
||||
lib_exc.Conflict,
|
||||
"already associated with other Firewall",
|
||||
self.fwaasv1_client.create_firewall,
|
||||
name=data_utils.rand_name("firewall"),
|
||||
firewall_policy_id=self.fw_policy['firewall_policy']['id'],
|
||||
router_ids=[firewall_topo['router']['id']])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('d162abb2-9c14-45d6-bed1-06646a66803a')
|
||||
def test_firewall_insertion_mode_one_firewall_per_dist_router(self):
|
||||
# Create router required for an ACTIVE firewall
|
||||
firewall_topo = self._create_firewall_basic_topo('distributed')
|
||||
# Try to create firewall with the same router
|
||||
self.assertRaisesRegexp(
|
||||
lib_exc.Conflict,
|
||||
"already associated with other Firewall",
|
||||
self.fwaasv1_client.create_firewall,
|
||||
name=data_utils.rand_name("firewall"),
|
||||
firewall_policy_id=self.fw_policy['firewall_policy']['id'],
|
||||
router_ids=[firewall_topo['router']['id']])
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('d5531558-9b18-40bc-9388-3eded0894a85')
|
||||
def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
|
||||
# Create firewall rule
|
||||
body = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="allow",
|
||||
protocol="tcp")
|
||||
fw_rule_id1 = body['firewall_rule']['id']
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id1)
|
||||
# Create firewall policy
|
||||
body = self.fwaasv1_client.create_firewall_policy(
|
||||
name=data_utils.rand_name("fw-policy"))
|
||||
fw_policy_id = body['firewall_policy']['id']
|
||||
self.addCleanup(self._try_delete_policy, fw_policy_id)
|
||||
# Insert rule to firewall policy
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
fw_policy_id, fw_rule_id1, '', '')
|
||||
# Verify insertion of rule in policy
|
||||
self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
|
||||
# Create another firewall rule
|
||||
body = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="allow",
|
||||
protocol="icmp")
|
||||
fw_rule_id2 = body['firewall_rule']['id']
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id2)
|
||||
# Insert rule to firewall policy after the first rule
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
fw_policy_id, fw_rule_id2, fw_rule_id1, '')
|
||||
# Verify the position of rule after insertion
|
||||
fw_rule = self.fwaasv1_client.show_firewall_rule(
|
||||
fw_rule_id2)
|
||||
self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
|
||||
# Remove rule from the firewall policy
|
||||
self.fwaasv1_client.remove_firewall_rule_from_policy(
|
||||
fw_policy_id, fw_rule_id2)
|
||||
# Insert rule to firewall policy before the first rule
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
fw_policy_id, fw_rule_id2, '', fw_rule_id1)
|
||||
# Verify the position of rule after insertion
|
||||
fw_rule = self.fwaasv1_client.show_firewall_rule(
|
||||
fw_rule_id2)
|
||||
self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
|
||||
# Remove rule from the firewall policy
|
||||
self.fwaasv1_client.remove_firewall_rule_from_policy(
|
||||
fw_policy_id, fw_rule_id2)
|
||||
# Verify removal of rule from firewall policy
|
||||
self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
|
||||
# Remove rule from the firewall policy
|
||||
self.fwaasv1_client.remove_firewall_rule_from_policy(
|
||||
fw_policy_id, fw_rule_id1)
|
||||
# Verify removal of rule from firewall policy
|
||||
self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('901dae30-b148-43d9-ac86-09777aeaba20')
|
||||
def test_update_firewall_name_at_backend_excl_edge(self):
|
||||
firewall_topo = self._create_firewall_basic_topo('exclusive')
|
||||
fw_rule_id = firewall_topo['fw_rule_id1']
|
||||
body = self.fwaasv1_client.update_firewall_rule(fw_rule_id,
|
||||
name="updated_rule")
|
||||
updated_fw_rule = body["firewall_rule"]
|
||||
self.assertEqual("updated_rule", updated_fw_rule['name'])
|
||||
time.sleep(constants.NSX_FIREWALL_REALIZED_TIMEOUT)
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, "Fwaas-updated_rule"))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('471ebc13-8e3b-4aca-85b8-747935bf0559')
|
||||
def test_update_firewall_name_at_backend_dist_edge(self):
|
||||
firewall_topo = self._create_firewall_basic_topo('distributed')
|
||||
fw_rule_id = firewall_topo['fw_rule_id1']
|
||||
body = self.fwaasv1_client.update_firewall_rule(fw_rule_id,
|
||||
name="updated_rule")
|
||||
updated_fw_rule = body["firewall_rule"]
|
||||
self.assertEqual("updated_rule", updated_fw_rule['name'])
|
||||
time.sleep(constants.NSX_FIREWALL_REALIZED_TIMEOUT)
|
||||
edges = self.vsm.get_all_edges()
|
||||
firewall_topo['router']['name'] += '-plr'
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
time.sleep(constants.NSX_BACKEND_VERY_SMALL_TIME_INTERVAL)
|
||||
self.assertEqual(
|
||||
True, self._check_firewall_rule_exists_at_backend(
|
||||
rules, "Fwaas-updated_rule"))
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('0bdc9670-17b8-4dd5-80c8-dc6e956fc6ef')
|
||||
def test_create_multiple_firewall_rules_check_at_backend(self):
|
||||
firewall_topo = self._create_firewall_advanced_topo('exclusive')
|
||||
edges = self.vsm.get_all_edges()
|
||||
for key in edges:
|
||||
if firewall_topo['router']['name'] in key['name']:
|
||||
edge_id = key['id']
|
||||
break
|
||||
firewall_rules = self.vsm.get_edge_firewall_rules(edge_id)
|
||||
total_rules = firewall_topo['rules_before'] + len(firewall_rules)
|
||||
self.assertGreaterEqual(total_rules, constants.NO_OF_ENTRIES,
|
||||
"Firewall Rules are greater than %s" %
|
||||
constants.NO_OF_ENTRIES)
|
||||
|
||||
@decorators.attr(type='nsxv')
|
||||
@decorators.idempotent_id('0249db39-6284-456a-9449-2adacdca4d3b')
|
||||
def test_update_firewall_policy_audited_attribute(self):
|
||||
# Create firewall rule
|
||||
body = self.fwaasv1_client.create_firewall_rule(
|
||||
name=data_utils.rand_name("fw-rule"),
|
||||
action="allow",
|
||||
protocol="icmp")
|
||||
fw_rule_id = body['firewall_rule']['id']
|
||||
self.addCleanup(self._try_delete_rule, fw_rule_id)
|
||||
# Create firewall policy
|
||||
body = self.fwaasv1_client.create_firewall_policy(
|
||||
name=data_utils.rand_name('fw-policy'))
|
||||
fw_policy_id = body['firewall_policy']['id']
|
||||
self.addCleanup(self._try_delete_policy, fw_policy_id)
|
||||
self.assertFalse(body['firewall_policy']['audited'])
|
||||
# Update firewall policy audited attribute to true
|
||||
self.fwaasv1_client.update_firewall_policy(fw_policy_id,
|
||||
audited=True)
|
||||
# Insert Firewall rule to firewall policy
|
||||
self.fwaasv1_client.insert_firewall_rule_in_policy(
|
||||
fw_policy_id, fw_rule_id, '', '')
|
||||
body = self.fwaasv1_client.show_firewall_policy(
|
||||
fw_policy_id)
|
||||
self.assertFalse(body['firewall_policy']['audited'])
|
@ -1,495 +0,0 @@
|
||||
# Copyright 2013 OpenStack Foundation
|
||||
# Copyright 2015 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import six
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.services import load_balancer_v1_client as LBV1C
|
||||
from vmware_nsx_tempest.services import network_client_base as base_client
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class LoadBalancerTestJSON(base.BaseNetworkTest):
|
||||
"""
|
||||
Tests the following operations in the Neutron API using the REST client
|
||||
for
|
||||
Neutron:
|
||||
|
||||
create vIP, and Pool
|
||||
show vIP
|
||||
list vIP
|
||||
update vIP
|
||||
delete vIP
|
||||
update pool
|
||||
delete pool
|
||||
show pool
|
||||
list pool
|
||||
health monitoring operations
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(LoadBalancerTestJSON, cls).skip_checks()
|
||||
if not test.is_extension_enabled('lbaas', 'network'):
|
||||
msg = "lbaas extension not enabled."
|
||||
raise cls.skipException(msg)
|
||||
if not test.is_extension_enabled('nsxv-router-type', 'network'):
|
||||
msg = "nsxv-router-type extension is not enabled"
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(LoadBalancerTestJSON, cls).resource_setup()
|
||||
_params = base_client.default_params_with_timeout_values.copy()
|
||||
for p in _params.keys():
|
||||
if p in ['service', 'region', 'endpoint_type']:
|
||||
_params.pop(p)
|
||||
cls.lbv1_client = LBV1C.get_client(cls.manager)
|
||||
cls.network = cls.create_network()
|
||||
cls.name = cls.network['name']
|
||||
cls.subnet = cls.create_subnet(cls.network)
|
||||
cls.ext_net_id = CONF.network.public_network_id
|
||||
cls.router = cls.create_router(data_utils.rand_name('router-'),
|
||||
admin_state_up=True,
|
||||
external_network_id=cls.ext_net_id,
|
||||
router_type='exclusive')
|
||||
cls.create_router_interface(cls.router['id'], cls.subnet['id'])
|
||||
pool_name = data_utils.rand_name('pool-')
|
||||
vip_name = data_utils.rand_name('vip-')
|
||||
cls.pool = cls.lbv1_client.create_pool(
|
||||
pool_name, "ROUND_ROBIN", "HTTP", cls.subnet['id'])['pool']
|
||||
cls.vip = cls.lbv1_client.create_vip(cls.pool['id'],
|
||||
subnet_id=cls.subnet['id'],
|
||||
name=vip_name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80)['vip']
|
||||
cls.member = cls.lbv1_client.create_member(
|
||||
80, cls.pool['id'], cls._ip_version)['member']
|
||||
cls.member_address = ("10.0.9.47" if cls._ip_version == 4
|
||||
else "2015::beef")
|
||||
cls.health_monitor = cls.lbv1_client.create_health_monitor(
|
||||
delay=4, max_retries=3, type="TCP", timeout=1)['health_monitor']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
"""
|
||||
Cleanup the lb resources first and then call resource_cleanup
|
||||
in BaseNetworkTest to cleanup other network resources. NSX-v
|
||||
plugin requires the lb resources to be deleted before we can
|
||||
delete subnet or remove interface from router.
|
||||
"""
|
||||
# Cleanup lb health monitors
|
||||
if cls.health_monitor:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_health_monitor,
|
||||
cls.health_monitor['id'])
|
||||
cls.health_monitor = None
|
||||
|
||||
# Cleanup members
|
||||
if cls.member:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_member, cls.member['id'])
|
||||
cls.member = None
|
||||
|
||||
# Cleanup vips
|
||||
if cls.vip:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_vip, cls.vip['id'])
|
||||
cls.vip = None
|
||||
|
||||
# Cleanup pool
|
||||
if cls.pool:
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_pool, cls.pool['id'])
|
||||
cls.pool = None
|
||||
|
||||
super(LoadBalancerTestJSON, cls).resource_cleanup()
|
||||
|
||||
def _check_list_with_filter(self, obj_name, attr_exceptions, **kwargs):
|
||||
create_obj = getattr(self.lbv1_client, 'create_' + obj_name)
|
||||
delete_obj = getattr(self.lbv1_client, 'delete_' + obj_name)
|
||||
list_objs = getattr(self.lbv1_client, 'list_' + obj_name + 's')
|
||||
|
||||
body = create_obj(**kwargs)
|
||||
obj = body[obj_name]
|
||||
self.addCleanup(delete_obj, obj['id'])
|
||||
for key, value in six.iteritems(obj):
|
||||
# It is not relevant to filter by all arguments. That is why
|
||||
# there is a list of attr to except
|
||||
if key not in attr_exceptions:
|
||||
body = list_objs(**{key: value})
|
||||
objs = [v[key] for v in body[obj_name + 's']]
|
||||
self.assertIn(value, objs)
|
||||
|
||||
@decorators.idempotent_id('1c959a37-feb3-4d58-b5fc-58ba653de065')
|
||||
def test_list_vips(self):
|
||||
# Verify the vIP exists in the list of all vIPs
|
||||
body = self.lbv1_client.list_vips()
|
||||
vips = body['vips']
|
||||
self.assertIn(self.vip['id'], [v['id'] for v in vips])
|
||||
|
||||
@decorators.idempotent_id('687b7fd1-fd15-4ffd-8166-f376407a6081')
|
||||
def test_list_vips_with_filter(self):
|
||||
pool_name = data_utils.rand_name("pool-")
|
||||
vip_name = data_utils.rand_name('vip-')
|
||||
body = self.lbv1_client.create_pool(pool_name,
|
||||
lb_method="ROUND_ROBIN",
|
||||
protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.lbv1_client.delete_pool, pool['id'])
|
||||
attr_exceptions = ['status', 'session_persistence',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'vip', attr_exceptions, name=vip_name, protocol="HTTPS",
|
||||
protocol_port=81, subnet_id=self.subnet['id'], pool_id=pool['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@decorators.idempotent_id('73dfc119-b64b-4e56-90d2-df61d7181098')
|
||||
def test_create_update_delete_pool_vip(self):
|
||||
# Creates a vip
|
||||
pool_name = data_utils.rand_name("pool-")
|
||||
vip_name = data_utils.rand_name('vip-')
|
||||
address = self.subnet['allocation_pools'][0]['end']
|
||||
body = self.lbv1_client.create_pool(
|
||||
pool_name,
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
body = self.lbv1_client.create_vip(pool['id'],
|
||||
name=vip_name,
|
||||
protocol="HTTP",
|
||||
protocol_port=80,
|
||||
subnet_id=self.subnet['id'],
|
||||
address=address)
|
||||
vip = body['vip']
|
||||
vip_id = vip['id']
|
||||
# Confirm VIP's address correctness with a show
|
||||
body = self.lbv1_client.show_vip(vip_id)
|
||||
vip = body['vip']
|
||||
self.assertEqual(address, vip['address'])
|
||||
# Verification of vip update
|
||||
new_name = "New_vip"
|
||||
new_description = "New description"
|
||||
persistence_type = "HTTP_COOKIE"
|
||||
update_data = {"session_persistence": {
|
||||
"type": persistence_type}}
|
||||
body = self.lbv1_client.update_vip(vip_id,
|
||||
name=new_name,
|
||||
description=new_description,
|
||||
connection_limit=10,
|
||||
admin_state_up=False,
|
||||
**update_data)
|
||||
updated_vip = body['vip']
|
||||
self.assertEqual(new_name, updated_vip['name'])
|
||||
self.assertEqual(new_description, updated_vip['description'])
|
||||
self.assertEqual(10, updated_vip['connection_limit'])
|
||||
self.assertFalse(updated_vip['admin_state_up'])
|
||||
self.assertEqual(persistence_type,
|
||||
updated_vip['session_persistence']['type'])
|
||||
self.lbv1_client.delete_vip(vip['id'])
|
||||
self.lbv1_client.wait_for_resource_deletion('vip', vip['id'])
|
||||
# Verification of pool update
|
||||
new_name = "New_pool"
|
||||
body = self.lbv1_client.update_pool(pool['id'],
|
||||
name=new_name,
|
||||
description="new_description",
|
||||
lb_method='LEAST_CONNECTIONS')
|
||||
updated_pool = body['pool']
|
||||
self.assertEqual(new_name, updated_pool['name'])
|
||||
self.assertEqual('new_description', updated_pool['description'])
|
||||
self.assertEqual('LEAST_CONNECTIONS', updated_pool['lb_method'])
|
||||
self.lbv1_client.delete_pool(pool['id'])
|
||||
|
||||
@decorators.idempotent_id('277a99ce-4b3e-451d-a18a-d26c0376d176')
|
||||
def test_show_vip(self):
|
||||
# Verifies the details of a vip
|
||||
body = self.lbv1_client.show_vip(self.vip['id'])
|
||||
vip = body['vip']
|
||||
for key, value in six.iteritems(vip):
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.vip[key], value)
|
||||
|
||||
@decorators.idempotent_id('432470dd-836b-4555-8388-af95a1c74d32')
|
||||
def test_show_pool(self):
|
||||
# Here we need to new pool without any dependence with vips
|
||||
pool_name = data_utils.rand_name("pool-")
|
||||
body = self.lbv1_client.create_pool(pool_name,
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
pool = body['pool']
|
||||
self.addCleanup(self.lbv1_client.delete_pool, pool['id'])
|
||||
# Verifies the details of a pool
|
||||
body = self.lbv1_client.show_pool(pool['id'])
|
||||
shown_pool = body['pool']
|
||||
for key, value in six.iteritems(pool):
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(value, shown_pool[key])
|
||||
|
||||
@decorators.idempotent_id('c9951820-7b24-4e67-8c0c-41065ec66071')
|
||||
def test_list_pools(self):
|
||||
# Verify the pool exists in the list of all pools
|
||||
body = self.lbv1_client.list_pools()
|
||||
pools = body['pools']
|
||||
self.assertIn(self.pool['id'], [p['id'] for p in pools])
|
||||
|
||||
@decorators.idempotent_id('55a1fb8e-e88e-4042-a46a-13a0282e4990')
|
||||
def test_list_pools_with_filters(self):
|
||||
attr_exceptions = ['status', 'vip_id', 'members', 'provider',
|
||||
'status_description']
|
||||
self._check_list_with_filter(
|
||||
'pool', attr_exceptions, name=data_utils.rand_name("pool-"),
|
||||
lb_method="ROUND_ROBIN", protocol="HTTPS",
|
||||
subnet_id=self.subnet['id'],
|
||||
description=data_utils.rand_name('description-'),
|
||||
admin_state_up=False)
|
||||
|
||||
@decorators.idempotent_id('dd441433-de8f-4992-a721-0755dec737ff')
|
||||
def test_list_members(self):
|
||||
# Verify the member exists in the list of all members
|
||||
body = self.lbv1_client.list_members()
|
||||
members = body['members']
|
||||
self.assertIn(self.member['id'], [m['id'] for m in members])
|
||||
|
||||
@decorators.idempotent_id('ccebe68a-f096-478d-b495-f17d5c0eac7b')
|
||||
def test_list_members_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description']
|
||||
self._check_list_with_filter('member', attr_exceptions,
|
||||
address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@decorators.idempotent_id('b4efe862-0439-4260-828c-cc09ff7e12a6')
|
||||
def test_create_update_delete_member(self):
|
||||
# Creates a member
|
||||
body = self.lbv1_client.create_member(address=self.member_address,
|
||||
protocol_port=80,
|
||||
pool_id=self.pool['id'])
|
||||
member = body['member']
|
||||
# Verification of member update
|
||||
body = self.lbv1_client.update_member(member['id'],
|
||||
admin_state_up=False)
|
||||
updated_member = body['member']
|
||||
self.assertFalse(updated_member['admin_state_up'])
|
||||
# Verification of member delete
|
||||
self.lbv1_client.delete_member(member['id'])
|
||||
|
||||
@decorators.idempotent_id('4806ca47-b3a0-4280-9962-6631c6815e93')
|
||||
def test_show_member(self):
|
||||
# Verifies the details of a member
|
||||
body = self.lbv1_client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
for key, value in six.iteritems(member):
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.member[key], value)
|
||||
|
||||
@decorators.idempotent_id('65c4d817-d8d2-44df-9c15-86fc7b910044')
|
||||
def test_list_health_monitors(self):
|
||||
# Verify the health monitor exists in the list of all health monitors
|
||||
body = self.lbv1_client.list_health_monitors()
|
||||
health_monitors = body['health_monitors']
|
||||
self.assertIn(self.health_monitor['id'],
|
||||
[h['id'] for h in health_monitors])
|
||||
|
||||
@decorators.idempotent_id('a2c749a0-4eac-4acc-b729-6b469c3c616a')
|
||||
def test_list_health_monitors_with_filters(self):
|
||||
attr_exceptions = ['status', 'status_description', 'pools']
|
||||
self._check_list_with_filter('health_monitor', attr_exceptions,
|
||||
delay=5, max_retries=4, type="TCP",
|
||||
timeout=2)
|
||||
|
||||
@decorators.idempotent_id('94f1e066-de6e-4cd8-b352-533d216956b7')
|
||||
def test_create_update_delete_health_monitor(self):
|
||||
# Creates a health_monitor
|
||||
body = self.lbv1_client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="TCP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
# Verification of health_monitor update
|
||||
body = (self.lbv1_client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
admin_state_up=False))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertFalse(updated_health_monitor['admin_state_up'])
|
||||
# Verification of health_monitor delete
|
||||
body = self.lbv1_client.delete_health_monitor(health_monitor['id'])
|
||||
|
||||
@decorators.idempotent_id('82943dcf-d424-43f0-890f-4b796f5043dc')
|
||||
def test_create_health_monitor_http_type(self):
|
||||
hm_type = "HTTP"
|
||||
body = self.lbv1_client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type=hm_type,
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.lbv1_client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
self.assertEqual(hm_type, health_monitor['type'])
|
||||
|
||||
@decorators.idempotent_id('b1279c46-822a-4406-bb16-6a6ce7bf4e4e')
|
||||
def test_update_health_monitor_http_method(self):
|
||||
body = self.lbv1_client.create_health_monitor(delay=4,
|
||||
max_retries=3,
|
||||
type="HTTP",
|
||||
timeout=1)
|
||||
health_monitor = body['health_monitor']
|
||||
self.addCleanup(self.lbv1_client.delete_health_monitor,
|
||||
health_monitor['id'])
|
||||
body = (self.lbv1_client.update_health_monitor
|
||||
(health_monitor['id'],
|
||||
http_method="POST",
|
||||
url_path="/home/user",
|
||||
expected_codes="290"))
|
||||
updated_health_monitor = body['health_monitor']
|
||||
self.assertEqual("POST", updated_health_monitor['http_method'])
|
||||
self.assertEqual("/home/user", updated_health_monitor['url_path'])
|
||||
self.assertEqual("290", updated_health_monitor['expected_codes'])
|
||||
|
||||
@decorators.idempotent_id('7beabd44-0200-4cc4-b18d-5fb1f44cf36c')
|
||||
def test_show_health_monitor(self):
|
||||
# Verifies the details of a health_monitor
|
||||
body = self.lbv1_client.show_health_monitor(self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
for key, value in six.iteritems(health_monitor):
|
||||
# 'status' should not be confirmed in api tests
|
||||
if key != 'status':
|
||||
self.assertEqual(self.health_monitor[key], value)
|
||||
|
||||
@decorators.idempotent_id('5386d600-1372-4f99-b0f2-316401718ac4')
|
||||
def test_associate_disassociate_health_monitor_with_pool(self):
|
||||
# Verify that a health monitor can be associated with a pool
|
||||
self.lbv1_client.associate_health_monitor_with_pool(
|
||||
self.health_monitor['id'], self.pool['id'])
|
||||
body = self.lbv1_client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
self.assertIn(health_monitor['id'], pool['health_monitors'])
|
||||
# Verify that a health monitor can be disassociated from a pool
|
||||
(self.lbv1_client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.lbv1_client.show_health_monitor(
|
||||
self.health_monitor['id'])
|
||||
health_monitor = body['health_monitor']
|
||||
self.assertNotIn(health_monitor['id'], pool['health_monitors'])
|
||||
self.assertNotIn(pool['id'],
|
||||
[p['pool_id'] for p in health_monitor['pools']])
|
||||
|
||||
@decorators.idempotent_id('17a6b730-0780-46c9-bca0-cec67387e469')
|
||||
def test_get_lb_pool_stats(self):
|
||||
# Verify the details of pool stats
|
||||
body = self.lbv1_client.list_lb_pool_stats(self.pool['id'])
|
||||
stats = body['stats']
|
||||
self.assertIn("bytes_in", stats)
|
||||
self.assertIn("total_connections", stats)
|
||||
self.assertIn("active_connections", stats)
|
||||
self.assertIn("bytes_out", stats)
|
||||
|
||||
@decorators.idempotent_id('a113c740-6194-4622-a187-8343ad3e5208')
|
||||
def test_update_list_of_health_monitors_associated_with_pool(self):
|
||||
(self.lbv1_client.associate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
self.lbv1_client.update_health_monitor(
|
||||
self.health_monitor['id'], admin_state_up=False)
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
health_monitors = body['pool']['health_monitors']
|
||||
for health_monitor_id in health_monitors:
|
||||
body = self.lbv1_client.show_health_monitor(health_monitor_id)
|
||||
self.assertFalse(body['health_monitor']['admin_state_up'])
|
||||
(self.lbv1_client.disassociate_health_monitor_with_pool
|
||||
(self.health_monitor['id'], self.pool['id']))
|
||||
|
||||
@decorators.idempotent_id('a2843ec6-80d8-4617-b985-8c8565daac8d')
|
||||
def test_update_admin_state_up_of_pool(self):
|
||||
self.lbv1_client.update_pool(self.pool['id'],
|
||||
admin_state_up=False)
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
self.assertFalse(pool['admin_state_up'])
|
||||
|
||||
@decorators.idempotent_id('fd45c684-b847-472f-a7e8-a3f70e8e08e0')
|
||||
def test_show_vip_associated_with_pool(self):
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
pool = body['pool']
|
||||
body = self.lbv1_client.show_vip(pool['vip_id'])
|
||||
vip = body['vip']
|
||||
self.assertEqual(self.vip['name'], vip['name'])
|
||||
self.assertEqual(self.vip['id'], vip['id'])
|
||||
|
||||
@decorators.idempotent_id('1ac0ca5f-7d6a-4ac4-b286-d68c92a98405')
|
||||
def test_show_members_associated_with_pool(self):
|
||||
body = self.lbv1_client.show_pool(self.pool['id'])
|
||||
members = body['pool']['members']
|
||||
for member_id in members:
|
||||
body = self.lbv1_client.show_member(member_id)
|
||||
self.assertIsNotNone(body['member']['status'])
|
||||
self.assertEqual(member_id, body['member']['id'])
|
||||
self.assertIsNotNone(body['member']['admin_state_up'])
|
||||
|
||||
@decorators.idempotent_id('4fa308fa-ac2b-4acf-87db-adfe2ee4739c')
|
||||
def test_update_pool_related_to_member(self):
|
||||
# Create new pool
|
||||
pool_name = data_utils.rand_name("pool-")
|
||||
body = self.lbv1_client.create_pool(
|
||||
pool_name,
|
||||
lb_method='ROUND_ROBIN',
|
||||
protocol='HTTP',
|
||||
subnet_id=self.subnet['id'])
|
||||
new_pool = body['pool']
|
||||
self.addCleanup(self.lbv1_client.delete_pool, new_pool['id'])
|
||||
# Update member with new pool's id
|
||||
body = self.lbv1_client.update_member(self.member['id'],
|
||||
pool_id=new_pool['id'])
|
||||
# Confirm with show that pool_id change
|
||||
body = self.lbv1_client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(member['pool_id'], new_pool['id'])
|
||||
# Update member with old pool id, this is needed for clean up
|
||||
body = self.lbv1_client.update_member(self.member['id'],
|
||||
pool_id=self.pool['id'])
|
||||
|
||||
@decorators.idempotent_id('0af2ff6b-a896-433d-8107-3c76262a9dfa')
|
||||
def test_update_member_weight(self):
|
||||
self.lbv1_client.update_member(self.member['id'],
|
||||
weight=2)
|
||||
body = self.lbv1_client.show_member(self.member['id'])
|
||||
member = body['member']
|
||||
self.assertEqual(2, member['weight'])
|
||||
|
||||
|
||||
@decorators.skip_because(bug="1402007")
|
||||
class LoadBalancerIpV6TestJSON(LoadBalancerTestJSON):
|
||||
_ip_version = 6
|
@ -1,169 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import random
|
||||
|
||||
from tempest import config
|
||||
|
||||
from oslo_log import log as logging
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
import test_subnets as SNET
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
VLAN_PHYSICAL_NETWORK = CONF.nsxv.vlan_physical_network or None
|
||||
VLAN_ID_PROVIDER = CONF.nsxv.provider_vlan_id
|
||||
|
||||
|
||||
class VlanNetworksTestJSON(SNET.SubnetTestJSON):
|
||||
_interface = 'json'
|
||||
_vlanid = int(VLAN_ID_PROVIDER)
|
||||
_provider_network_body = {
|
||||
'name': data_utils.rand_name('VLAN-%04d-network' % _vlanid),
|
||||
'provider:network_type': 'vlan',
|
||||
'provider:physical_network': VLAN_PHYSICAL_NETWORK,
|
||||
'provider:segmentation_id': _vlanid}
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
cls.vlan_range = (2001, 2999)
|
||||
cls.vlan_assigned = []
|
||||
super(VlanNetworksTestJSON, cls).resource_setup()
|
||||
|
||||
def get_next_vlan(self):
|
||||
next_vlan = self.next_vlan
|
||||
self.next_vlan += 1
|
||||
if self.next_vlan > self.vlan_range[1]:
|
||||
self.next_vlan = self.vlan_range[0]
|
||||
return next_vlan
|
||||
|
||||
def get_vlan(self):
|
||||
for x in range(0, 10):
|
||||
next_vlan = random.randint(*self.vlan_range)
|
||||
if next_vlan in self.vlan_assigned:
|
||||
continue
|
||||
else:
|
||||
self.vlan_assigned.append(next_vlan)
|
||||
return next_vlan
|
||||
return 3000
|
||||
|
||||
def _create_network(self, _auto_clean_up=True, network_name=None,
|
||||
**kwargs):
|
||||
segmentation_id = kwargs.pop('provider:segmentation_id', None)
|
||||
if not segmentation_id:
|
||||
segmentation_id = self.get_vlan()
|
||||
network_name = (network_name or
|
||||
data_utils.rand_name(
|
||||
'vlan-' + str(segmentation_id) + '-netwk'))
|
||||
post_body = {'name': network_name,
|
||||
'provider:network_type': 'vlan',
|
||||
'provider:physical_network': VLAN_PHYSICAL_NETWORK,
|
||||
'provider:segmentation_id': segmentation_id}
|
||||
post_body.update(kwargs)
|
||||
for k, v in post_body.items():
|
||||
if not v:
|
||||
post_body.pop(k)
|
||||
LOG.debug("create VLAN network: %s", str(post_body))
|
||||
body = self.create_network(**post_body)
|
||||
network = body['network']
|
||||
if _auto_clean_up:
|
||||
self.addCleanup(self._try_delete_network, network['id'])
|
||||
return network
|
||||
|
||||
@decorators.idempotent_id('c5f98016-dee3-42f1-8c23-b9cd1e625561')
|
||||
def test_create_network(self):
|
||||
# Create a network as an admin user specifying the
|
||||
# vlan network type attribute
|
||||
provider_attrs = {
|
||||
'provider:network_type': 'vlan',
|
||||
'provider:physical_network': VLAN_PHYSICAL_NETWORK,
|
||||
'provider:segmentation_id': 1002}
|
||||
network = self._create_network(_auto_clean_up=False, **provider_attrs)
|
||||
# Verifies parameters
|
||||
self.assertIsNotNone(network['id'])
|
||||
self.assertEqual(network.get('provider:network_type'), 'vlan')
|
||||
if VLAN_PHYSICAL_NETWORK:
|
||||
self.assertEqual(network.get('provider:physical_network'),
|
||||
VLAN_PHYSICAL_NETWORK)
|
||||
self.assertEqual(network.get('provider:segmentation_id'), 1002)
|
||||
self._delete_network(network['id'])
|
||||
|
||||
@decorators.idempotent_id('714e69eb-bb31-4cfc-9804-8e988f04ca65')
|
||||
def test_update_network(self):
|
||||
# Update flat network as an admin user specifying the
|
||||
# flat network attribute
|
||||
net_profile = {'shared': True, '_auto_clean_up': False,
|
||||
'provider:segmentation_id': 1003}
|
||||
network = self._create_network(**net_profile)
|
||||
self.assertEqual(network.get('shared'), True)
|
||||
new_name = network['name'] + "-updated"
|
||||
update_body = {'shared': False, 'name': new_name}
|
||||
body = self.update_network(network['id'], **update_body)
|
||||
updated_network = body['network']
|
||||
# Verify that name and shared parameters were updated
|
||||
self.assertEqual(updated_network['shared'], False)
|
||||
self.assertEqual(updated_network['name'], new_name)
|
||||
# get flat network attributes and verify them
|
||||
body = self.show_network(network['id'])
|
||||
updated_network = body['network']
|
||||
# Verify that name and shared parameters were updated
|
||||
self.assertEqual(updated_network['shared'], False)
|
||||
self.assertEqual(updated_network['name'], new_name)
|
||||
self.assertEqual(updated_network['status'], network['status'])
|
||||
self.assertEqual(updated_network['subnets'], network['subnets'])
|
||||
self._delete_network(network['id'])
|
||||
|
||||
@decorators.idempotent_id('8a8b9f2c-37f8-4c53-b8e3-0c9c0910380f')
|
||||
def test_list_networks(self):
|
||||
# Create flat network
|
||||
net_profile = {'shared': True, '_auto_clean_up': False,
|
||||
'provider:segmentation_id': 1004}
|
||||
network = self._create_network(**net_profile)
|
||||
# List networks as a normal user and confirm it is available
|
||||
body = self.list_networks(client=self.networks_client)
|
||||
networks_list = [net['id'] for net in body['networks']]
|
||||
self.assertIn(network['id'], networks_list)
|
||||
update_body = {'shared': False}
|
||||
body = self.update_network(network['id'], **update_body)
|
||||
# List networks as a normal user and confirm it is not available
|
||||
body = self.list_networks(client=self.networks_client)
|
||||
networks_list = [net['id'] for net in body['networks']]
|
||||
self.assertNotIn(network['id'], networks_list)
|
||||
self._delete_network(network['id'])
|
||||
|
||||
@decorators.idempotent_id('5807958d-9ee2-48a5-937e-ddde092956a6')
|
||||
def test_show_network_attributes(self):
|
||||
# Create flat network
|
||||
net_profile = {'shared': True, '_auto_clean_up': False,
|
||||
'provider:segmentation_id': 1005}
|
||||
network = self._create_network(**net_profile)
|
||||
# Show a flat network as a normal user and confirm the
|
||||
# flat network attribute is returned.
|
||||
body = self.show_network(network['id'], client=self.networks_client)
|
||||
show_net = body['network']
|
||||
self.assertEqual(network['name'], show_net['name'])
|
||||
self.assertEqual(network['id'], show_net['id'])
|
||||
# provider attributes are for admin only
|
||||
body = self.show_network(network['id'])
|
||||
show_net = body['network']
|
||||
net_attr_list = show_net.keys()
|
||||
for attr in ('admin_state_up', 'port_security_enabled', 'shared',
|
||||
'status', 'subnets', 'tenant_id', 'router:external',
|
||||
'provider:network_type', 'provider:physical_network',
|
||||
'provider:segmentation_id'):
|
||||
self.assertIn(attr, net_attr_list)
|
||||
self._delete_network(network['id'])
|
@ -1,818 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
import os
|
||||
import re
|
||||
import shlex
|
||||
import subprocess
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest.common import waiters
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||
network_addon_methods as HELO)
|
||||
from vmware_nsx_tempest.tests.scenario import manager
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = manager.log.getLogger(__name__)
|
||||
|
||||
Floating_IP_tuple = collections.namedtuple(
|
||||
'Floating_IP_tuple', ['floating_ip', 'server'])
|
||||
|
||||
Z_VM2_DEST = "VM[%(h_ipaddr)s] %(msg)s [%(helper)s %(d_ipaddr)s]"
|
||||
|
||||
# Before checking for floatingIP and server connectivity, we need to wait
|
||||
# x seconds for the control-plane to push configuration to data-plane
|
||||
# prior to process add/update/delete requests.
|
||||
WAITTIME_AFTER_DISASSOC_FLOATINGIP = CONF.scenario.waitfor_disassoc
|
||||
WAITTIME_AFTER_ASSOC_FLOATINGIP = CONF.scenario.waitfor_assoc
|
||||
WAITTIME_FOR_CONNECTIVITY = CONF.scenario.waitfor_connectivity
|
||||
DNS_SERVERS_IPV4 = CONF.network.dns_servers
|
||||
OUTSIDE_WORLD_SERVERS = CONF.scenario.outside_world_servers
|
||||
# iptype
|
||||
IPTYPE_FLOATING = 'floating-ip'
|
||||
IPTYPE_FIXED = 'fixed-ip'
|
||||
IPTYPE_OUTSIDE_SERVER = 'outside-server'
|
||||
|
||||
|
||||
class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
"""Purposes for TopoDeployScenarionManager:
|
||||
|
||||
1. Each deployment scenarion create its network resources, so
|
||||
call set_network_resource at setup_credentials() to overwrite it.
|
||||
2. setUp() is for test framework. Test case topology is part of
|
||||
test and is configured during test() cycle.
|
||||
3. net_resources.py overwrite resourses.py so the method to add
|
||||
interfaces to routers are inline with CLI, and support router
|
||||
owned by admin, but subnets are primary/alt clients.
|
||||
-- mechanism removed with patch 320495
|
||||
-- we are relaying on the test framework to delete resources
|
||||
in the reverse order of creating.
|
||||
4. Ping is used for Data-plane testing. OUTSIDE_WORLD_SERVERS ping
|
||||
test make sense when tenant's DNS is pirvate to provider.
|
||||
5. Teardown is high cost, each test should perform its un-config to
|
||||
complete the whole tenant life-cycle.
|
||||
WARNING: you need to increase your quota to run in parallel as
|
||||
you might run out of quota when things went wrong.
|
||||
"""
|
||||
|
||||
# defined at test.py; used to create client managers
|
||||
credentials = ['admin', 'primary', 'alt']
|
||||
# router attributes used to create the tenant's router
|
||||
tenant_router_attrs = {}
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(TopoDeployScenarioManager, cls).skip_checks()
|
||||
for ext in ['router', 'security-group']:
|
||||
if not test.is_extension_enabled(ext, 'network'):
|
||||
msg = "%s extension not enabled." % ext
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def check_preconditions(cls):
|
||||
super(TopoDeployScenarioManager, cls).check_preconditions()
|
||||
if not (CONF.network.project_networks_reachable or
|
||||
CONF.network.public_network_id):
|
||||
msg = ('Either project_networks_reachable must be "true", or '
|
||||
'public_network_id must be defined.')
|
||||
cls.enabled = False
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_credentials(cls):
|
||||
# Each client's network is created when client manager is created,
|
||||
# and client manager is created at setup_credentials.
|
||||
# topo-deploy scenarion manager asks not to create network resources.
|
||||
cls.set_network_resources(False, False, False, False)
|
||||
super(TopoDeployScenarioManager, cls).setup_credentials()
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TopoDeployScenarioManager, cls).resource_setup()
|
||||
cls.namestart = 'topo-deploy-tenant'
|
||||
cls.public_network_id = CONF.network.public_network_id
|
||||
# The creation of the 2nd tenant is defined by class.credentials
|
||||
# cls.alt_manager = clients.Manager(credentials=cls.alt_credentials())
|
||||
cls.alt_tenant_id = cls.alt_manager.identity_client.tenant_id
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(TopoDeployScenarioManager, cls).resource_cleanup()
|
||||
|
||||
def setUp(self):
|
||||
super(TopoDeployScenarioManager, self).setUp()
|
||||
self.cleanup_waits = []
|
||||
self.addCleanup(self._wait_for_cleanups)
|
||||
self.servers_on_net = {}
|
||||
|
||||
def tearDown(self):
|
||||
super(TopoDeployScenarioManager, self).tearDown()
|
||||
|
||||
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
|
||||
cleanup_callable, cleanup_args=None,
|
||||
cleanup_kwargs=None, waiter_client=None):
|
||||
"""Adds wait for async resource deletion at the end of cleanups
|
||||
|
||||
@param waiter_callable: callable to wait for the resource to delete
|
||||
with the following waiter_client if specified.
|
||||
@param thing_id: the id of the resource to be cleaned-up
|
||||
@param thing_id_param: the name of the id param in the waiter
|
||||
@param cleanup_callable: method to load pass to self.addCleanup with
|
||||
the following *cleanup_args, **cleanup_kwargs.
|
||||
usually a delete method.
|
||||
"""
|
||||
if cleanup_args is None:
|
||||
cleanup_args = []
|
||||
if cleanup_kwargs is None:
|
||||
cleanup_kwargs = {}
|
||||
self.addCleanup(cleanup_callable, *cleanup_args, **cleanup_kwargs)
|
||||
wait_dict = {
|
||||
'waiter_callable': waiter_callable,
|
||||
thing_id_param: thing_id
|
||||
}
|
||||
if waiter_client:
|
||||
wait_dict['client'] = waiter_client
|
||||
self.cleanup_waits.append(wait_dict)
|
||||
|
||||
def _wait_for_cleanups(self):
|
||||
# To handle async delete actions, a list of waits is added
|
||||
# which will be iterated over as the last step of clearing the
|
||||
# cleanup queue. That way all the delete calls are made up front
|
||||
# and the tests won't succeed unless the deletes are eventually
|
||||
# successful. This is the same basic approach used in the api tests to
|
||||
# limit cleanup execution time except here it is multi-resource,
|
||||
# because of the nature of the scenario tests.
|
||||
for wait in self.cleanup_waits:
|
||||
waiter_callable = wait.pop('waiter_callable')
|
||||
waiter_callable(**wait)
|
||||
|
||||
# overwrite parent class which does not accept NSX-v extension
|
||||
def _create_router(self, client_mgr=None, tenant_id=None,
|
||||
namestart='topo-deploy', **kwargs):
|
||||
client_mgr = client_mgr or self.manager
|
||||
routers_client = getattr(client_mgr, "routers_client")
|
||||
router = HELO.router_create(self, client=routers_client,
|
||||
tenant_id=tenant_id,
|
||||
namestart=namestart,
|
||||
**kwargs)
|
||||
return router
|
||||
|
||||
def _router_set_gateway(self, router_id, network_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_gateway_set(self, router_id, network_id,
|
||||
client=routers_client)
|
||||
|
||||
def _router_clear_gateway(self, router_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_gateway_clear(self, router_id,
|
||||
client=routers_client)
|
||||
|
||||
def _router_update_extra_routes(self, router_id, routes, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
router = routers_client.update_route(self, router_id,
|
||||
routes=routes)
|
||||
return router['router']
|
||||
|
||||
def _router_delete_extra_routes(self, router_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_delete_extra_routes(self, router_id,
|
||||
routers_client)
|
||||
|
||||
def _router_add_interface(self, net_router, net_subnet, client_mgr):
|
||||
routers_client = client_mgr.routers_client
|
||||
return HELO.router_interface_add(self, net_router['id'],
|
||||
net_subnet['id'], routers_client)
|
||||
|
||||
def router_interface_add(self, router_id, subnet_id, client=None):
    """Attach subnet *subnet_id* to the router (delegates to HELO)."""
    routers_client = client or self.routers_client
    return HELO.router_interface_add(self, router_id, subnet_id,
                                     routers_client)
|
||||
|
||||
def router_interface_delete(self, router_id, subnet_id, client=None):
    """Detach subnet *subnet_id* from the router (delegates to HELO)."""
    routers_client = client or self.routers_client
    return HELO.router_interface_delete(self, router_id, subnet_id,
                                        routers_client)
|
||||
|
||||
def create_server_on_network(self, networks, security_groups=None,
|
||||
name=None, image=None, wait_on_boot=True,
|
||||
flavor=None, servers_client=None,
|
||||
key_name=None, tenant_id=None):
|
||||
name = name or data_utils.rand_name('topo-deploy-vm')
|
||||
if security_groups is None:
|
||||
security_groups = [{'name': 'default'}]
|
||||
if type(networks) in (list, tuple):
|
||||
network_ifs = [{'uuid': nw['id']} for nw in networks]
|
||||
else:
|
||||
network_ifs = [{'uuid': networks['id']}]
|
||||
create_kwargs = {
|
||||
'networks': network_ifs,
|
||||
'security_groups': security_groups,
|
||||
}
|
||||
if key_name:
|
||||
create_kwargs['key_name'] = key_name
|
||||
if tenant_id:
|
||||
if not (servers_client and servers_client.tenant_id == tenant_id):
|
||||
create_kwargs['tenant_id'] = tenant_id
|
||||
LOG.debug("TopoDeploy Create server name=%(name)s"
|
||||
", create_kwargs=%(create_kwargs)s",
|
||||
{'name': name, 'create_kwargs': str(create_kwargs)})
|
||||
server = self.create_server(
|
||||
name=name, image=image, wait_on_boot=wait_on_boot,
|
||||
servers_client=servers_client, flavor=flavor,
|
||||
tenant_id=tenant_id, create_kwargs=create_kwargs)
|
||||
return server
|
||||
|
||||
# overwrite parent classes; add servers_client
|
||||
# BUG https://bugs.launchpad.net/tempest/+bug/1416175
|
||||
def create_server(self, name=None, image=None, flavor=None,
|
||||
wait_on_boot=True, wait_on_delete=True,
|
||||
servers_client=None, tenant_id=None,
|
||||
create_kwargs=None):
|
||||
"""Creates VM instance.
|
||||
|
||||
@param image: image from which to create the instance
|
||||
@param wait_on_boot: wait for status ACTIVE before continue
|
||||
@param wait_on_delete: force synchronous delete on cleanup
|
||||
@param servers_client: the servers_client to create VM
|
||||
@param create_kwargs: additional details for instance creation
|
||||
@return: server dict
|
||||
"""
|
||||
name = name or data_utils.rand_name('topo-deploy-vm')
|
||||
image = image or CONF.compute.image_ref
|
||||
flavor = flavor or CONF.compute.flavor_ref
|
||||
servers_client = servers_client or self.servers_client
|
||||
create_kwargs = create_kwargs or {}
|
||||
if type(tenant_id) in (str, unicode):
|
||||
if servers_client.tenant_id != tenant_id:
|
||||
create_kwargs['tenant_id'] = tenant_id
|
||||
|
||||
xmsg = ("Creating a server name=%(name)s, image=%(image)s"
|
||||
", flavor=%(flavor)s, create_kwargs=%(create_kwargs)s" %
|
||||
{'name': name, 'image': image, 'flavor': flavor,
|
||||
'create_kwargs': str(create_kwargs)})
|
||||
LOG.debug(xmsg)
|
||||
server_resp = servers_client.create_server(
|
||||
name=name, imageRef=image, flavorRef=flavor, **create_kwargs)
|
||||
server = server_resp['server']
|
||||
if wait_on_delete:
|
||||
self.addCleanup(
|
||||
waiters.wait_for_server_termination,
|
||||
servers_client, server['id'])
|
||||
self.addCleanup_with_wait(
|
||||
waiter_callable=waiters.wait_for_server_termination,
|
||||
thing_id=server['id'], thing_id_param='server_id',
|
||||
waiter_client=servers_client,
|
||||
cleanup_callable=test_utils.call_and_ignore_notfound_exc,
|
||||
cleanup_args=[servers_client.delete_server, server['id']])
|
||||
if wait_on_boot:
|
||||
waiters.wait_for_server_status(
|
||||
client=servers_client,
|
||||
server_id=server['id'], status='ACTIVE')
|
||||
# The instance retrieved on creation is missing network
|
||||
# details, necessitating retrieval after it becomes active to
|
||||
# ensure correct details.
|
||||
server_resp = servers_client.show_server(server['id'])
|
||||
server = server_resp['server']
|
||||
self.assertEqual(server['name'], name)
|
||||
self.servers_on_net[server['id']] = server
|
||||
return server
|
||||
|
||||
def create_provider_network(self, client_mgr=None, create_body=None):
|
||||
name = create_body.get('name', None) or data_utils.rand_name('P-net')
|
||||
create_body['name'] = name
|
||||
client_mgr = client_mgr or self.admin_manager
|
||||
net_network = HELO.create_network(
|
||||
self, client=client_mgr.networks_client, **create_body)
|
||||
return net_network
|
||||
|
||||
def create_provider_subnet(self, client_mgr=None, create_body=None):
|
||||
client_mgr = client_mgr or self.admin_manager
|
||||
subnets_client = client_mgr.subnets_client
|
||||
body = subnets_client.create_subnet(**create_body)
|
||||
net_subnet = body['subnet']
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
subnets_client.delete_subnet,
|
||||
net_subnet['id'])
|
||||
return net_subnet
|
||||
|
||||
def setup_project_network(self, external_network_id,
|
||||
client_mgr=None,
|
||||
namestart=None, client=None,
|
||||
tenant_id=None, cidr_offset=0,
|
||||
**kwargs):
|
||||
"""NOTE:
|
||||
|
||||
Refer to create_networks@scenario/manager.py which might refer
|
||||
to public_router_id which we dont' want to use.
|
||||
|
||||
The test class can define class variable tenant_router_attrs
|
||||
to create different type of routers, or overwrite with kwargs.
|
||||
"""
|
||||
name = namestart or data_utils.rand_name('topo-deploy-tenant')
|
||||
client_mgr = client_mgr or self.manager
|
||||
# _create_router() edits distributed and router_type
|
||||
# Child classes use class var tenant_router_attrs to define
|
||||
# tenant's router type, however, caller can overwrite it with kwargs.
|
||||
distributed = kwargs.get('distributed',
|
||||
self.tenant_router_attrs.get('distributed'))
|
||||
router_type = kwargs.get('router_type',
|
||||
self.tenant_router_attrs.get('router_type'))
|
||||
net_router = self._create_router(
|
||||
client_mgr=client_mgr, tenant_id=tenant_id,
|
||||
namestart=name,
|
||||
distributed=distributed, router_type=router_type)
|
||||
self._router_set_gateway(net_router['id'], external_network_id,
|
||||
client=client_mgr.routers_client)
|
||||
net_network, net_subnet = self.create_network_subnet(
|
||||
client_mgr=client_mgr, name=net_router['name'],
|
||||
tenant_id=tenant_id, cidr_offset=cidr_offset)
|
||||
self._router_add_interface(net_router, net_subnet, client_mgr)
|
||||
return net_network, net_subnet, net_router
|
||||
|
||||
def create_network_subnet(self, client_mgr=None,
|
||||
tenant_id=None, name=None, cidr_offset=0):
|
||||
client_mgr = client_mgr or self.manager
|
||||
tenant_id = tenant_id or _g_tenant_id(client_mgr.networks_client)
|
||||
name = name or data_utils.rand_name('topo-deploy-network')
|
||||
net_network = self.create_network(
|
||||
client=client_mgr.networks_client,
|
||||
tenant_id=tenant_id, name=name)
|
||||
net_subnet = self.create_subnet(
|
||||
client=client_mgr.subnets_client,
|
||||
network=net_network,
|
||||
cidr_offset=cidr_offset, name=net_network['name'])
|
||||
return net_network, net_subnet
|
||||
|
||||
# cloned from _create_network@manager.py. Allow name parameter
|
||||
def create_network(self, client=None, tenant_id=None, name=None,
|
||||
**kwargs):
|
||||
networks_client = client or self.networks_client
|
||||
tenant_id = tenant_id or _g_tenant_id(networks_client)
|
||||
name = name or data_utils.rand_name('topo-deploy-network')
|
||||
return HELO.create_network(self, client=networks_client,
|
||||
tenant_id=tenant_id, name=name,
|
||||
**kwargs)
|
||||
|
||||
def create_subnet(self, network, client=None,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0,
|
||||
allocation_pools=None, dns_nameservers=None,
|
||||
**kwargs):
|
||||
subnets_client = client or self.subnets_client
|
||||
kwargs.update(client=subnets_client, gateway=gateway,
|
||||
cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, ip_version=ip_version,
|
||||
allocation_pools=allocation_pools,
|
||||
dns_nameservers=dns_nameservers)
|
||||
return HELO.create_subnet(self, network, **kwargs)
|
||||
|
||||
def create_floatingip_for_server(self, server, external_network_id=None,
|
||||
port_id=None, client_mgr=None,
|
||||
and_check_assigned=True):
|
||||
client_mgr = client_mgr or self.manager
|
||||
net_floatingip = self.create_floating_ip(
|
||||
server,
|
||||
external_network_id=external_network_id,
|
||||
port_id=port_id,
|
||||
client=client_mgr.floating_ips_client)
|
||||
if port_id:
|
||||
# attached to port, will not check ip assignement & reachability
|
||||
return net_floatingip
|
||||
serv_fip = net_floatingip['floating_ip_address']
|
||||
# in some condiction, remove the serv_fip from your local known_hosts
|
||||
# can solve the ssh "Connection refused" problem.
|
||||
rm_sshkey(serv_fip)
|
||||
if not and_check_assigned:
|
||||
# caller will do the floatingip assigned to server and ping tests
|
||||
return net_floatingip
|
||||
self._waitfor_floatingip_assigned_to_server(client_mgr.servers_client,
|
||||
server.get('id'))
|
||||
server_pingable = self._waitfor_associated_floatingip(net_floatingip)
|
||||
STEPINTO_DEBUG_IF_TRUE(not server_pingable)
|
||||
self.assertTrue(
|
||||
server_pingable,
|
||||
msg=("Expect server to be reachable after"
|
||||
" floating-ip[%s] assigned." % serv_fip))
|
||||
return net_floatingip
|
||||
|
||||
def _waitfor_floatingip_assigned_to_server(self, server_client, server_id,
|
||||
on_network=None,
|
||||
extra_timeout=60):
|
||||
timeout = server_client.build_timeout + extra_timeout
|
||||
interval = server_client.build_interval
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout:
|
||||
sv = server_client.show_server(server_id)
|
||||
sv = sv.get('server', sv)
|
||||
fip = self.get_server_ip_address(sv, 'floating')
|
||||
if fip:
|
||||
elapse_time = time.time() - start_time
|
||||
xmsg = ("%s Take %d seconds to assign floatingip to server[%s]"
|
||||
% ("OS-STATS:", int(elapse_time), sv.get('name')))
|
||||
LOG.debug(xmsg)
|
||||
return fip
|
||||
time.sleep(interval)
|
||||
raise Exception(
|
||||
"Server[%s] did not get its floatingip in %s seconds" %
|
||||
(server_id, timeout))
|
||||
|
||||
def get_server_ip_address(self, server, ip_type='fixed',
                          network_name=None):
    """Return the first address of *server* matching *ip_type*.

    :param server: server dict carrying an 'addresses' mapping of
        network-name -> list of address dicts.
    :param ip_type: 'fixed' or 'floating' (matched on 'OS-EXT-IPS:type').
    :param network_name: restrict the lookup to this network when the
        server has an entry for it; otherwise the first network is used.
    :returns: the address string, or None when no address matches.
    """
    if network_name and server['addresses'].get(network_name):
        net_key = network_name
    else:
        # keys()[0] only worked on Python 2 (keys() is a view on
        # Python 3); next(iter(...)) is equivalent on both.
        net_key = next(iter(server['addresses']))

    for address in server['addresses'][net_key]:
        if address['OS-EXT-IPS:type'] == ip_type:
            return address.get('addr')
    return None
|
||||
|
||||
def _waitfor_associated_floatingip(self, net_floatingip):
    """Wait for the floating IP's address to answer pings; returns bool."""
    return self.waitfor_host_connected(
        net_floatingip['floating_ip_address'])
|
||||
|
||||
def waitfor_host_connected(self, host_ip, ping_timeout=5, msg=None):
|
||||
PING_START = 'ping-progress-start'
|
||||
PING_INSESSION = 'ping-progress-in-session'
|
||||
PING_DONE = 'ping-progress-completed'
|
||||
PING_TIMEOUT = 'ping-progress-timeout'
|
||||
if msg and type(msg) in (str, unicode):
|
||||
xmsg = ("waitfor_host_connected ip=%(ip)s! %(msg)s" %
|
||||
{'ip': host_ip, 'msg': msg})
|
||||
LOG.debug(xmsg)
|
||||
t0 = time.time()
|
||||
t1 = time.time() + WAITTIME_FOR_CONNECTIVITY
|
||||
LOG.debug("VM-IP[%(ip)s] %(msg)s: %(t1)s.",
|
||||
{'ip': host_ip, 'msg': PING_START, 't1': t0})
|
||||
while (time.time() < t1):
|
||||
# waitfor backend to create floatingip & linkages
|
||||
time.sleep(WAITTIME_AFTER_ASSOC_FLOATINGIP)
|
||||
server_pingable = self.ping_ip_address(
|
||||
host_ip, ping_timeout=ping_timeout)
|
||||
if server_pingable:
|
||||
xmsg = ("VM-IP[%(ip)s] %(msg)s: %(t1)s (%(t2)s)." %
|
||||
{'ip': host_ip, 'msg': PING_DONE,
|
||||
't1': time.time(), 't2': (time.time() - t0)})
|
||||
LOG.debug(xmsg)
|
||||
break
|
||||
xmsg = ("VM-IP[%(ip)s] %(msg)s, redo after %(t1)s seconds." %
|
||||
{'ip': host_ip, 'msg': PING_INSESSION,
|
||||
't1': WAITTIME_AFTER_ASSOC_FLOATINGIP})
|
||||
LOG.debug(xmsg)
|
||||
if not server_pingable:
|
||||
xmsg = ("VM-IP[%(ip)s] %(msg)s: %(t1)s (%(t2)s)." %
|
||||
{'ip': host_ip, 'msg': PING_TIMEOUT,
|
||||
't1': time.time(), 't2': (time.time() - t0)})
|
||||
LOG.debug(xmsg)
|
||||
return server_pingable
|
||||
|
||||
def disassociate_floatingip(self, net_floatingip, client=None,
                            and_delete=False):
    """Detach the floating IP from its port; optionally delete it.

    :param and_delete: when True, delete the floating IP after detaching.
    :returns: the updated floatingip body (its port_id is None).
    """
    floating_ips_client = client or self.floating_ips_client
    kwargs = dict(port_id=None)
    floating_ip = floating_ips_client.update_floatingip(
        net_floatingip['id'], **kwargs)
    # some client versions nest the result under 'floatingip'
    floating_ip = floating_ip.get('floatingip', floating_ip)
    self.assertIsNone(floating_ip['port_id'])
    if and_delete:
        floating_ips_client.delete_floatingip(floating_ip['id'])
    return floating_ip
|
||||
|
||||
def associate_floatingip(self, net_floatingip, to_server, client=None):
    """Attach the floating IP to *to_server*'s first port; return the fip."""
    floating_ips_client = client or self.floating_ips_client
    port_id, _ = self._get_server_port_id_and_ip4(to_server)
    kwargs = dict(port_id=port_id)
    floating_ip = floating_ips_client.update_floatingip(
        net_floatingip['id'], **kwargs)['floatingip']
    self.assertEqual(port_id, floating_ip['port_id'])
    return floating_ip
|
||||
|
||||
def check_networks(self, net_network, net_subnet=None, net_router=None):
    """Assert the network/subnet/router are visible (delegates to HELO)."""
    return HELO.check_networks(self, net_network, net_subnet, net_router)
|
||||
|
||||
# use this carefully, as it expect existence of floating_ip_tuple
|
||||
def check_public_network_connectivity(self, should_connect=True,
|
||||
msg=None, ping_timeout=30):
|
||||
"""Verifies connectivty
|
||||
|
||||
To a VM via public network and floating IP, and verifies
|
||||
floating IP has resource status is correct.
|
||||
|
||||
@param should_connect: bool. determines if connectivity check is
|
||||
negative or positive.
|
||||
@param msg: Failure message to add to Error message. Should describe
|
||||
the place in the test scenario where the method was called,
|
||||
to indicate the context of the failure
|
||||
"""
|
||||
floating_ip, server = self.floating_ip_tuple
|
||||
return self._check_floatingip_connectivity(
|
||||
floating_ip, server, should_connect, msg, ping_timeout)
|
||||
|
||||
def _check_floatingip_connectivity(self, floating_ip, server,
|
||||
should_connect=True,
|
||||
msg=None, ping_timeout=30,
|
||||
floating_ips_client=None):
|
||||
ip_address = floating_ip['floating_ip_address']
|
||||
floatingip_status = 'ACTIVE' if should_connect else 'DOWN'
|
||||
is_pingable = self.ping_ip_address(ip_address,
|
||||
ping_timeout=ping_timeout)
|
||||
msg = msg if msg else (
|
||||
"Timeout out waiting for %s to become reachable" % ip_address)
|
||||
if should_connect:
|
||||
self.assertTrue(is_pingable, msg=msg)
|
||||
else:
|
||||
self.assertFalse(is_pingable, msg=msg)
|
||||
self.check_floating_ip_status(floating_ip, floatingip_status,
|
||||
floating_ips_client)
|
||||
|
||||
def check_floating_ip_status(self, floating_ip, status,
|
||||
floating_ips_client=None):
|
||||
"""Verifies floatingip reaches the given status
|
||||
|
||||
:param dict floating_ip: floating IP dict to check status
|
||||
:param status: target status
|
||||
:raises: AssertionError if status doesn't match
|
||||
"""
|
||||
floating_ips_client = floating_ips_client or self.floating_ips_client
|
||||
floatingip_id = floating_ip['id']
|
||||
|
||||
def refresh():
|
||||
result = (floating_ips_client.
|
||||
show_floatingip(floatingip_id)['floatingip'])
|
||||
return status == result['status']
|
||||
|
||||
test_utils.call_until_true(refresh,
|
||||
CONF.network.build_timeout,
|
||||
CONF.network.build_interval)
|
||||
floating_ip = floating_ips_client.show_floatingip(
|
||||
floatingip_id)['floatingip']
|
||||
self.assertEqual(status, floating_ip['status'],
|
||||
message="FloatingIP: {fp} is at status: {cst}. "
|
||||
"failed to reach status: {st}"
|
||||
.format(fp=floating_ip, cst=floating_ip['status'],
|
||||
st=status))
|
||||
LOG.info("FloatingIP: {fp} is at status: {st}"
|
||||
.format(fp=floating_ip, st=status))
|
||||
|
||||
def get_image_userpass(self):
    """Return (ssh user, ssh password) configured for the guest image."""
    return (CONF.validation.image_ssh_user,
            CONF.validation.image_ssh_password)
|
||||
|
||||
def get_server_image(self):
    """Return the configured image ref used to boot servers."""
    return CONF.compute.image_ref
|
||||
|
||||
def get_server_flavor(self):
    """Return the configured flavor ref used to boot servers."""
    return CONF.compute.flavor_ref
|
||||
|
||||
|
||||
# common utilities
|
||||
def make_node_info(net_floatingip, username, password,
                   include_outside_servers=False):
    """Build the probe descriptor used by the connectivity checkers.

    The node dict carries ssh credentials for the host (reached via its
    floating IP) plus a 'dest' list of addresses to probe — the floating
    and fixed addresses, and optionally a well-known outside server.
    Each dest's 'reachable' slot starts as None and is filled in later.
    """
    floating_ip_address = net_floatingip['floating_ip_address']
    fixed_ip_address = net_floatingip['fixed_ip_address']
    node = dict(ipaddr=floating_ip_address,
                username=username, password=password)
    node['dest'] = [dict(ipaddr=floating_ip_address,
                         reachable=None, helper=IPTYPE_FLOATING),
                    dict(ipaddr=fixed_ip_address,
                         reachable=None, helper=IPTYPE_FIXED)]
    if include_outside_servers:
        outside_servers = dict(ipaddr=OUTSIDE_WORLD_SERVERS[0],
                               reachable=None, helper=IPTYPE_OUTSIDE_SERVER)
        node['dest'].append(outside_servers)

    return node
|
||||
|
||||
|
||||
# we want to check that dest[iptype] is not reachable for at least
# (x_contd + 1) consecutive probes, to be sure it is really unreachable.
|
||||
def check_host_not_reachable(host, dest_list, iptype_list,
                             time_out=10, repeat_cnt=12,
                             x_contd=2):
    """Return True when *host* repeatedly fails to reach its destinations.

    A destination set counts as genuinely unreachable only after more
    than *x_contd* consecutive failed probe rounds, filtering out
    transient glitches.

    :param repeat_cnt: maximum number of probe rounds.  (The previous
        code ignored this parameter and always ran 12 rounds.)

    Fixes: ``check_host_is_reachable`` returns True when the host CAN
    reach everything, so its result must be negated before being counted
    as a failure; the old code counted successful rounds instead and
    could never report an unreachable host.
    """
    consecutive_failures = 0
    for _ in range(repeat_cnt):
        not_reachable = not check_host_is_reachable(
            host, dest_list, iptype_list, time_out=time_out)
        if not_reachable:
            consecutive_failures += 1
        else:
            consecutive_failures = 0
        if consecutive_failures > x_contd:
            return True
    return False
|
||||
|
||||
|
||||
# check_hosts_connectivity
|
||||
def check_host_is_reachable(host, dest_list, iptype_list, time_out=120):
    """SSH into *host* and ping every destination of the requested iptypes.

    Each dest dict gets its 'reachable' field updated in place: True or
    False when probed, None when its helper matches no requested iptype.

    :param host: node dict from make_node_info (ipaddr/username/password).
    :param dest_list: list of dest dicts with 'ipaddr' and 'helper'.
    :param iptype_list: iptypes (e.g. floating/fixed) to probe this round.
    :returns: True when every probed destination was reachable.
    """
    rm_sshkey(host['ipaddr'])
    ssh_client = get_remote_client_by_password(host['ipaddr'],
                                               host['username'],
                                               host['password'])
    n_not_reachable = 0
    for dest in dest_list:
        for iptype in iptype_list:
            if not dest_has_iptype(dest, iptype):
                # not part of this probe round
                dest['reachable'] = None
                continue
            dest['reachable'] = is_reachable(
                ssh_client, dest['ipaddr'], time_out=time_out)
            if not dest['reachable']:
                n_not_reachable += 1
                xmsg = {'h_ipaddr': host['ipaddr'],
                        'msg': "can-not-reach-dest",
                        'helper': dest['helper'],
                        'd_ipaddr': dest['ipaddr']}
                LOG.debug(Z_VM2_DEST, xmsg)
            else:
                # fixed log label: this is the success branch (the old
                # message mislabelled reachable dests as "can-not-dest")
                xmsg = {'h_ipaddr': host['ipaddr'],
                        'msg': "can-reach-dest",
                        'helper': dest['helper'],
                        'd_ipaddr': dest['ipaddr']}
                LOG.debug(Z_VM2_DEST, xmsg)
    return (False if n_not_reachable else True)
|
||||
|
||||
|
||||
def dest_has_iptype(dest, iptype):
    """True when *dest* carries a 'helper' tag matching *iptype* (case-insensitive)."""
    if 'helper' not in dest:
        return False
    return bool(re.search(iptype, dest['helper'], re.I))
|
||||
|
||||
|
||||
def check_hosts_connectivity(host, dest_list, ignore_helper=None,
                             time_out=120):
    """Probe every destination from *host*; return how many were unreachable.

    Each dest dict gets its 'reachable' field updated in place (None when
    its helper matches *ignore_helper* and the probe is skipped).

    NOTE(review): references the Python 2 only name ``unicode`` — this
    module predates Python 3 support; confirm before running under py3.
    """
    rm_sshkey(host['ipaddr'])
    ssh_client = get_remote_client_by_password(host['ipaddr'],
                                               host['username'],
                                               host['password'])
    n_not_reachable = 0
    for dest in dest_list:
        # caller can say to ignore dest ipaddr
        if ('helper' in dest and type(ignore_helper) in (str, unicode) and
                re.search(ignore_helper, dest['helper'], re.I)):
            dest['reachable'] = None
            continue
        dest['reachable'] = is_reachable(ssh_client, dest['ipaddr'],
                                         time_out=time_out)
        if not dest['reachable']:
            n_not_reachable += 1
            xmsg = {'h_ipaddr': host['ipaddr'],
                    'msg': "can-not-reach-dest",
                    'helper': dest['helper'],
                    'd_ipaddr': dest['ipaddr']}
            LOG.debug(Z_VM2_DEST, xmsg)
        else:
            xmsg = {'h_ipaddr': host['ipaddr'],
                    'msg': "can-reach-dest",
                    'helper': dest['helper'],
                    'd_ipaddr': dest['ipaddr']}
            LOG.debug(Z_VM2_DEST, xmsg)

    return n_not_reachable
|
||||
|
||||
|
||||
def rm_sshkey(ip_addr):
    """Drop *ip_addr* from ~/.ssh/known_hosts.

    A rebuilt VM presents a new host key; removing the stale entry avoids
    ssh failures such as "Connection refused"/host-key mismatch.
    Equivalent to: ssh-keygen -f ~/.ssh/known_hosts -R <ip_addr>

    :returns: ssh-keygen's exit code.
    """
    known_hosts = os.path.join(os.environ.get('HOME', '/home/stack'),
                               '.ssh/known_hosts')
    keygen_cmd = ['ssh-keygen', '-f', known_hosts, '-R', ip_addr]

    proc = subprocess.Popen(keygen_cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    proc.communicate()
    return proc.returncode
|
||||
|
||||
|
||||
def is_reachable(ssh_client, dest_ip, time_out=60.0, ping_timeout=5.0):
    """Ping *dest_ip* from *ssh_client* until success or *time_out* expires.

    Probes roughly every *ping_timeout* seconds; returns True on the
    first successful ping, False once the overall deadline passes.
    """
    for now in run_till_timeout(time_out, ping_timeout):
        reachable = dest_is_reachable(ssh_client, dest_ip)
        if reachable:
            return True
        # NOTE(review): the message interpolates time_out, but the
        # actual retry interval is ping_timeout — log text is misleading.
        LOG.debug("DEST[%(ip)s] NOT-REACHABLE, retry in %(t1)s seconds.",
                  {'ip': dest_ip, 't1': time_out})
    return False
|
||||
|
||||
|
||||
def isnot_reachable(ssh_client, dest_ip, time_out=60.0, ping_timeout=5.0,
                    idle_time=2.0):
    """Return True once *dest_ip* stops answering pings from *ssh_client*.

    Sleeps *idle_time* first so dataplane changes can settle, then probes
    roughly every *ping_timeout* seconds until *time_out* expires.
    """
    if idle_time > 0.0:
        time.sleep(idle_time)
    for now in run_till_timeout(time_out, ping_timeout):
        reachable = dest_is_reachable(ssh_client, dest_ip)
        if not reachable:
            return True
        # NOTE(review): the message interpolates time_out, but the
        # actual retry interval is ping_timeout — log text is misleading.
        LOG.debug("DEST[%(ip)s] IS-REACHABLE, retry in %(t1)s seconds.",
                  {'ip': dest_ip, 't1': time_out})
    return False
|
||||
|
||||
|
||||
def dest_is_reachable(ssh_client, dest_ip):
    """Single ping probe of *dest_ip* through *ssh_client*.

    Parses ping's summary line; reachable means at least one packet was
    transmitted and packet loss was 0%.  Any error (ssh failure,
    unparsable output) is logged and treated as unreachable.

    Fix: the old pattern's trailing ``(\\d+).*loss`` let the greedy
    ``.*`` swallow all but the final digit, so "100% packet loss"
    captured loss as "0" and reported the host reachable.  The loss
    figure is now anchored to the digits immediately before '%'.
    """
    XPTN = r"(\d+).*transmit.*(\d+).*receive.*?(\d+)(?:\.\d+)?%.*loss"
    try:
        result = ssh_client.ping_host(dest_ip)
        m = re.search(XPTN, result, (re.I | re.M))
        if m and int(m.group(1)) > 0 and int(m.group(3)) == 0:
            return True
        else:
            return False
    except Exception:
        tb_str = traceback.format_exc()
        mesg = ("ERROR on testing dest_ip[%s] is reachable:\n%s" %
                (dest_ip, tb_str))
        LOG.debug(mesg)
        return False
|
||||
|
||||
|
||||
def run_till_timeout(seconds_to_try, interval=5.0):
    """Yield the current time every *interval* seconds until *seconds_to_try* elapses."""
    current = time.time()
    deadline = current + seconds_to_try
    while current < deadline:
        yield current
        time.sleep(interval)
        current = time.time()
|
||||
|
||||
|
||||
def _g_tenant_id(os_client):
|
||||
try:
|
||||
return os_client.tenant_id
|
||||
except Exception:
|
||||
return os_client.rest_client.tenant_id
|
||||
|
||||
|
||||
def get_remote_client_by_password(client_ip, username, password):
    """Build a password-authenticated RemoteClient for *client_ip*."""
    return remote_client.RemoteClient(client_ip, username, password)
|
||||
|
||||
|
||||
def delete_all_servers(tenant_servers_client, trys=5):
    """Delete every server owned by the tenant and wait for termination.

    The wait is retried up to *trys* times (swallowing errors) before a
    final, unguarded wait; servers must be gone before dependent network
    resources can be deleted.
    """
    for server in tenant_servers_client.list_servers()['servers']:
        tenant_servers_client.delete_server(server['id'])
    for _attempt in range(trys):
        try:
            waitfor_servers_terminated(tenant_servers_client)
        except Exception:
            continue
        return
    # last try; let any exception propagate
    waitfor_servers_terminated(tenant_servers_client)
|
||||
|
||||
|
||||
def waitfor_servers_terminated(tenant_servers_client, pause=2.0):
    """Block until the tenant's server list is empty, polling every *pause* seconds."""
    while tenant_servers_client.list_servers()['servers']:
        time.sleep(pause)
|
||||
|
||||
|
||||
def copy_file_to_host(file_from, dest, host, username, pkey):
    """scp *file_from* to *dest* on *host*, authenticating with key *pkey*.

    Host-key checking is disabled because test VMs are transient.

    :returns: scp's combined stdout/stderr output.
    :raises exceptions.SSHExecCommandFailed: when scp exits non-zero.
    """
    dest = "%s@%s:%s" % (username, host, dest)
    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
          "-o StrictHostKeyChecking=no " \
          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
                                              'file1': file_from,
                                              'dest': dest}
    # shlex.split() must receive text: the old cmd.encode('utf-8') made
    # it bytes, which shlex.split() rejects on Python 3 (and was a
    # pointless round-trip on Python 2).
    args = shlex.split(cmd)
    subprocess_args = {'stdout': subprocess.PIPE,
                       'stderr': subprocess.STDOUT}
    proc = subprocess.Popen(args, **subprocess_args)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise exceptions.SSHExecCommandFailed(cmd,
                                              proc.returncode,
                                              stdout,
                                              stderr)
    return stdout
|
||||
|
||||
|
||||
def STEPINTO_DEBUG_IF_TRUE(want2debug=False):
    """Hook to drop into pdb, but only under a deliberately huge test timeout.

    The OS_TEST_TIMEOUT > 1 day guard keeps a forgotten breakpoint from
    hanging a normal tempest run.
    """
    test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', 0))
    if not (want2debug and test_timeout > 86400):
        return
    # uncomment the following to actually break into the debugger
    # import pdb
    # pdb.set_trace()
|
@ -1,286 +0,0 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# This module contains the methods added to test class that to be shared by
|
||||
# scenario tests that are inherent from tempest/scneario/manager.py or
|
||||
# manager_topo_deployment.py
|
||||
|
||||
import netaddr
|
||||
from oslo_log import log
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
NO_ROUTER_TYPE = CONF.nsxv.no_router_type
|
||||
|
||||
|
||||
# following router methods are not support by upstream tempest,
|
||||
def router_create(SELF, client=None, tenant_id=None,
                  namestart='nsxv-router',
                  admin_state_up=True, **kwargs):
    """Create a router, honoring NSX-v specific extension attributes.

    Unlike upstream tempest, this accepts the NSX-v 'distributed' and
    'router_type' attrs.  The router is deleted on test cleanup.

    :param client: routers client; defaults to SELF.routers_client.
    :param tenant_id: owner tenant when it differs from the client's own.
    :param namestart: prefix for the generated name when 'name' absent.
    :param kwargs: may carry 'distributed', 'router_type',
        'no_router_type' (all popped here) plus create_router attrs.
    :returns: the created router body.
    """
    routers_client = client or SELF.routers_client
    no_router_type = kwargs.pop('no_router_type', False)
    if tenant_id:
        if routers_client.tenant_id != tenant_id:
            kwargs['tenant_id'] = tenant_id
    # distributed and router_type are alternative NSX-v attributes
    distributed = kwargs.pop('distributed', None)
    router_type = kwargs.pop('router_type', None)
    if distributed:
        kwargs['distributed'] = True
    elif router_type in ('shared', 'exclusive'):
        kwargs['router_type'] = router_type
    name = kwargs.pop('name', None) or data_utils.rand_name(namestart)
    kwargs['name'] = name
    kwargs['admin_state_up'] = admin_state_up
    if NO_ROUTER_TYPE or no_router_type:
        # router_type is an NSX-v extension;
        # caller can set no_router_type=True to strip it
        kwargs.pop('router_type', None)
    result = routers_client.create_router(**kwargs)
    router = result['router']
    SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    routers_client.delete_router, router['id'])
    SELF.assertEqual(router['name'], name)
    return router
|
||||
|
||||
|
||||
def router_delete(SELF, router_id):
    """Delete router *router_id* with the test's default routers client."""
    SELF.routers_client.delete_router(router_id)
|
||||
|
||||
|
||||
def router_gateway_set(SELF, router_id, network_id, client=None):
    """Set the router's external gateway to *network_id*.

    Registers a cleanup that clears the gateway again (ignoring
    not-found) and returns the refreshed router body.
    """
    routers_client = client or SELF.routers_client
    routers_client.update_router(
        router_id,
        external_gateway_info=dict(network_id=network_id))
    SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    router_gateway_clear, SELF,
                    router_id, client=routers_client)
    router = routers_client.show_router(router_id)
    # some client versions nest the result under 'router'
    return router.get('router', router)
|
||||
|
||||
|
||||
def router_gateway_clear(SELF, router_id, client=None):
    """Remove the router's external gateway; return the refreshed router body."""
    routers_client = client or SELF.routers_client
    routers_client.update_router(router_id,
                                 external_gateway_info=dict())
    refreshed = routers_client.show_router(router_id)
    # some client versions nest the result under 'router'
    return refreshed.get('router', refreshed)
|
||||
|
||||
|
||||
def router_update_extra_routes(SELF, router_id, routes, client=None):
    """Replace the router's extra static routes with *routes*; return the router body."""
    rc = client or SELF.routers_client
    resp = rc.update_route(router_id, routes=routes)
    return resp.get('router', resp)
|
||||
|
||||
|
||||
def router_delete_extra_routes(SELF, router_id, client=None):
    """Clear all extra static routes from the router; return the router body."""
    rc = client or SELF.routers_client
    resp = rc.update_route(router_id, routes=None)
    return resp.get('router', resp)
|
||||
|
||||
|
||||
def router_interface_add(SELF, router_id, subnet_id, client=None):
    """Attach subnet *subnet_id* to the router; detach again on cleanup."""
    routers_client = client or SELF.routers_client
    routers_client.add_router_interface(router_id,
                                        subnet_id=subnet_id)
    SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    routers_client.remove_router_interface,
                    router_id, subnet_id=subnet_id)
|
||||
|
||||
|
||||
def router_interface_delete(SELF, router_id, subnet_id, client=None):
    """Detach the subnet's interface from the router."""
    rc = client or SELF.routers_client
    rc.remove_router_interface(router_id, subnet_id=subnet_id)
|
||||
|
||||
|
||||
def router_add_interface(SELF, net_router, net_subnet, client_mgr):
    """Attach *net_subnet* to *net_router* using client_mgr's routers client."""
    return router_interface_add(SELF, net_router['id'], net_subnet['id'],
                                client_mgr.routers_client)
|
||||
|
||||
|
||||
def router_port_interface_add(SELF, router_id, port_id, client=None):
    """Attach port *port_id* to the router; detach again on cleanup."""
    routers_client = client or SELF.routers_client
    routers_client.add_router_interface(router_id,
                                        port_id=port_id)
    SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    routers_client.remove_router_interface,
                    router_id, port_id=port_id)
|
||||
|
||||
|
||||
def router_add_port_interface(SELF, net_router, net_port, client_mgr):
    """Attach *net_port* to *net_router* using client_mgr's routers client."""
    return router_port_interface_add(SELF, net_router['id'], net_port['id'],
                                     client_mgr.routers_client)
|
||||
|
||||
|
||||
def check_networks(SELF, t_network, t_subnet=None, t_router=None):
    """Assert the newly created network/subnet/router are visible.

    Verifies each resource appears in the admin client's
    list_networks/list_subnets/list_routers results; subnet and router
    checks run only when the corresponding argument is given.
    """

    seen_nets = SELF.admin_manager.networks_client.list_networks()['networks']
    seen_names = [n['name'] for n in seen_nets]
    seen_ids = [n['id'] for n in seen_nets]
    SELF.assertIn(t_network['name'], seen_names)
    SELF.assertIn(t_network['id'], seen_ids)

    if t_subnet:
        seen_subnets = SELF.admin_manager.subnets_client.list_subnets()
        seen_net_ids = [n['network_id'] for n in seen_subnets['subnets']]
        seen_subnet_ids = [n['id'] for n in seen_subnets['subnets']]
        # subnet must exist AND be attached to our network
        SELF.assertIn(t_network['id'], seen_net_ids)
        SELF.assertIn(t_subnet['id'], seen_subnet_ids)

    if t_router:
        seen_routers = SELF.admin_manager.routers_client.list_routers()
        seen_router_ids = [n['id'] for n in seen_routers['routers']]
        seen_router_names = [n['name'] for n in seen_routers['routers']]
        SELF.assertIn(t_router['name'],
                      seen_router_names)
        SELF.assertIn(t_router['id'],
                      seen_router_ids)
|
||||
|
||||
|
||||
def create_network_subnet(SELF, client_mgr=None, name=None,
                          tenant_id=None, cidr_offset=0):
    """Create a network plus one subnet on it; returns (network, subnet).

    :param client_mgr: tempest client manager; defaults to SELF.manager.
    :param cidr_offset: shifts the subnet cidr to avoid collisions.
    """
    client_mgr = client_mgr or SELF.manager
    networks_client = client_mgr.networks_client
    subnets_client = client_mgr.subnets_client
    tenant_id = tenant_id or networks_client.tenant_id
    name = name or data_utils.rand_name('network')
    net_network = create_network(SELF, client=networks_client,
                                 tenant_id=tenant_id, name=name)
    net_subnet = create_subnet(SELF, client=subnets_client,
                               network=net_network,
                               name=net_network['name'],
                               cidr_offset=cidr_offset)
    return net_network, net_subnet
|
||||
|
||||
|
||||
# cloned from _create_network@manager.py. Allow name parameter
|
||||
def create_network(SELF, client=None, tenant_id=None, name=None, **kwargs):
|
||||
networks_client = client or SELF.networks_client
|
||||
tenant_id = tenant_id or networks_client.tenant_id
|
||||
name = name or data_utils.rand_name('network')
|
||||
body = networks_client.create_network(name=name,
|
||||
tenant_id=tenant_id,
|
||||
**kwargs)
|
||||
net_network = body['network']
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
net_network['id'])
|
||||
SELF.assertEqual(net_network['name'], name)
|
||||
return net_network
|
||||
|
||||
|
||||
def create_port(SELF, client=None, **kwargs):
|
||||
if not client:
|
||||
client = SELF.port_client
|
||||
result = client.create_port(**kwargs)
|
||||
net_port = result['port']
|
||||
SELF.assertIsNotNone(result, 'Unable to allocate port')
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
client.delete_port,
|
||||
net_port['id'])
|
||||
|
||||
return net_port
|
||||
|
||||
|
||||
# gateway=None means don't set gateway_ip in subnet
|
||||
def create_subnet(SELF, network, client=None,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0,
|
||||
allocation_pools=None, dns_nameservers=None,
|
||||
**kwargs):
|
||||
subnets_client = client or SELF.subnets_client
|
||||
network_id = network['id']
|
||||
ip_version = ip_version or 4
|
||||
post_body = get_subnet_create_options(
|
||||
network_id, ip_version,
|
||||
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
if allocation_pools:
|
||||
post_body['allocation_pools'] = allocation_pools
|
||||
if dns_nameservers:
|
||||
post_body['dns_nameservers'] = dns_nameservers
|
||||
LOG.debug("create_subnet args: %s", post_body)
|
||||
body = subnets_client.create_subnet(**post_body)
|
||||
net_subnet = body['subnet']
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
subnets_client.delete_subnet,
|
||||
net_subnet['id'])
|
||||
return net_subnet
|
||||
|
||||
|
||||
# utilities
|
||||
def get_subnet_create_options(network_id, ip_version=4,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
num_subnet=1, gateway_offset=1, cidr_offset=0,
|
||||
**kwargs):
|
||||
"""When cidr_offset>0 it request only one subnet-options:
|
||||
|
||||
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
|
||||
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
|
||||
"""
|
||||
|
||||
gateway_not_set = (gateway == '')
|
||||
if ip_version == 4:
|
||||
cidr = cidr or netaddr.IPNetwork(CONF.network.project_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.project_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = (
|
||||
cidr or netaddr.IPNetwork(CONF.network.project_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet_list = []
|
||||
if cidr_offset > 0:
|
||||
num_subnet = cidr_offset + 1
|
||||
for subnet_cidr in cidr.subnet(mask_bits):
|
||||
if gateway_not_set:
|
||||
gateway_ip = gateway or (
|
||||
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
|
||||
else:
|
||||
gateway_ip = gateway
|
||||
try:
|
||||
subnet_body = dict(network_id=network_id,
|
||||
cidr=str(subnet_cidr),
|
||||
ip_version=ip_version,
|
||||
gateway_ip=gateway_ip,
|
||||
**kwargs)
|
||||
if num_subnet <= 1:
|
||||
return subnet_body
|
||||
subnet_list.append(subnet_body)
|
||||
if len(subnet_list) >= num_subnet:
|
||||
if cidr_offset > 0:
|
||||
# user request the 'cidr_offset'th of cidr
|
||||
return subnet_list[cidr_offset]
|
||||
# user request list of cidr
|
||||
return subnet_list
|
||||
except exceptions.BadRequest as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
else:
|
||||
message = 'Available CIDR for subnet creation could not be found'
|
||||
raise exceptions.BuildErrorException(message)
|
||||
return {}
|
@ -1,594 +0,0 @@
|
||||
# Copyright 2016 VMware Inc
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import time
|
||||
|
||||
import six
|
||||
from tempest.common import waiters
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest import test
|
||||
|
||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||
manager_topo_deployment as dmgr)
|
||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||
network_addon_methods as HELO)
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = dmgr.manager.log.getLogger(__name__)
|
||||
|
||||
|
||||
class TestAdminPolicyBasicOps(dmgr.TopoDeployScenarioManager):
|
||||
"""Test VMs with security-group-policy traffic is managed by NSX
|
||||
|
||||
Test topology:
|
||||
TOPO:
|
||||
|
||||
logical-router nasa-router] -- [ public GW]
|
||||
|
|
||||
+--- [Tenant jpl interface/subnet x.y.34.0/24]
|
||||
| | |
|
||||
| + [vm-nasa-jpl-3] + [vm-nasa-jpl-4]
|
||||
|
|
||||
+--- [Tenant ames interface/subnet x.y.12.0/24]
|
||||
| | |
|
||||
| + [vm-nasa-ames-1] + [vm-nasa-ames-2]
|
||||
|
||||
Test topology setup and traffic forwarding validation:
|
||||
|
||||
1. 2 tenants (ames, jpl) each tenant has 2 VMs, and boot with
|
||||
security-group with policy==policy_AA which must allow
|
||||
ping and ssh services as automation relys on this to make
|
||||
sure test environment network connectivity is an OK.
|
||||
NOTE:
|
||||
primary user: ames -- NASA Ames Research Center
|
||||
alt user: jpl -- NASA Jet Propulsion Laboratory
|
||||
2. Admin create router (nasa-router) with both tenants' network
|
||||
so tenant:ames and tenant:jpl can talk to each other
|
||||
according to policy_AA.
|
||||
3. under policy_AA, all servers can be ping and ssh from anywhere
|
||||
4. Admin change tenant:jpl's policy to policy_BB
|
||||
5. Tenant jpl's VMs are not pingable, ssh still OK
|
||||
Tenant ames's MVs, both ping and ssh are OK
|
||||
6. Admin change tenant:ames's policy to policy_BB
|
||||
VMs from ames and jpl are not pingalbe; ssh is OK
|
||||
|
||||
ATTENTION:
|
||||
config nsxv.default_policy_id is policy_AA
|
||||
config nsxv.alt_policy_is is policy_BB
|
||||
|
||||
The testbed needs to have policy_AA and policy_BB created
|
||||
and matched with the default_policy_id & alt_plicy_id under
|
||||
session nsxv of tempest.conf or devstack local.conf.
|
||||
|
||||
Test Configuration setup:
|
||||
please refer to vmware_nsx_tempest/doc/README-AdminPolicy.rst
|
||||
"""
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
super(TestAdminPolicyBasicOps, cls).skip_checks()
|
||||
if not test.is_extension_enabled('security-group-policy', 'network'):
|
||||
msg = "Extension security-group-policy is not enabled."
|
||||
raise cls.skipException(msg)
|
||||
if not (CONF.nsxv.alt_policy_id.startswith('policy-') and
|
||||
CONF.nsxv.default_policy_id.startswith('policy-')):
|
||||
msg = "default and alt policy ids not set correctly."
|
||||
raise cls.skipException(msg)
|
||||
|
||||
@classmethod
|
||||
def setup_clients(cls):
|
||||
super(TestAdminPolicyBasicOps, cls).setup_clients()
|
||||
cls.cmgr_adm = cls.get_client_manager('admin')
|
||||
cls.cmgr_ames = cls.get_client_manager('primary')
|
||||
cls.cmgr_jpl = cls.get_client_manager('alt')
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(TestAdminPolicyBasicOps, cls).resource_setup()
|
||||
cls.policy_AA = CONF.nsxv.default_policy_id
|
||||
cls.policy_BB = CONF.nsxv.alt_policy_id
|
||||
cls.conn_timeout = CONF.scenario.waitfor_connectivity
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
super(TestAdminPolicyBasicOps, cls).resource_cleanup()
|
||||
|
||||
def setUp(self):
|
||||
super(TestAdminPolicyBasicOps, self).setUp()
|
||||
self.server_id_list = []
|
||||
self.exc_step = 0
|
||||
self.exc_msg = ("Admin-Policy-Traffic-Forwarding"
|
||||
" Validation Steps:\n")
|
||||
|
||||
def tearDown(self):
|
||||
# delete all servers and make sure they are terminated
|
||||
servers_client = self.cmgr_adm.servers_client
|
||||
server_id_list = getattr(self, 'server_id_list', [])
|
||||
for server_id in server_id_list:
|
||||
servers_client.delete_server(server_id)
|
||||
for server_id in server_id_list:
|
||||
waiters.wait_for_server_termination(servers_client, server_id)
|
||||
# delete all floating-ips
|
||||
if hasattr(self, 'fip_nasa_ames_1'):
|
||||
self.delete_floatingip(self.cmgr_ames, self.fip_nasa_ames_1)
|
||||
if hasattr(self, 'fip_nasa_jpl_3'):
|
||||
self.delete_floatingip(self.cmgr_jpl, self.fip_nasa_jpl_3)
|
||||
super(TestAdminPolicyBasicOps, self).tearDown()
|
||||
|
||||
def log_exc_msg(self, msg):
|
||||
self.exc_step += 1
|
||||
self.exc_msg += ("#%02d %s %s\n" %
|
||||
(self.exc_step, time.strftime("%H:%M:%S"), msg))
|
||||
|
||||
def delete_floatingip(self, cmgr, net_floatingip):
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cmgr.floating_ips_client.delete_floatingip,
|
||||
net_floatingip.get('id'))
|
||||
|
||||
def delete_security_group(self, sg_client, sg_id):
|
||||
sg_client.delete_security_group(sg_id)
|
||||
|
||||
def update_security_group_policy(self, sg_id, policy_id):
|
||||
sg_client = self.cmgr_adm.security_groups_client
|
||||
sg = sg_client.update_security_group(sg_id, policy=policy_id)
|
||||
sg = sg.get('security_group', sg)
|
||||
self.assertEqual(policy_id, sg.get('policy'))
|
||||
return sg
|
||||
|
||||
def create_security_group_policy(self, policy_id, tenant_id,
|
||||
name_prefix=None):
|
||||
sg_name = data_utils.rand_name(name_prefix or 'admin-policy')
|
||||
sg_client = self.cmgr_adm.security_groups_client
|
||||
sg_dict = dict(name=sg_name, policy=policy_id)
|
||||
if tenant_id:
|
||||
sg_dict['tenant_id'] = tenant_id
|
||||
sg = sg_client.create_security_group(**sg_dict)
|
||||
sg = sg.get('security_group', sg)
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_security_group,
|
||||
sg_client, sg.get('id'))
|
||||
return sg
|
||||
|
||||
def create_networks(self, cmgr,
|
||||
name_prefix=None, cidr_offset=0):
|
||||
net_name = data_utils.rand_name(name_prefix or 'admin-policy')
|
||||
network = self.create_network(client=cmgr.networks_client,
|
||||
name=net_name)
|
||||
network = network.get('network', network)
|
||||
subnet_kwargs = dict(name=net_name, cidr_offset=cidr_offset)
|
||||
subnet = self.create_subnet(network,
|
||||
client=cmgr.subnets_client,
|
||||
**subnet_kwargs)
|
||||
subnet = subnet.get('subnet', subnet)
|
||||
return (network, subnet)
|
||||
|
||||
def create_router_by_type(self, router_type, client=None, **kwargs):
|
||||
routers_client = client or self.cmgr_adm.routers_client
|
||||
create_kwargs = dict(namestart='nasa-router', external_gateway_info={
|
||||
"network_id": CONF.network.public_network_id})
|
||||
if router_type in ('shared', 'exclusive'):
|
||||
create_kwargs['router_type'] = router_type
|
||||
elif router_type in ('distributed'):
|
||||
create_kwargs['distributed'] = True
|
||||
create_kwargs.update(**kwargs)
|
||||
router = HELO.router_create(self, client=routers_client,
|
||||
**create_kwargs)
|
||||
return router
|
||||
|
||||
def create_router_and_add_interfaces(self, router_type, subnet_list):
|
||||
routers_client = self.cmgr_adm.routers_client
|
||||
router = self.create_router_by_type(router_type)
|
||||
for subnet in subnet_list:
|
||||
HELO.router_interface_add(self, router['id'], subnet['id'],
|
||||
client=routers_client)
|
||||
# check interfaces/subnets are added to router
|
||||
router_port_list = self.get_router_port_list(self.cmgr_adm,
|
||||
router['id'])
|
||||
for subnet in subnet_list:
|
||||
added = self.rports_have_subnet_id(router_port_list, subnet['id'])
|
||||
self.assertTrue(
|
||||
added,
|
||||
"subnet_id:%s is not added to router" % subnet['id'])
|
||||
return router
|
||||
|
||||
def rports_have_subnet_id(self, router_port_list, subnet_id):
|
||||
for rport in router_port_list:
|
||||
for fips in rport.get('fixed_ips', []):
|
||||
if subnet_id == fips['subnet_id']:
|
||||
return True
|
||||
return False
|
||||
|
||||
def get_router_port_list(self, cmgr, router_id):
|
||||
device_owner = u'network:router_interface'
|
||||
ports_client = cmgr.ports_client
|
||||
port_list = ports_client.list_ports(device_id=router_id,
|
||||
device_owner=device_owner)
|
||||
port_list = port_list.get('ports', port_list)
|
||||
return port_list
|
||||
|
||||
def create_servers_on_networks(self, cmgr, sv_name, networks_info):
|
||||
network = networks_info.get('network')
|
||||
security_group = networks_info.get('security_group')
|
||||
security_groups = [{'name': security_group['id']}]
|
||||
svr = self.create_server_on_network(
|
||||
network, security_groups, name=sv_name,
|
||||
wait_on_boot=False,
|
||||
servers_client=cmgr.servers_client)
|
||||
self.server_id_list.append(svr.get('id'))
|
||||
return svr
|
||||
|
||||
def get_server_info(self, cmgr, server_id):
|
||||
"""Get server's ip addresses"""
|
||||
svr = cmgr.servers_client.show_server(server_id)
|
||||
svr = svr.get('server', svr)
|
||||
sinfo = dict(id=svr['id'], name=svr['name'],
|
||||
security_gropus=svr['security_groups'],
|
||||
fixed_ip_address=None, floating_ip_address=None)
|
||||
addresses = svr.get('addresses')
|
||||
for n_addresses in six.itervalues(addresses):
|
||||
for n_addr in n_addresses:
|
||||
if n_addr['OS-EXT-IPS:type'] == 'fixed':
|
||||
if not sinfo['fixed_ip_address']:
|
||||
sinfo['fixed_ip_address'] = n_addr['addr']
|
||||
elif n_addr['OS-EXT-IPS:type'] == 'floating':
|
||||
if not sinfo['floating_ip_address']:
|
||||
sinfo['floating_ip_address'] = n_addr['addr']
|
||||
return sinfo
|
||||
|
||||
def create_floatingip_for_server(self, cmgr, server):
|
||||
username, password = self.get_image_userpass()
|
||||
try:
|
||||
floatingip = super(
|
||||
TestAdminPolicyBasicOps,
|
||||
self).create_floatingip_for_server(
|
||||
server, client_mgr=cmgr, and_check_assigned=True)
|
||||
except Exception as ex:
|
||||
floatingip = None
|
||||
msg = (self.exc_msg +
|
||||
("\n**FAIL to associate floatingip to server[%s]\n%s"
|
||||
% (server['name'], str(ex))))
|
||||
self.assertTrue(floatingip, msg)
|
||||
fix_ip = floatingip['fixed_ip_address']
|
||||
float_ip = floatingip['floating_ip_address']
|
||||
self.log_exc_msg((" floatingip[%s] created for server[%s,%s]"
|
||||
" and is pingable." %
|
||||
(float_ip, server.get('name'), fix_ip)))
|
||||
return floatingip
|
||||
|
||||
def wait_for_servers_become_active(self):
|
||||
servers_client = self.cmgr_adm.servers_client
|
||||
for server_id in self.server_id_list:
|
||||
waiters.wait_for_server_status(
|
||||
servers_client, server_id, 'ACTIVE')
|
||||
|
||||
def find_servers_ips(self):
|
||||
self.server_ips = {}
|
||||
self.jpl_ips = {}
|
||||
self.server_ips['1'] = self.get_server_info(
|
||||
self.cmgr_ames, self.vm_nasa_ames_1['id'])
|
||||
self.server_ips['2'] = self.get_server_info(
|
||||
self.cmgr_ames, self.vm_nasa_ames_2['id'])
|
||||
self.server_ips['3'] = self.get_server_info(
|
||||
self.cmgr_jpl, self.vm_nasa_jpl_3['id'])
|
||||
self.server_ips['4'] = self.get_server_info(
|
||||
self.cmgr_jpl, self.vm_nasa_jpl_4['id'])
|
||||
|
||||
def create_nasa_ames_network_and_servers(self, security_group=None):
|
||||
sg = security_group or self.sg_ames
|
||||
net, subnet = self.create_networks(self.cmgr_ames, 'nasa-ames', 1)
|
||||
self.netinfo_ames = dict(network=net, subnet=subnet,
|
||||
security_group=sg)
|
||||
self.vm_nasa_ames_1 = self.create_servers_on_networks(
|
||||
self.cmgr_ames, 'vm-nasa-ames-1', self.netinfo_ames)
|
||||
self.vm_nasa_ames_2 = self.create_servers_on_networks(
|
||||
self.cmgr_ames, 'vm-nasa-ames-2', self.netinfo_ames)
|
||||
|
||||
def create_nasa_jpl_network_and_servers(self, security_group=None):
|
||||
sg = security_group or self.sg_jpl
|
||||
# jpl and ames attached to the same router, CIDR cannot overlap
|
||||
net, subnet = self.create_networks(self.cmgr_jpl, 'nasa-jpl', 3)
|
||||
self.netinfo_jpl = dict(network=net, subnet=subnet,
|
||||
security_group=sg)
|
||||
self.vm_nasa_jpl_3 = self.create_servers_on_networks(
|
||||
self.cmgr_jpl, 'vm-nasa-jpl-3', self.netinfo_jpl)
|
||||
self.vm_nasa_jpl_4 = self.create_servers_on_networks(
|
||||
self.cmgr_jpl, 'vm-nasa-jpl-4', self.netinfo_jpl)
|
||||
|
||||
def create_nasa_topo(self, router_type=None):
|
||||
router_type = router_type or 'shared'
|
||||
self.sg_ames = self.create_security_group_policy(
|
||||
self.policy_AA,
|
||||
self.cmgr_ames.networks_client.tenant_id,
|
||||
name_prefix='nasa-ames')
|
||||
self.sg_jpl = self.create_security_group_policy(
|
||||
self.policy_AA,
|
||||
self.cmgr_jpl.networks_client.tenant_id,
|
||||
name_prefix='nasa-jpl')
|
||||
self.create_nasa_ames_network_and_servers(self.sg_ames)
|
||||
self.create_nasa_jpl_network_and_servers(self.sg_jpl)
|
||||
subnet_list = [self.netinfo_ames.get('subnet'),
|
||||
self.netinfo_jpl.get('subnet')]
|
||||
self.nasa_router = self.create_router_and_add_interfaces(
|
||||
router_type, subnet_list)
|
||||
self.wait_for_servers_become_active()
|
||||
# associate floating-ip to servers and pingable
|
||||
self.fip_nasa_ames_1 = self.create_floatingip_for_server(
|
||||
self.cmgr_ames, self.vm_nasa_ames_1)
|
||||
self.fip_nasa_jpl_3 = self.create_floatingip_for_server(
|
||||
self.cmgr_jpl, self.vm_nasa_jpl_3)
|
||||
self.find_servers_ips()
|
||||
|
||||
def host_ssh_reachable(self, host_id, host_ip):
|
||||
username, password = self.get_image_userpass()
|
||||
try:
|
||||
ssh_client = dmgr.get_remote_client_by_password(
|
||||
host_ip, username, password)
|
||||
except Exception as ex:
|
||||
ssh_client = None
|
||||
msg = (self.exc_msg +
|
||||
("\n**FAIL to ssh to host[%s=%s]\n%s" %
|
||||
(host_id, str(ex))))
|
||||
self.assertTrue(ssh_client, msg)
|
||||
self.log_exc_msg(
|
||||
(" SSH host[%s] floatingip[%s] OK" % (host_id, host_ip)))
|
||||
return ssh_client
|
||||
|
||||
def host_can_reach_ips(self, host_id, host_ssh, ip_type, ip_list):
|
||||
for dest_ip in ip_list:
|
||||
reachable = dmgr.is_reachable(host_ssh, dest_ip,
|
||||
time_out=self.conn_timeout)
|
||||
msg = (self.exc_msg +
|
||||
("\n *FAILURE* VM[%s] cannot PING %s[%s]" %
|
||||
(host_id, ip_type, dest_ip)))
|
||||
if not reachable:
|
||||
reachable = dmgr.is_reachable(host_ssh, dest_ip,
|
||||
time_out=self.conn_timeout)
|
||||
dmgr.STEPINTO_DEBUG_IF_TRUE(not reachable)
|
||||
self.assertTrue(reachable, msg)
|
||||
self.log_exc_msg(
|
||||
(" VM[%s] can PING %s[%s]" % (host_id, ip_type, dest_ip)))
|
||||
|
||||
def host_cannot_reach_ips(self, host_id, host_ssh, ip_type, ip_list):
|
||||
for dest_ip in ip_list:
|
||||
not_reachable = dmgr.isnot_reachable(host_ssh, dest_ip,
|
||||
time_out=self.conn_timeout,
|
||||
ping_timeout=5.0)
|
||||
msg = (self.exc_msg +
|
||||
("\n *FAILURE* VM[%s] shouldn't able to PING %s[%s]" %
|
||||
(host_id, ip_type, dest_ip)))
|
||||
if not not_reachable:
|
||||
not_reachable = dmgr.isnot_reachable(
|
||||
host_ssh, dest_ip, time_out=self.conn_timeout,
|
||||
ping_timeout=5.0)
|
||||
dmgr.STEPINTO_DEBUG_IF_TRUE(not not_reachable)
|
||||
self.assertTrue(not_reachable, msg)
|
||||
self.log_exc_msg(
|
||||
(" VM[%s] is not able to PING %s[%s]" %
|
||||
(host_id, ip_type, dest_ip)))
|
||||
|
||||
def ican_reach_ip(self, ip_addr, ping_timeout=5):
|
||||
ip_type = 'floating-ip'
|
||||
for x in range(int(self.conn_timeout / ping_timeout)):
|
||||
reachable = self.ping_ip_address(ip_addr,
|
||||
ping_timeout=ping_timeout)
|
||||
if reachable:
|
||||
break
|
||||
time.sleep(2.0)
|
||||
msg = (self.exc_msg +
|
||||
("\n *FAILURE* Tempest cannot PING %s[%s]" %
|
||||
(ip_type, ip_addr)))
|
||||
if not reachable:
|
||||
reachable = self.ping_ip_address(ip_addr,
|
||||
ping_timeout=ping_timeout)
|
||||
dmgr.STEPINTO_DEBUG_IF_TRUE(not reachable)
|
||||
self.assertTrue(reachable, msg)
|
||||
self.log_exc_msg(" Tempest can PING %s[%s]" % (ip_type, ip_addr))
|
||||
|
||||
def icannot_reach_ip(self, ip_addr, ping_timeout=5):
|
||||
ip_type = 'floating-ip'
|
||||
for x in range(int(self.conn_timeout / ping_timeout)):
|
||||
reachable = self.ping_ip_address(ip_addr,
|
||||
ping_timeout=ping_timeout)
|
||||
if not reachable:
|
||||
break
|
||||
time.sleep(ping_timeout)
|
||||
msg = (self.exc_msg +
|
||||
("\n *FAILURE* Tempest should not PING %s[%s]" %
|
||||
(ip_type, ip_addr)))
|
||||
if reachable:
|
||||
reachable = self.ping_ip_address(ip_addr,
|
||||
ping_timeout=ping_timeout)
|
||||
dmgr.STEPINTO_DEBUG_IF_TRUE(reachable)
|
||||
self.assertFalse(reachable, msg)
|
||||
self.log_exc_msg((" Tempest isnot able to PING %s[%s]" %
|
||||
(ip_type, ip_addr)))
|
||||
|
||||
def run_admin_policy_op_scenario(self, router_type):
|
||||
self.log_exc_msg(("Setup admin-policy test with router-type[%s]" %
|
||||
router_type))
|
||||
self.create_nasa_topo(router_type)
|
||||
self.jpl_private_ips = [y['fixed_ip_address']
|
||||
for x, y in six.iteritems(self.server_ips)
|
||||
if x > '2']
|
||||
self.ames_private_ips = [y['fixed_ip_address']
|
||||
for x, y in six.iteritems(self.server_ips)
|
||||
if x < '3']
|
||||
|
||||
self.run_policy_AA_on_ames_AA_on_jpl()
|
||||
self.run_policy_AA_on_ames_BB_on_jpl()
|
||||
self.run_policy_BB_on_ames_BB_on_jpl()
|
||||
|
||||
dmgr.LOG.debug(self.exc_msg)
|
||||
|
||||
def run_policy_AA_on_ames_AA_on_jpl(self):
|
||||
self.log_exc_msg(("### tenant:jpl=policy_AA[%s]"
|
||||
", tenant:ames=policy_AA[%s]" %
|
||||
(self.policy_AA, self.policy_AA)))
|
||||
# at the beginning, can ssh to VM with floating-ip
|
||||
self.log_exc_msg(
|
||||
"Tempest can ping & ssh vm-nasa-ames-1's floatingip")
|
||||
self.ican_reach_ip(self.fip_nasa_ames_1['floating_ip_address'])
|
||||
ames_1_ssh = self.host_ssh_reachable(
|
||||
"nasa-ames-1",
|
||||
self.fip_nasa_ames_1['floating_ip_address'])
|
||||
|
||||
# from vm-nasa-ames-1 can ping all other private-ips
|
||||
self.log_exc_msg(("vm-nasa-ames-1[%s] can ping all private-ips"
|
||||
% (self.server_ips['1']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'ame-private-ip', self.ames_private_ips)
|
||||
self.host_can_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'jp-private-ip', self.jpl_private_ips)
|
||||
# from vm-nasa-jpl_3 can ping all other private-ips
|
||||
self.log_exc_msg(
|
||||
"Tempest can ping & ssh vm-nasa-jpl-3's floatingip")
|
||||
self.ican_reach_ip(self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
jpl_3_ssh = self.host_ssh_reachable(
|
||||
"nasa-jpl-3",
|
||||
self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
self.log_exc_msg(("vm-nasa-jpl-3[%s] can ping all private-ips"
|
||||
% (self.server_ips['3']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'jp-private-ip', self.jpl_private_ips)
|
||||
self.host_can_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'ames-private-ip', self.ames_private_ips)
|
||||
# within VM can ping both tanants' floating-ips
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-ames-1 can ping vm-nasa-jpl-1's floatingip")
|
||||
self.host_can_reach_ips(
|
||||
'nasa-ames-1', ames_1_ssh, 'jpl-floating-ip',
|
||||
[self.fip_nasa_jpl_3['floating_ip_address']])
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-jpl-3 can ping vm-nasa-ames-3's floatingip")
|
||||
self.host_can_reach_ips(
|
||||
'nasa-jpl-3', jpl_3_ssh, 'nasa-floating-ip',
|
||||
[self.fip_nasa_ames_1['floating_ip_address']])
|
||||
|
||||
def run_policy_AA_on_ames_BB_on_jpl(self):
|
||||
# from vm-nasa-ames-1 can ping all other private-ips
|
||||
self.log_exc_msg(
|
||||
("Update tenant:jpl to use policy_BB[%s] with group-ping"
|
||||
% self.policy_BB))
|
||||
# admin update jpl to policy_BB_GP
|
||||
self.update_security_group_policy(self.sg_jpl['id'], self.policy_BB)
|
||||
# cannot ping vm-nasa-jpl-3, can ssh to both tenants' floating-ips
|
||||
self.log_exc_msg(("### tenant:jpl=policy_BB[%s]"
|
||||
", tenant:ames=policy_AA[%s]" %
|
||||
(self.policy_BB, self.policy_AA)))
|
||||
self.log_exc_msg(
|
||||
"Tempest can ping & ssh vm-nasa-ames-1's floatingip")
|
||||
self.ican_reach_ip(self.fip_nasa_ames_1['floating_ip_address'])
|
||||
ames_1_ssh = self.host_ssh_reachable(
|
||||
"nasa-ames-1",
|
||||
self.fip_nasa_ames_1['floating_ip_address'])
|
||||
self.log_exc_msg("Tempest can ssh vm-nasa-jpl-3's floatingip"
|
||||
", but not ping")
|
||||
self.icannot_reach_ip(self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
jpl_3_ssh = self.host_ssh_reachable(
|
||||
"nasa-jpl-3",
|
||||
self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
# vm-nasa-jpl_3 can ping its private-ips, not other tenants
|
||||
self.log_exc_msg(("vm-nasa-jpl-3[%s] can reach all private-ips"
|
||||
% (self.server_ips['3']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'jpl-private-ip', self.jpl_private_ips)
|
||||
self.host_can_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'ames-private-ip', self.ames_private_ips)
|
||||
# nasa_ames_1 can not ping private-ips of tenant jpl
|
||||
# as policy_BB:ping only allowed from the same security-group
|
||||
self.log_exc_msg(("vm-nasa-ames-1[%s] can reach ames's rivate-ips"
|
||||
", not jpl's private-ips"
|
||||
% (self.server_ips['1']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'ames-private-ip', self.ames_private_ips)
|
||||
self.host_cannot_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'jpl-private-ip', self.jpl_private_ips)
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-ames-1 cannot ping vm-nasa-jpl-1's floatingip")
|
||||
self.host_cannot_reach_ips(
|
||||
'nasa-ames-1', ames_1_ssh, 'jpl-floating-ip',
|
||||
[self.fip_nasa_jpl_3['floating_ip_address']])
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-jpl-3 cannot ping vm-nasa-ames-3's floatingip")
|
||||
self.host_cannot_reach_ips(
|
||||
'nasa-jpl-3', jpl_3_ssh, 'ames-floating-ip',
|
||||
[self.fip_nasa_ames_1['floating_ip_address']])
|
||||
|
||||
def run_policy_BB_on_ames_BB_on_jpl(self):
|
||||
### tenant jpl:policy_BB_GP, tenant ames:policy_BB_GP
|
||||
self.log_exc_msg(
|
||||
("Update tenant:ames to use policy_BB[%s] with group-ping"
|
||||
% self.policy_BB))
|
||||
# admin update ames to policy_BB
|
||||
self.update_security_group_policy(self.sg_ames['id'], self.policy_BB)
|
||||
# cannot ping all VMs, but can ssh to both tenants' floating-ips
|
||||
self.log_exc_msg(("### tenant:jpl=policy_BB[%s]"
|
||||
", tenant:ames=policy_BB[%s]" %
|
||||
(self.policy_BB, self.policy_BB)))
|
||||
self.log_exc_msg("Tempest can ssh vvm-nasa-ames-1's floatingip &"
|
||||
" vm-nasa-jpl-3's floatingip, but not ping.")
|
||||
self.icannot_reach_ip(self.fip_nasa_ames_1['floating_ip_address'])
|
||||
self.icannot_reach_ip(self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
ames_1_ssh = self.host_ssh_reachable(
|
||||
"nasa-ames-1",
|
||||
self.fip_nasa_ames_1['floating_ip_address'])
|
||||
jpl_3_ssh = self.host_ssh_reachable(
|
||||
"nasa-jpl-3",
|
||||
self.fip_nasa_jpl_3['floating_ip_address'])
|
||||
self.log_exc_msg(("vm-nasa-jpl-3[%s] can reach jpl private-ips"
|
||||
", not ames"
|
||||
% (self.server_ips['3']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'private-ip', self.jpl_private_ips)
|
||||
self.host_cannot_reach_ips('nasa-jpl-3', jpl_3_ssh,
|
||||
'private-ip', self.ames_private_ips)
|
||||
self.log_exc_msg(("vm-nasa-ames-1[%s] can reach ames private-ips"
|
||||
", not jpl"
|
||||
% (self.server_ips['1']['fixed_ip_address'])))
|
||||
self.host_can_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'private-ip', self.ames_private_ips)
|
||||
self.host_cannot_reach_ips('nasa-ames-1', ames_1_ssh,
|
||||
'private-ip', self.jpl_private_ips)
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-ames-1 cannot ping vm-nasa-jpl-1's floatingip")
|
||||
self.host_cannot_reach_ips(
|
||||
'nasa-ames-1', ames_1_ssh, 'floating-ip',
|
||||
[self.fip_nasa_jpl_3['floating_ip_address']])
|
||||
self.log_exc_msg(
|
||||
"vm-nasa-jpl-3 cannot ping vm-nasa-ames-3's floatingip")
|
||||
self.host_cannot_reach_ips(
|
||||
'nasa-jpl-3', jpl_3_ssh, 'floating-ip',
|
||||
[self.fip_nasa_ames_1['floating_ip_address']])
|
||||
|
||||
|
||||
class TestAdminPolicySharedRouter(TestAdminPolicyBasicOps):
|
||||
@decorators.idempotent_id('78f45717-5f95-4ef5-b2a4-a1b4700ef688')
|
||||
def test_admin_policy_ops_with_shared_router(self):
|
||||
self.run_admin_policy_op_scenario('shared')
|
||||
|
||||
|
||||
class TestAdminPolicyExclusiveRouter(TestAdminPolicyBasicOps):
|
||||
@decorators.idempotent_id('68345852-da2e-4f46-816b-0afc59470a45')
|
||||
def test_admin_policy_ops_with_exclusive_router(self):
|
||||
self.run_admin_policy_op_scenario('exclusive')
|
||||
|
||||
|
||||
class TestAdminPolicyDistributedRouter(TestAdminPolicyBasicOps):
|
||||
@decorators.idempotent_id('76adbfbb-a2e5-40fa-8930-84e7ece87bd5')
|
||||
def test_admin_policy_ops_with_distributed_router(self):
|
||||
self.run_admin_policy_op_scenario('distributed')
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user