tempest: lbaas l7 switching scenario tests
1. Add LBaaS l7 switching clients.
2. Enhance resource cleanup to delete l7 resources if present.
3. Enhance the NSX-v scenario to support l7 switching testing:
   3.1 Check that the floating IP is assigned to the server before the ping
       test; NSX-v3 can take 500+ seconds to assign a floating IP to a VM.
   3.2 Enhance test_lbaas_round_robin to support the l7 test.
   3.3 test_lbaas_l7_switching_ops only tests URL starts_with validation.
4. l7_switching_ops requires around 2000 seconds. When running the test, set
   OS_TEST_TIMEOUT=2400 to avoid a fixture timeout.
5. Add CONF.nsxv.bugs_to_resolve to skip tests that cannot be run in a
   specific NSX plugin environment.
6. doc/README-LBaaS.rst describes the limitations and how to run tests
   against different plugins and backends.

Change-Id: Ib2ee4ce57d45882d76e25ce7b7bba3d825bf34ab
parent 455bb910d2
commit 2065418288
@@ -114,6 +114,11 @@ NSXvGroup = [
                help="router_type is NSXv extension."
                     "Set it to True allow tests to remove this attribute"
                     " when creating router."),
    cfg.ListOpt('bugs_to_resolve',
                default=[],
                help="Bugs to be resolved. Define this at tempest.conf and"
                     " test case testtools.skipIf(condition, reasons) to"
                     " skip test cannot be run at specific plugin env."),
]
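Note: the new list option is consumed with testtools.skipIf, as the API tests
further down in this change do. The following is a minimal sketch of that
pattern, not part of the commit; the class and test names are illustrative,
and it assumes tempest's config is loaded and the nsxv option group is
registered by this plugin before the module is imported:

    import testtools

    from tempest import config
    from tempest import test

    CONF = config.CONF


    class ExampleLBaasTest(test.BaseTestCase):
        """Hypothetical test class; only the skip pattern matters here."""

        # Skipped when the deployer lists bug 1641902 in
        # [nsxv] bugs_to_resolve of tempest.conf (see the local.conf
        # samples in doc/README-LBaaS.rst).
        @testtools.skipIf('1641902' in CONF.nsxv.bugs_to_resolve,
                          "skip_because bug=1641902")
        def test_something(self):
            pass

Deployers list the bug IDs they still hit in [nsxv] bugs_to_resolve, so the
same suite can run against NSX-v, NSX-v3, and upstream backends without code
changes.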
vmware_nsx_tempest/doc/README-LBaaS.rst (new file, 74 lines)
@@ -0,0 +1,74 @@

Overview
========

This document describes which LBaaS tests are not supported by the
different NSX plugins and backends.

NOTE::

    All LBaaS API & Scenario tests should PASS, with exceptions
    due to the NSX plugin and the features supported by its backend.

    For how tests can be skipped for a specific plugin and backend,
    please refer to the paragraph "Config for Test Execution".

NOTE::

    We no longer support LBaaS v1. So this document and the LBaaS tests
    only apply to releases from Mitaka/Marvin or later.

Limitation:
-----------

NSX-v with VMware LBaaS driver::

    #. LBaaS networks need to attach to an exclusive router
    #. One tenant per subnet
    #. L7 switching not supported

NSX-v3 with Octavia driver::

    #. upstream implementation - all tests should PASS.
    #. scenario tests take a long time; they might fail with a fixture timeout.

Config for Test execution:
--------------------------

The following configuration attributes are used to control test execution::

    #. no_router_type at group/session nsxv

       Default is False, and is used to run LBaaS tests in an NSX-v
       environment. To run in an NSX-t environment, set it to True.

    #. bugs_to_resolve at group/session nsxv

       Tests are skipped if their bug-ID is present in this attribute.
       A test uses testtools.skipIf(condition, reason) to skip itself
       if its bug ID is in the bugs_to_resolve list.

local.conf:
-----------

NSX-v::

    [nsxv]
    no_router_type=False
    bugs_to_resolve=1641902,1715126,1703396,1739510

NSX-v3::

    [compute]
    build_timeout=900
    build_interval=2

    [nsxv]
    no_router_type=True

Execution:
----------

    #. Use the testr list-tests command to generate the test suites for
       running API and Scenario tests::

        tools/with_venv.sh testr list-tests nsxv.api.lbaas
        tools/with_venv.sh testr list-tests nsxv.scenario.test_lbaas

    #. L7 switching tests take a long time to complete. If you hit a fixture
       timeout, run::

        OS_TEST_TIMEOUT=2400 ./run_tempest.sh -t test_lbaas_l7_switching_ops
vmware_nsx_tempest/services/lbaas/l7policies_client.py (new file, 58 lines)
@@ -0,0 +1,58 @@

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib.services.network import base


class L7PoliciesClient(base.BaseNetworkClient):
    resource = 'l7policy'
    resource_plural = 'l7policies'
    resource_base_path = '/lbaas/l7policies'
    resource_object_path = '/lbaas/l7policies/%s'

    def create_l7policy(self, **kwargs):
        uri = self.resource_base_path
        post_data = {self.resource: kwargs}
        return self.create_resource(uri, post_data)

    def update_l7policy(self, policy_id, **kwargs):
        uri = self.resource_object_path % (policy_id)
        post_data = {self.resource: kwargs}
        return self.update_resource(uri, post_data)

    def show_l7policy(self, policy_id, **fields):
        uri = self.resource_object_path % (policy_id)
        return self.show_resource(uri, **fields)

    def delete_l7policy(self, policy_id):
        uri = self.resource_object_path % (policy_id)
        return self.delete_resource(uri)

    def list_l7policies(self, **filters):
        uri = self.resource_base_path
        return self.list_resources(uri, **filters)


def get_client(client_mgr):
    """create a lbaas l7policies client from manager or networks_client"""
    manager = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(manager, 'networks_client')
    try:
        _params = manager.default_params_with_timeout_values.copy()
    except Exception:
        _params = {}
    client = L7PoliciesClient(net_client.auth_provider,
                              net_client.service,
                              net_client.region,
                              net_client.endpoint_type,
                              **_params)
    return client
vmware_nsx_tempest/services/lbaas/l7rules_client.py (new file, 58 lines)
@@ -0,0 +1,58 @@

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib.services.network import base


class L7RulesClient(base.BaseNetworkClient):
    resource = 'rule'
    resource_plural = 'rules'
    resource_base_path = '/lbaas/l7policies/%s/rules'
    resource_object_path = '/lbaas/l7policies/%s/rules/%s'

    def create_l7rule(self, policy_id, **kwargs):
        uri = self.resource_base_path % policy_id
        post_data = {self.resource: kwargs}
        return self.create_resource(uri, post_data)

    def update_l7rule(self, policy_id, rule_id, **kwargs):
        uri = self.resource_object_path % (policy_id, rule_id)
        post_data = {self.resource: kwargs}
        return self.update_resource(uri, post_data)

    def show_l7rule(self, policy_id, rule_id, **fields):
        uri = self.resource_object_path % (policy_id, rule_id)
        return self.show_resource(uri, **fields)

    def delete_l7rule(self, policy_id, rule_id):
        uri = self.resource_object_path % (policy_id, rule_id)
        return self.delete_resource(uri)

    def list_l7rules(self, policy_id, **filters):
        uri = self.resource_base_path % policy_id
        return self.list_resources(uri, **filters)


def get_client(client_mgr):
    """create a lbaas l7rules client from manager or networks_client"""
    manager = getattr(client_mgr, 'manager', client_mgr)
    net_client = getattr(manager, 'networks_client')
    try:
        _params = manager.default_params_with_timeout_values.copy()
    except Exception:
        _params = {}
    client = L7RulesClient(net_client.auth_provider,
                           net_client.service,
                           net_client.region,
                           net_client.endpoint_type,
                           **_params)
    return client
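For reference, the scenario test later in this change drives these two clients
roughly as follows. This is a minimal sketch, not part of the commit; the
helper name is illustrative, and `client_mgr`, `listener_id` and `pool_id` are
assumed to come from an already-built load balancer:

    from vmware_nsx_tempest.services.lbaas import l7policies_client
    from vmware_nsx_tempest.services.lbaas import l7rules_client


    def add_starts_with_rule(client_mgr, listener_id, pool_id):
        """Redirect requests whose URL path starts with /api to pool_id."""
        policies = l7policies_client.get_client(client_mgr)
        rules = l7rules_client.get_client(client_mgr)
        # Create a policy that redirects matching traffic to the l7 pool.
        policy = policies.create_l7policy(action='REDIRECT_TO_POOL',
                                          redirect_pool_id=pool_id,
                                          listener_id=listener_id,
                                          name='policy1')['l7policy']
        # Attach a PATH/STARTS_WITH rule to the policy.
        rule = rules.create_l7rule(policy['id'], type='PATH',
                                   compare_type='STARTS_WITH',
                                   value='/api')['rule']
        return policy, rule

create_l7policy returns the response wrapped under the 'l7policy' key and
create_l7rule under 'rule', which matches how the scenario test unwraps them.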
@@ -28,6 +28,8 @@ from tempest.lib import exceptions

from vmware_nsx_tempest._i18n import _LI
from vmware_nsx_tempest.services.lbaas import health_monitors_client
from vmware_nsx_tempest.services.lbaas import l7policies_client
from vmware_nsx_tempest.services.lbaas import l7rules_client
from vmware_nsx_tempest.services.lbaas import listeners_client
from vmware_nsx_tempest.services.lbaas import load_balancers_client
from vmware_nsx_tempest.services.lbaas import members_client

@@ -68,6 +70,9 @@ class BaseTestCase(base.BaseNetworkTest):
        cls.pools_client = pools_client.get_client(mgr)
        cls.members_client = members_client.get_client(mgr)
        cls.health_monitors_client = health_monitors_client.get_client(mgr)
        # l7-switching clients
        cls.l7policies_client = l7policies_client.get_client(cls.manager)
        cls.l7rules_client = l7rules_client.get_client(cls.manager)

    @classmethod
    def setup_lbaas_core_network(cls):

@@ -95,7 +100,30 @@ class BaseTestCase(base.BaseNetworkTest):
            except exceptions.NotFound:
                continue
            for listener in lb.get('listeners', []):
                for policy in listener.get('l7policies'):
                    test_utils.call_and_ignore_notfound_exc(
                        cls.l7policies_client.delete_l7policy,
                        policy.get('id'))
                    cls._wait_for_load_balancer_status(lb_id)
                for pool in listener.get('pools'):
                    cls.delete_lb_pool_resources(lb_id, pool)
                # delete listener
                test_utils.call_and_ignore_notfound_exc(
                    cls.listeners_client.delete_listener,
                    listener.get('id'))
                cls._wait_for_load_balancer_status(lb_id)
            # delete pools not attached to listener, but loadbalancer
            for pool in lb.get('pools', []):
                cls.delete_lb_pool_resources(lb_id, pool)
            # delete load-balancer
            test_utils.call_and_ignore_notfound_exc(
                cls._delete_load_balancer, lb_id)
        # NSX-v: delete exclusive router
        cls.delete_router(cls.router)
        super(BaseTestCase, cls).resource_cleanup()

    @classmethod
    def delete_lb_pool_resources(cls, lb_id, pool):
        # delete pool's health-monitor
        hm = pool.get('healthmonitor')
        if hm:

@@ -114,17 +142,6 @@ class BaseTestCase(base.BaseNetworkTest):
            test_utils.call_and_ignore_notfound_exc(
                cls.pools_client.delete_pool, pool.get('id'))
            cls._wait_for_load_balancer_status(lb_id)
            # delete listener
            test_utils.call_and_ignore_notfound_exc(
                cls.listeners_client.delete_listener,
                listener.get('id'))
            cls._wait_for_load_balancer_status(lb_id)
        # delete load-balancer
        test_utils.call_and_ignore_notfound_exc(
            cls._delete_load_balancer, lb_id)
        # NSX-v: delete exclusive router
        cls.delete_router(cls.router)
        super(BaseTestCase, cls).resource_cleanup()

    @classmethod
    def setUpClass(cls):

@@ -137,16 +154,16 @@ class BaseTestCase(base.BaseNetworkTest):

    def tearDown(cls):
        super(BaseTestCase, cls).tearDown()
        cls.LOG.info(_LI('Finished: {0}\n').format(cls._testMethodName))
        cls.LOG.info(_LI('Finished: {0}').format(cls._testMethodName))

    @classmethod
    def _create_load_balancer(cls, wait=True, **lb_kwargs):
        lb = cls.load_balancers_client.create_load_balancer(**lb_kwargs)
        lb = lb.get('loadbalancer', lb)
        cls._lbs_to_delete.append(lb.get('id'))
        if wait:
            cls._wait_for_load_balancer_status(lb.get('id'))

        cls._lbs_to_delete.append(lb.get('id'))
        port = cls.ports_client.show_port(lb['vip_port_id'])
        cls.ports.append(port['port'])
        return lb

@@ -414,8 +431,9 @@ class BaseAdminTestCase(BaseTestCase):
    def resource_setup(cls):
        super(BaseAdminTestCase, cls).resource_setup()

        mgr = cls.get_client_manager(credential_type='admin')
        cls.create_lbaas_clients(mgr)
        cls.admin_mgr = cls.get_client_manager(credential_type='admin')
        cls.admin_tenant_id = cls.admin_mgr.networks_client.tenant_id
        cls.create_lbaas_clients(cls.admin_mgr)
        cls.setup_lbaas_core_network()

    @classmethod
@@ -14,7 +14,6 @@ from oslo_log import log as logging
from oslo_utils import uuidutils

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -70,12 +69,12 @@ class TestHealthMonitors(base.BaseAdminTestCase):
        self.assertEqual(admin_tenant_id, hm_tenant_id)

    @test.attr(type='negative')
    @decorators.skip_because(bug="1638148")
    @test.idempotent_id('acbff982-15d6-43c5-a015-e72b7df30998')
    def test_create_health_monitor_empty_tenant_id_field(self):
        """Test with admin user

        creating health monitor with an empty tenant id field should fail.
        Kilo: @decorators.skip_because(bug="1638148")
        """
        self.assertRaises(ex.BadRequest, self._create_health_monitor,
                          type='HTTP', delay=3, max_retries=10,
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -305,11 +304,11 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('0d637b7f-52ea-429f-8f97-584a5a9118aa')
    @decorators.skip_because(bug="1641652")
    def test_create_health_monitor_invalid_url_path(self):
        """Test if a non_admin user

        can create a health monitor with invalid url_path
        Kilo: @decorators.skip_because(bug="1641652")
        """
        self.assertRaises(ex.BadRequest, self._create_health_monitor,
                          type='HTTP', delay=3, max_retries=10, timeout=5,

@@ -317,11 +316,11 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('7d4061c4-1fbc-43c3-81b5-2d099a120297')
    @decorators.skip_because(bug="1641643")
    def test_create_health_monitor_invalid_http_method(self):
        """Test if a non_admin user

        can create a health monitor with invalid http_method
        Kilo: @decorators.skip_because(bug="1641643")
        """
        self.assertRaises(ex.BadRequest, self._create_health_monitor,
                          type='HTTP', delay=3, max_retries=10, timeout=5,

@@ -379,18 +378,22 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('9c8e8fe8-a3a2-481b-9ac8-eb9ecccd8330')
    @decorators.skip_because(bug="1639340")
    def test_create_health_monitor_empty_max_http_method(self):
        """Test create health monitor with empty http_method"""
        """Test create health monitor with empty http_method

        Kilo: @decorators.skip_because(bug="1639340")
        """
        self.assertRaises(ex.BadRequest, self._create_health_monitor,
                          type='HTTP', delay=3, max_retries=10, timeout=5,
                          pool_id=self.pool.get('id'), http_method='')

    @test.attr(type='negative')
    @test.idempotent_id('9016c846-fc7c-4063-9f01-61fad37c435d')
    @decorators.skip_because(bug="1639340")
    def test_create_health_monitor_empty_max_url_path(self):
        """Test create health monitor with empty url_path"""
        """Test create health monitor with empty url_path

        Kilo: @decorators.skip_because(bug="1639340")
        """
        self.assertRaises(ex.BadRequest, self._create_health_monitor,
                          type='HTTP', delay=3, max_retries=10, timeout=5,
                          pool_id=self.pool.get('id'), url_path='')

@@ -559,8 +562,8 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('1e2fb718-de77-46a3-8897-6f5aff6cab5e')
    @decorators.skip_because(bug="1641643")
    def test_update_health_monitor_invalid_http_method(self):
        """Kilo: @decorators.skip_because(bug="1641643")"""
        hm = self._create_health_monitor(type='HTTP', delay=3,
                                         max_retries=10, timeout=5,
                                         pool_id=self.pool.get('id'))

@@ -570,8 +573,8 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('07d62a55-18b3-4b74-acb2-b73a0b5e4364')
    @decorators.skip_because(bug="1641652")
    def test_update_health_monitor_invalid_url_path(self):
        """Kilo: @decorators.skip_because(bug="1641652")"""
        hm = self._create_health_monitor(type='HTTP', delay=3,
                                         max_retries=10, timeout=5,
                                         pool_id=self.pool.get('id'))

@@ -631,8 +634,8 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('0c464bb3-ff84-4816-9237-4583e4da9881')
    @decorators.skip_because(bug="1639340")
    def test_update_health_monitor_empty_empty_http_method(self):
        """Kilo: @decorators.skip_because(bug="1639340")"""
        hm = self._create_health_monitor(type='HTTP', delay=3,
                                         max_retries=10, timeout=5,
                                         pool_id=self.pool.get('id'))

@@ -642,8 +645,8 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('3e87c0a8-ef15-457c-a58f-270de8c5c76c')
    @decorators.skip_because(bug="1639340")
    def test_update_health_monitor_empty_url_path(self):
        """Kilo: @decorators.skip_because(bug="1639340")"""
        hm = self._create_health_monitor(type='HTTP', delay=3,
                                         max_retries=10, timeout=5,
                                         pool_id=self.pool.get('id'))

@@ -673,8 +676,8 @@ class TestHealthMonitors(base.BaseTestCase):

    @test.attr(type=['smoke', 'negative'])
    @test.idempotent_id('fe44e0d9-957b-44cf-806b-af7819444864')
    @decorators.skip_because(bug="1639340")
    def test_delete_health_monitor(self):
        """Kilo: @decorators.skip_because(bug="1639340")"""
        hm = self._create_health_monitor(cleanup=False, type='HTTP', delay=3,
                                         max_retries=10, timeout=5,
                                         pool_id=self.pool.get('id'))
@@ -13,7 +13,6 @@
from oslo_log import log as logging

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -54,10 +53,12 @@ class ListenersTest(base.BaseAdminTestCase):
        super(ListenersTest, cls).resource_cleanup()

    @test.attr(type='negative')
    @decorators.skip_because(bug="1638738")
    @test.idempotent_id('f84bfb35-7f73-4576-b2ca-26193850d2bf')
    def test_create_listener_empty_tenant_id(self):
        """Test create listener with an empty tenant id should fail"""
        """Test create listener with an empty tenant id should fail

        Kilo: @decorators.skip_because(bug="1638738")
        """
        create_new_listener_kwargs = self.create_listener_kwargs
        create_new_listener_kwargs['protocol_port'] = 8081
        create_new_listener_kwargs['tenant_id'] = ""
@@ -13,7 +13,6 @@
from oslo_log import log as logging

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test

@@ -220,10 +219,12 @@ class ListenersTest(base.BaseTestCase):
                              listener_ids=[self.listener_id])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('59d32fd7-06f6-4466-bdd4-0be23b15970c')
    def test_create_listener_invalid_name(self):
        """Test create listener with an invalid name"""
        """Test create listener with an invalid name

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._create_listener,
                          loadbalancer_id=self.load_balancer_id,

@@ -234,10 +235,12 @@
                              listener_ids=[self.listener_id])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('95457f70-2c1a-4c14-aa80-db8e803d78a9')
    def test_create_listener_invalid_description(self):
        """Test create listener with an invalid description"""
        """Test create listener with an invalid description

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._create_listener,
                          loadbalancer_id=self.load_balancer_id,

@@ -312,10 +315,12 @@
                              listener_ids=[self.listener_id])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1638701")
    @test.idempotent_id('46fc3784-d676-42f7-953b-a23c1d62323d')
    def test_create_listener_empty_tenant_id(self):
        """Test create listener with an empty tenant id"""
        """Test create listener with an empty tenant id

        Kilo: @decorators.skip_because(bug="1638701")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._create_listener,
                          loadbalancer_id=self.load_balancer_id,

@@ -417,10 +422,12 @@
                              listener_ids=[self.listener_id])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('7c0efb63-90d9-43d0-b959-eb841ef39832')
    def test_update_listener_invalid_name(self):
        """Test update a listener with an invalid name"""
        """Test update a listener with an invalid name

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._update_listener,
                          listener_id=self.listener_id,

@@ -429,10 +436,12 @@
                              listener_ids=[self.listener_id])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('ba9bfad8-dbb0-4cbc-b2e3-52bf72bc1fc5')
    def test_update_listener_invalid_description(self):
        """Test update a listener with an invalid description"""
        """Test update a listener with an invalid description

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._update_listener,
                          listener_id=self.listener_id,
@@ -11,9 +11,9 @@
# under the License.

from oslo_log import log as logging
import testtools

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -47,13 +47,15 @@ class LoadBalancersTest(base.BaseAdminTestCase):
        cls.load_balancer_id = cls.load_balancer['id']

    @test.attr(type='smoke')
    @decorators.skip_because(bug="1641902")
    @testtools.skipIf('1641902' in CONF.nsxv.bugs_to_resolve,
                      "skip_because bug=1641902")
    @test.idempotent_id('0008ae1e-77a2-45d9-b81e-0e3119b5a26d')
    def test_create_load_balancer_missing_tenant_id_field_for_admin(self):
        """Test create load balancer with a missing tenant id field.

        Verify tenant_id matches when creating loadbalancer vs.
        load balancer(admin tenant)
        Kilo: @decorators.skip_because(bug="1641902")
        """
        load_balancer = self._create_load_balancer(
            vip_subnet_id=self.subnet['id'])

@@ -65,13 +67,15 @@ class LoadBalancersTest(base.BaseAdminTestCase):
        self._wait_for_load_balancer_status(load_balancer['id'])

    @test.attr(type='smoke')
    @decorators.skip_because(bug="1638571")
    @testtools.skipIf('1715126' in CONF.nsxv.bugs_to_resolve,
                      "skip_because bug=1715126")
    @test.idempotent_id('37620941-47c1-40b2-84d8-db17ff823ebc')
    def test_create_load_balancer_missing_tenant_id_for_other_tenant(self):
        """Test create load balancer with a missing tenant id field.

        Verify tenant_id does not match of subnet(non-admin tenant) vs.
        load balancer(admin tenant)
        Kilo: @decorators.skip_because(bug="1638571")
        """
        load_balancer = self._create_load_balancer(
            vip_subnet_id=self.subnet['id'])

@@ -81,11 +85,13 @@ class LoadBalancersTest(base.BaseAdminTestCase):
        self._wait_for_load_balancer_status(load_balancer['id'])

    @test.attr(type='negative')
    @decorators.skip_because(bug="1638148")
    # Empty tenant_id causing ServerFault
    @test.idempotent_id('5bf483f5-ae28-47f5-8805-642da0ffcb40')
    # Empty tenant_id causing ServerFault
    def test_create_load_balancer_empty_tenant_id_field(self):
        """Test create load balancer with empty tenant_id field should fail"""
        """Test create load balancer with empty tenant_id field should fail

        Kilo: @decorators.skip_because(bug="1638148")
        """
        self.assertRaises(ex.BadRequest,
                          self._create_load_balancer,
                          vip_subnet_id=self.subnet['id'],

@@ -93,11 +99,13 @@ class LoadBalancersTest(base.BaseAdminTestCase):
                          tenant_id="")

    @test.attr(type='smoke')
    @decorators.skip_because(bug="1638571")
    @test.idempotent_id('19fc8a44-1280-49f3-be5b-0d30e6e43363')
    # 2nd tenant_id at the same subnet not supported; got serverFault
    # NSX-v: 2nd tenant_id at the same subnet not supported; got serverFault
    def test_create_load_balancer_for_another_tenant(self):
        """Test create load balancer for other tenant"""
        """Test create load balancer for other tenant

        Kilo: @decorators.skip_because(bug="1638571")
        """
        tenant = 'deffb4d7c0584e89a8ec99551565713c'
        load_balancer = self._create_load_balancer(
            vip_subnet_id=self.subnet['id'],
@@ -13,9 +13,9 @@
import netaddr

from oslo_log import log as logging
import testtools

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test

@@ -240,10 +240,12 @@ class LoadBalancersTest(base.BaseTestCase):
                          tenant_id="&^%123")

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('b8c56e4a-9644-4119-8fc9-130841caf662')
    def test_create_load_balancer_invalid_name(self):
        """Test create load balancer with an invalid name"""
        """Test create load balancer with an invalid name

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._create_load_balancer,
                          wait=False,

@@ -252,10 +254,12 @@
                          name='n' * 256)

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('d638ae60-7de5-45da-a7d9-53eca4998980')
    def test_create_load_balancer_invalid_description(self):
        """Test create load balancer with an invalid description"""
        """Test create load balancer with an invalid description

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._create_load_balancer,
                          wait=False,

@@ -307,21 +311,23 @@
                          tenant_id=tenant)

    @test.attr(type='negative')
    @testtools.skipIf('1703396' in CONF.nsxv.bugs_to_resolve,
                      "skip_because bug=1703396")
    @test.idempotent_id('9963cbf5-97d0-4ab9-96e5-6cbd65c98714')
    # TODO(akang): upstream is exceptions.NotFound
    def test_create_load_balancer_invalid_flavor_field(self):
        """Test create load balancer with an invalid flavor field"""
        self.assertRaises(exceptions.BadRequest,
        self.assertRaises(exceptions.NotFound,
                          self._create_load_balancer,
                          vip_subnet_id=self.subnet['id'],
                          flavor_id="NO_SUCH_FLAVOR")

    @test.attr(type='negative')
    @testtools.skipIf('1703396' in CONF.nsxv.bugs_to_resolve,
                      "skip_because bug=1703396")
    @test.idempotent_id('f7319e32-0fad-450e-8f53-7567f56e8223')
    # TODO(akang): upstream is exceptions.Conflict
    def test_create_load_balancer_provider_flavor_conflict(self):
        """Test create load balancer with both a provider and a flavor"""
        self.assertRaises(exceptions.BadRequest,
        self.assertRaises(exceptions.Conflict,
                          self._create_load_balancer,
                          vip_subnet_id=self.subnet['id'],
                          flavor_id="NO_SUCH_FLAVOR",

@@ -348,10 +354,12 @@
        self.assertEqual(load_balancer.get('name'), "")

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('551be885-215d-4941-8870-651cbc871162')
    def test_update_load_balancer_invalid_name(self):
        """Test update load balancer with invalid name"""
        """Test update load balancer with invalid name

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._update_load_balancer,
                          load_balancer_id=self.load_balancer_id,

@@ -372,10 +380,12 @@
        self.assertEqual(load_balancer_initial, load_balancer_new)

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('ab3550c6-8b21-463c-bc5d-e79cbae3432f')
    def test_update_load_balancer_invalid_description(self):
        """Test update load balancer with invalid description"""
        """Test update load balancer with invalid description

        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(exceptions.BadRequest,
                          self._update_load_balancer,
                          load_balancer_id=self.load_balancer_id,
@@ -13,7 +13,6 @@
from oslo_log import log as logging

from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -71,9 +70,11 @@ class MemberTest(base.BaseAdminTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('01c9ea0c-bdfe-4108-95d1-69ecdc0a1f26')
    @decorators.skip_because(bug="1638148")
    def test_create_member_empty_tenant_id(self):
        """Test create member with an empty tenant_id should fail"""
        """Test create member with an empty tenant_id should fail

        Kilo: @decorators.skip_because(bug="1638148")
        """
        member_opts = {}
        member_opts['address'] = "127.0.0.1"
        member_opts['protocol_port'] = 80
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -64,9 +63,11 @@ class TestPools(base.BaseAdminTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('71b9d3e1-3f13-4c84-a905-054c9cd3d4aa')
    @decorators.skip_because(bug="1638148")
    def test_create_pool_using_empty_tenant_field(self):
        """Test create pool with empty tenant field should fail"""
        """Test create pool with empty tenant field should fail

        Kilo: @decorators.skip_because(bug="1638148")
        """
        self.assertRaises(ex.BadRequest, self._create_pool,
                          protocol='HTTP',
                          tenant_id="",
@@ -10,7 +10,6 @@
# License for the specific language governing permissions and limitations
# under the License.

from tempest.lib import decorators
from tempest.lib import exceptions as ex
from tempest import test

@@ -381,11 +380,11 @@ class TestPools(base.BaseTestCase):

    @test.attr(type='negative')
    @test.idempotent_id('cb564af8-89aa-40ca-850e-55418da0f235')
    @decorators.skip_because(bug="1637877")
    def test_create_pool_invalid_name_field(self):
        """known bug with

        input more than 255 chars Test create pool with invalid name field
        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(ex.BadRequest, self._create_pool,
                          protocol='HTTP',

@@ -394,12 +393,12 @@
                          name='n' * 256)

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('7f4472be-feb7-4ab7-9fb9-97e08f1fa787')
    def test_create_pool_invalid_desc_field(self):
        """known bug with

        input more than 255 chars Test create pool with invalid desc field
        Kilo: @decorators.skip_because(bug="1637877")
        """
        self.assertRaises(ex.BadRequest, self._prepare_and_create_pool,
                          protocol='HTTP',

@@ -521,19 +520,23 @@
        self.assertAlmostEqual(sess_pers, pool.get('session_persistence'))

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('23a9dbaf-105b-450e-95cf-050203b28366')
    def test_update_pool_invalid_name(self):
        """Test update pool with invalid name"""
        """Test update pool with invalid name

        Kilo: @decorators.skip_because(bug="1637877")
        """
        new_pool = self._prepare_and_create_pool()
        self.assertRaises(ex.BadRequest, self._update_pool,
                          new_pool.get('id'), name='n' * 256)

    @test.attr(type='negative')
    @decorators.skip_because(bug="1637877")
    @test.idempotent_id('efeeb827-5cb0-4349-8272-b2dbcbf42d22')
    def test_update_pool_invalid_desc(self):
        """Test update pool with invalid desc"""
        """Test update pool with invalid desc

        Kilo: @decorators.skip_because(bug="1637877")
        """
        new_pool = self._prepare_and_create_pool()
        self.assertRaises(ex.BadRequest, self._update_pool,
                          new_pool.get('id'),
@@ -16,6 +16,7 @@
import collections
import os
import re
import shlex
import subprocess
import time
import traceback

@@ -25,6 +26,7 @@ from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
from tempest.scenario import manager
from tempest import test

@@ -90,8 +92,8 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
    @classmethod
    def check_preconditions(cls):
        super(TopoDeployScenarioManager, cls).check_preconditions()
        if not (CONF.network.project_networks_reachable
                or CONF.network.public_network_id):
        if not (CONF.network.project_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either project_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False

@@ -386,19 +388,61 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
        return HELO.create_subnet(self, network, **kwargs)

    def create_floatingip_for_server(self, server, external_network_id=None,
                                     port_id=None, client_mgr=None):
                                     port_id=None, client_mgr=None,
                                     and_check_assigned=True):
        client_mgr = client_mgr or self.manager
        net_floatingip = self.create_floating_ip(
            server,
            external_network_id=external_network_id,
            port_id=port_id,
            client=client_mgr.floating_ips_client)
        if port_id:
            # attached to port, will not check ip assignment & reachability
            return net_floatingip
        if not and_check_assigned:
            # caller will do the floatingip assigned to server and ping tests
            return net_floatingip
        self._waitfor_floatingip_assigned_to_server(client_mgr.servers_client,
                                                    server.get('id'))
        server_pingable = self._waitfor_associated_floatingip(net_floatingip)
        self.assertTrue(
            server_pingable,
            msg="Expect server to be reachable after floatingip assigned.")
        return net_floatingip

    def _waitfor_floatingip_assigned_to_server(self, server_client, server_id,
                                               on_network=None,
                                               extra_timeout=60):
        timeout = server_client.build_timeout + extra_timeout
        interval = server_client.build_interval
        start_time = time.time()
        while time.time() - start_time < timeout:
            sv = server_client.show_server(server_id)
            sv = sv.get('server', sv)
            fip = self.get_server_ip_address(sv, 'floating')
            if fip:
                elapse_time = time.time() - start_time
                xmsg = ("%s Take %d seconds to assign floatingip to server[%s]"
                        % ("OS-STATS:", int(elapse_time), sv.get('name')))
                LOG.debug(xmsg)
                return fip
            time.sleep(interval)
        raise Exception(
            "Server[%s] did not get its floatingip in %s seconds" %
            (server_id, timeout))

    def get_server_ip_address(self, server, ip_type='fixed',
                              network_name=None):
        if network_name and server['addresses'].get(network_name):
            s_if = network_name
        else:
            s_if = server['addresses'].keys()[0]

        for s_address in server['addresses'][s_if]:
            if s_address['OS-EXT-IPS:type'] == ip_type:
                return s_address.get('addr')
        return None

    def _waitfor_associated_floatingip(self, net_floatingip):
        host_ip = net_floatingip['floating_ip_address']
        return self.waitfor_host_connected(host_ip)

@@ -705,3 +749,23 @@ def waitfor_servers_terminated(tenant_servers_client, pause=2.0):
        if len(s_list) < 1:
            return
        time.sleep(pause)


def copy_file_to_host(file_from, dest, host, username, pkey):
    dest = "%s@%s:%s" % (username, host, dest)
    cmd = "scp -v -o UserKnownHostsFile=/dev/null " \
          "-o StrictHostKeyChecking=no " \
          "-i %(pkey)s %(file1)s %(dest)s" % {'pkey': pkey,
                                              'file1': file_from,
                                              'dest': dest}
    args = shlex.split(cmd.encode('utf-8'))
    subprocess_args = {'stdout': subprocess.PIPE,
                       'stderr': subprocess.STDOUT}
    proc = subprocess.Popen(args, **subprocess_args)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise exceptions.SSHExecCommandFailed(cmd,
                                              proc.returncode,
                                              stdout,
                                              stderr)
    return stdout
@@ -0,0 +1,170 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time

from tempest import config
from tempest import test

from vmware_nsx_tempest.services.lbaas import l7policies_client
from vmware_nsx_tempest.services.lbaas import l7rules_client
from vmware_nsx_tempest.tests.nsxv.scenario import (
    test_lbaas_round_robin_ops as lbaas_ops)

CONF = config.CONF


class TestL7SwitchingOps(lbaas_ops.LBaasRoundRobinBaseTest):

    """This test validates lbaas l7 switching with round-robin operation.

    The test leverages test_lbaas_round_robin to create the basic round-robin
    operation, then builds an l7 pool and members to which requests whose URL
    path starts with the specified starts_with value are forwarded.

    Manual operation can be found at test proc: https://goo.gl/btDMXy
    """

    @classmethod
    def skip_checks(cls):
        super(TestL7SwitchingOps, cls).skip_checks()
        if '1739510' in CONF.nsxv.bugs_to_resolve:
            msg = ("skip lbaas_l7_switching_ops because bug=1739510"
                   " -- l7 switching is not supported")
            raise cls.skipException(msg)

    @classmethod
    def resource_setup(cls):
        super(TestL7SwitchingOps, cls).resource_setup()
        cls.create_lbaas_clients(cls.manager)
        cls.l7policies_client = l7policies_client.get_client(cls.manager)
        cls.l7rules_client = l7rules_client.get_client(cls.manager)

    @classmethod
    def setup_credentials(cls):
        super(TestL7SwitchingOps, cls).setup_credentials()

    def setUp(self):
        super(TestL7SwitchingOps, self).setUp()
        self.switching_startswith_value1 = "/api"
        self.switching_startswith_value2 = "/api2"
        self.pool7 = None
        self.l7policy1 = None
        self.l7rule1 = None
        self.l7rule_kwargs = dict(type='PATH',
                                  compare_type='STARTS_WITH',
                                  value=self.switching_startswith_value1)

    def tearDown(self):
        lb_id = self.loadbalancer['id']
        # teardown lbaas l7 provision
        if self.l7policy1:
            self.l7policies_client.delete_l7policy(self.l7policy1.get('id'))
            self.wait_for_load_balancer_status(lb_id)
        if self.pool7:
            self.pools_client.delete_pool(self.pool7.get('id'))
            self.wait_for_load_balancer_status(lb_id)
        super(TestL7SwitchingOps, self).tearDown()

    def create_and_start_l7_web_servers(self):
        key_name = self.keypair['name']
        network_name = self.network['name']
        security_groups = [{'name': self.security_group['id']}]
        self.server7 = self.create_server_on_network(
            self.network, name=(network_name + "-7"),
            security_groups=security_groups,
            key_name=key_name, wait_on_boot=False,
            servers_client=self.manager.servers_client)
        self.server8 = self.create_server_on_network(
            self.network, name=(network_name + "-8"),
            security_groups=security_groups,
            key_name=key_name, wait_on_boot=False,
            servers_client=self.manager.servers_client)
        self.l7_server_list = [self.server7, self.server8]
        self.wait_for_servers_become_active(self.l7_server_list)
        self.start_web_servers(self.l7_server_list)

    def build_l7_switching(self):
        subnet_id = self.subnet.get('id')
        lb_id = self.loadbalancer['id']
        l7_name = self.loadbalancer['name'] + "-7"
        redirect_to_listener_id = self.listener.get('id')
        # build_l7_pool(loadbalancer_id):
        self.pool7 = self.pools_client.create_pool(
            loadbalancer_id=lb_id,
            lb_algorithm=self.lb_algorithm, protocol=self.protocol_type,
            name=l7_name)['pool']
        self.wait_for_load_balancer_status(lb_id)
        pool_id = self.pool7['id']
        self.member7_list = []
        for server in self.l7_server_list:
            fip = server['_floating_ip']
            fixed_ip_address = fip['fixed_ip_address']
            member = self.members_client.create_member(
                pool_id, subnet_id=subnet_id,
                address=fixed_ip_address,
                protocol_port=self.protocol_port)
            self.wait_for_load_balancer_status(lb_id)
            self.member7_list.append(member)
        l7policy_kwargs = dict(action="REDIRECT_TO_POOL",
                               redirect_pool_id=pool_id,
                               listener_id=redirect_to_listener_id,
                               name='policy1')
        l7policy1 = self.l7policies_client.create_l7policy(**l7policy_kwargs)
        self.l7policy1 = l7policy1.get(u'l7policy', l7policy1)
        policy_id = self.l7policy1.get('id')
        self.l7rule1 = self.l7rules_client.create_l7rule(
            policy_id, **self.l7rule_kwargs)['rule']

    def check_l7_switching(self, start_path, expected_server_list,
                           send_count=6):
        self.do_http_request(start_path, send_count)
        for sv_name, cnt in self.http_cnt.items():
            self.assertIn(sv_name, expected_server_list)
            self.assertTrue(cnt > 0)

    def validate_l7_switching(self):
        l7_sv_name_list = [s['name'] for s in self.l7_server_list]
        rr_sv_name_list = [s['name'] for s in self.rr_server_list]
        # URL prefix api switching to pool7
        self.check_l7_switching('api', l7_sv_name_list, 6)
        # URL prefix ap/i switching to pool1
        self.check_l7_switching('ap/i', rr_sv_name_list, 6)
        # URL prefix api2 switching to pool7
        self.check_l7_switching('api2', l7_sv_name_list, 6)

        # change rule starts_with's value to /api2
        # and /api & /api/2 will be switched to default pool
        policy_id = self.l7policy1.get('id')
        rule_id = self.l7rule1.get('id')
        self.l7rule_kwargs['value'] = self.switching_startswith_value2
        self.l7rule2 = self.l7rules_client.update_l7rule(
            policy_id, rule_id, **self.l7rule_kwargs)['rule']
        time.sleep(2.0)
        # URL prefix api switching to default pool
        self.check_l7_switching('api', rr_sv_name_list, 6)
        # URL prefix api/2 switching to default pool
        self.check_l7_switching('api/2', rr_sv_name_list, 6)
        # URL prefix api2 switching to pool7
        self.check_l7_switching('api2', l7_sv_name_list, 6)
        # URL prefix xapi2 switching to default pool
        self.check_l7_switching('xapi2', rr_sv_name_list, 6)

    @test.idempotent_id('f11e19e4-16b5-41c7-878d-59b9e943e3ce')
    @test.services('compute', 'network')
    def test_lbaas_l7_switching_ops(self):
        self.create_lbaas_networks()
        self.start_web_servers()
        self.create_project_lbaas()
        self.check_project_lbaas()
        # do l7 provision and testing
        self.create_and_start_l7_web_servers()
        self.build_l7_switching()
        self.validate_l7_switching()
@@ -26,32 +26,26 @@ from vmware_nsx_tempest.services.lbaas import members_client
from vmware_nsx_tempest.services.lbaas import pools_client
from vmware_nsx_tempest.tests.nsxv.scenario import (
    manager_topo_deployment as dmgr)
from vmware_nsx_tempest.tests.nsxv.scenario import test_v1_lbaas_basic_ops


CONF = config.CONF
LOG = dmgr.manager.log.getLogger(__name__)


class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
class LBaasRoundRobinBaseTest(dmgr.TopoDeployScenarioManager):
    """Base class to support LBaaS ROUND-ROBIN test.

    """This test checks basic load balancer V2 ROUND-ROBIN operation.
    It provides the methods to create loadbalancer network, and
    start web servers.

    The following is the scenario outline:
    1. Create network with exclusive router, and 2 servers
    2. SSH to each instance and start web server
    3. Create a load balancer with 1 listener, 1 pool, 1 healthmonitor
       and 2 members and with ROUND_ROBIN algorithm.
    4. Associate loadbalancer's vip_address with a floating ip
    5. Send NUM requests to vip's floating ip and check that they are shared
       between the two servers.
    Default lb_algorithm is ROUND_ROBIN.
    """

    tenant_router_attrs = {'router_type': 'exclusive'}

    @classmethod
    def skip_checks(cls):
        super(TestLBaasRoundRobinOps, cls).skip_checks()
        super(LBaasRoundRobinBaseTest, cls).skip_checks()
        cfg = CONF.network
        if not test.is_extension_enabled('lbaasv2', 'network'):
            msg = 'lbaasv2 extension is not enabled.'

@@ -63,7 +57,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):

    @classmethod
    def resource_setup(cls):
        super(TestLBaasRoundRobinOps, cls).resource_setup()
        super(LBaasRoundRobinBaseTest, cls).resource_setup()
        cls.create_lbaas_clients(cls.manager)

    @classmethod

@@ -78,10 +72,10 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
    def setup_credentials(cls):
        # Ask framework to not create network resources for these tests.
        cls.set_network_resources()
        super(TestLBaasRoundRobinOps, cls).setup_credentials()
        super(LBaasRoundRobinBaseTest, cls).setup_credentials()

    def setUp(self):
        super(TestLBaasRoundRobinOps, self).setUp()
        super(LBaasRoundRobinBaseTest, self).setUp()
        CONF.validation.ssh_shell_prologue = ''
        self.namestart = 'lbaas-ops'
        self.poke_counters = 10

@@ -91,6 +85,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
        self.hm_delay = 4
        self.hm_max_retries = 3
        self.hm_timeout = 10
        self.hm_type = 'PING'
        self.server_names = []
        self.loadbalancer = None
        self.vip_fip = None

@@ -108,7 +103,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
        # make sure servers terminated before teardown network resources
        LOG.debug("tearDown lbaas servers")
        server_id_list = []
        for servid in ['server1', 'server2']:
        for servid in ['server1', 'server2', 'server7', 'server8']:
            server = getattr(self, servid, None)
            if server:
                if '_floating_ip' in server:

@@ -120,7 +115,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            waiters.wait_for_server_termination(
                self.manager.servers_client, server_id)
        # delete lbaas network before handing back to framework
        super(TestLBaasRoundRobinOps, self).tearDown()
        super(LBaasRoundRobinBaseTest, self).tearDown()
        LOG.debug("tearDown lbaas exiting...")

    def delete_loadbalancer_resources(self, lb_id):

@@ -129,7 +124,25 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            statuses = statuses.get('statuses', statuses)
            lb = statuses.get('loadbalancer')
            for listener in lb.get('listeners', []):
                for policy in listener.get('l7policies'):
                    self.l7policies_client.delete_l7policy(policy.get('id'))
                for pool in listener.get('pools'):
                    self.delete_lb_pool_resources(lb_id, pool)
                test_utils.call_and_ignore_notfound_exc(
                    self.listeners_client.delete_listener,
                    listener.get('id'))
                self.wait_for_load_balancer_status(lb_id)
            # delete pools not attached to listener, but loadbalancer
            for pool in lb.get('pools', []):
                self.delete_lb_pool_resources(lb_id, pool)
            test_utils.call_and_ignore_notfound_exc(
                lb_client.delete_load_balancer, lb_id)
        self.load_balancers_client.wait_for_load_balancer_status(
            lb_id, is_delete_op=True)
        lbs = lb_client.list_load_balancers()['loadbalancers']
        self.assertEqual(0, len(lbs))

    def delete_lb_pool_resources(self, lb_id, pool):
        pool_id = pool.get('id')
        hm = pool.get('healthmonitor')
        if hm:

@@ -145,16 +158,6 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
                self.members_client.delete_member,
                pool_id, member.get('id'))
            self.wait_for_load_balancer_status(lb_id)
        test_utils.call_and_ignore_notfound_exc(
            self.listeners_client.delete_listener,
            listener.get('id'))
        self.wait_for_load_balancer_status(lb_id)
        test_utils.call_and_ignore_notfound_exc(
            lb_client.delete_load_balancer, lb_id)
        self.load_balancers_client.wait_for_load_balancer_status(
            lb_id, is_delete_op=True)
        lbs = lb_client.list_load_balancers()['loadbalancers']
        self.assertEqual(0, len(lbs))

    def wait_for_load_balancer_status(self, lb_id):
        # Wait for load balancer become ONLINE and ACTIVE

@@ -180,10 +183,11 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            security_groups=security_groups,
            key_name=key_name,
            servers_client=self.manager.servers_client)
        self.wait_for_servers_become_active()
        self.rr_server_list = [self.server1, self.server2]
        self.wait_for_servers_become_active(self.rr_server_list)

    def wait_for_servers_become_active(self):
        for serv in [self.server1, self.server2]:
    def wait_for_servers_become_active(self, server_list):
        for serv in server_list:
            waiters.wait_for_server_status(
                self.manager.servers_client,
                serv['id'], 'ACTIVE')

@@ -205,13 +209,14 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
                tenant_id=self.tenant_id,
                **rule)

    def start_web_servers(self):
    def start_web_servers(self, server_list=None):
        """Start predefined servers:

        1. SSH to the instance
        2. Start http backends listening on port 80
        """
        for server in [self.server1, self.server2]:
        server_list = server_list or self.rr_server_list
        for server in server_list:
            fip = self.create_floatingip_for_server(
                server, self.public_network_id,
                client_mgr=self.manager)

@@ -220,7 +225,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            self.start_web_server(server, server_fip, server['name'])
        # need to wait for web server to be able to response
        time.sleep(self.web_service_start_delay)
        for server in [self.server1, self.server2]:
        for server in server_list:
            server_name = server['name']
            fip = server['_floating_ip']
            web_fip = fip['floating_ip_address']

@@ -248,8 +253,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            with tempfile.NamedTemporaryFile() as key:
                key.write(private_key)
                key.flush()
                test_v1_lbaas_basic_ops.copy_file_to_host(
                    script.name,
                dmgr.copy_file_to_host(script.name,
                                       "/tmp/script",
                                       server_fip, username, key.name)

@@ -294,7 +298,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):

        self.healthmonitor = (
            self.health_monitors_client.create_health_monitor(
                pool_id=pool_id, type=self.protocol_type,
                pool_id=pool_id, type=self.hm_type,
                delay=self.hm_delay, max_retries=self.hm_max_retries,
                timeout=self.hm_timeout))
        self.wait_for_load_balancer_status(lb_id)

@@ -324,20 +328,30 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
            port_id=self.loadbalancer['vip_port_id'],
            client_mgr=self.manager)
        self.vip_ip_address = self.vip_fip['floating_ip_address']
        time.sleep(1.0)
        self.send_request(self.vip_ip_address)
        for x in range(1, 8):
            time.sleep(2)
            resp = self.send_request(self.vip_ip_address)
            if resp:
                break
            LOG.debug('#%d LBaaS-VIP get NO response from its members', x)
        return self.vip_ip_address

    def check_project_lbaas(self):
    def do_http_request(self, start_path='', send_counts=None):
        statuses = self.load_balancers_client.show_load_balancer_status_tree(
            self.loadbalancer['id'])
        statuses = statuses.get('statuses', statuses)
        self.http_cnt = {}
        http = urllib3.PoolManager(retries=10)
        url_path = "http://{0}/".format(self.vip_ip_address)
        for x in range(self.poke_counters):
        send_counts = send_counts or self.poke_counters
        send_counts = (send_counts * 2) / 2
        url_path = "http://{0}/{1}".format(self.vip_ip_address, start_path)
        for x in range(send_counts):
            resp = http.request('GET', url_path)
            self.count_response(resp.data.strip())
        return self.http_cnt

    def check_project_lbaas(self):
        self.do_http_request(send_counts=self.poke_counters)
        # should response from 2 servers
        self.assertEqual(2, len(self.http_cnt))
        # ROUND_ROBIN, so equal counts

@@ -351,10 +365,25 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
        else:
            self.http_cnt[response] = 1


class TestLBaasRoundRobinOps(LBaasRoundRobinBaseTest):

    """This test checks basic load balancer V2 ROUND-ROBIN operation.

    The following is the scenario outline:
    1. Create network with exclusive router, and 2 servers
    2. SSH to each instance and start web server
    3. Create a load balancer with 1 listener, 1 pool, 1 healthmonitor
       and 2 members and with ROUND_ROBIN algorithm.
    4. Associate loadbalancer's vip_address with a floating ip
    5. Send NUM requests to vip's floating ip and check that they are shared
       between the two servers.
    """

    @test.idempotent_id('077d2a5c-4938-448f-a80f-8e65f5cc49d7')
    @test.services('compute', 'network')
    def test_lbaas_round_robin_ops(self):
        self.create_lbaas_networks()
        self.start_web_servers()
        self.start_web_servers(self.rr_server_list)
        self.create_project_lbaas()
        self.check_project_lbaas()