Tempest: fix for upstream patch 32049, which replaced oo-wrap
Patch 32049 replaced the object-oriented (oo) wrapped network resources with plain Python methods and dict/list references, and relies on the test framework's teardown mechanism to delete resources. This change adapts the NSX tempest tests accordingly: the partially oo-wrapped mechanisms are replaced with methods defined in the module network_addon_methods.py, and object-reference access is replaced with Python dict/list access. For the API tests, _try_delete_resource calls are replaced with test_utils.call_and_ignore_notfound_exc.

config.nsxv.no_router_type is added to support running the lbaasv2 tests under an upstream environment.

Potential issues with NSX 6.2.3 observed during this fix: bug#1682911 and bug#1683241.

Change-Id: I93a667b85d87644e8cbf27337c7355b338f67982
parent 9e74370f6b
commit 3d39f1300c
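For context, the core mechanical change is replacing the class-level _try_delete_resource() helper with tempest's test_utils.call_and_ignore_notfound_exc(). A minimal sketch of the new pattern, assuming a tempest network client is at hand (the function name below is illustrative, not part of the patch):

    from tempest.lib.common.utils import test_utils

    def delete_network_quietly(net_client, network):
        # Old style (removed): cls._try_delete_resource(delete_fn, net_id)
        # New style: call_and_ignore_notfound_exc() swallows NotFound, so
        # class-level teardown does not fail on resources that were already
        # deleted by the framework's reverse-order cleanup.
        test_utils.call_and_ignore_notfound_exc(
            net_client.delete_network, network['id'])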
@@ -96,6 +96,11 @@ NSXvGroup = [
cfg.IntOpt('provider_vlan_id',
default=888,
help="The default vlan_id for admin vlan."),
cfg.BoolOpt('no_router_type',
default=False,
help="router_type is NSXv extension."
"Set it to True allow tests to remove this attribute"
" when creating router."),
]
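A rough sketch of how a test is expected to consume the new no_router_type option (this mirrors the router_create() helper added in network_addon_methods.py further down; the helper name here is illustrative and assumes the NSXv option group above has been registered):

    from tempest import config

    CONF = config.CONF

    def build_router_kwargs(router_type=None, **kwargs):
        # router_type is an NSX-v extension; with CONF.nsxv.no_router_type
        # set to True (e.g. on plain upstream Neutron) the attribute is
        # dropped before calling routers_client.create_router().
        if router_type in ('shared', 'exclusive'):
            kwargs['router_type'] = router_type
        if CONF.nsxv.no_router_type:
            kwargs.pop('router_type', None)
        return kwargs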
@@ -18,6 +18,7 @@ import netaddr
from tempest.api.network import base
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions
from tempest import test

@@ -49,8 +50,8 @@ class BaseAdminNetworkTest(base.BaseAdminNetworkTest):
for netwk_info in cls.admin_netwk_info:
net_client, network = netwk_info
try:
cls._try_delete_resource(net_client.delete_network,
network['id'])
test_utils.call_and_ignore_notfound_exc(
net_client.delete_network, network['id'])
except Exception:
pass
super(BaseAdminNetworkTest, cls).resource_cleanup()
@@ -25,6 +25,7 @@ from tempest import config
from tempest import test

from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions

from vmware_nsx_tempest._i18n import _
@@ -96,23 +97,25 @@ class BaseTestCase(base.BaseNetworkTest):
for pool in listener.get('pools'):
hm = pool.get('healthmonitor')
if hm:
cls._try_delete_resource(
test_utils.call_and_ignore_notfound_exc(
cls.health_monitors_client.delete_health_monitor,
pool.get('healthmonitor').get('id'))
cls._wait_for_load_balancer_status(lb_id)
cls._try_delete_resource(cls.pools_client.delete_pool,
pool.get('id'))
test_utils.call_and_ignore_notfound_exc(
cls.pools_client.delete_pool, pool.get('id'))
cls._wait_for_load_balancer_status(lb_id)
health_monitor = pool.get('healthmonitor')
if health_monitor:
cls._try_delete_resource(
test_utils.call_and_ignore_notfound_exc(
cls.health_monitors_client.delete_health_monitor,
health_monitor.get('id'))
cls._wait_for_load_balancer_status(lb_id)
cls._try_delete_resource(cls.listeners_client.delete_listener,
test_utils.call_and_ignore_notfound_exc(
cls.listeners_client.delete_listener,
listener.get('id'))
cls._wait_for_load_balancer_status(lb_id)
cls._try_delete_resource(cls._delete_load_balancer, lb_id)
test_utils.call_and_ignore_notfound_exc(
cls._delete_load_balancer, lb_id)
# NSX-v: delete exclusive router
cls.delete_router(cls.router)
super(BaseTestCase, cls).resource_cleanup()
@@ -56,7 +56,7 @@ class TestHealthMonitors(base.BaseTestCase):

hm_list = self._list_health_monitors()
for hm in hm_list:
self._try_delete_resource(
test_utils.call_and_igonre_not_found_exc(
self._delete_health_monitor,
hm.get('id'))
"""
@ -14,6 +14,7 @@
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
|
||||
from tempest import config
|
||||
from tempest import test
|
||||
@ -43,7 +44,8 @@ class DnsSearchDomainTest(base.BaseAdminNetworkTest):
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
# we need to cleanup resouces created at class methods
|
||||
cls._try_delete_resource(cls.networks_client.delete_network,
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.networks_client.delete_network,
|
||||
cls.project_network['id'])
|
||||
super(DnsSearchDomainTest, cls).resource_cleanup()
|
||||
|
||||
@ -51,7 +53,7 @@ class DnsSearchDomainTest(base.BaseAdminNetworkTest):
|
||||
resp = self.create_network(client=self.networks_client,
|
||||
name=network_name)
|
||||
network = resp.get('network', resp)
|
||||
self.addCleanup(self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.networks_client.delete_network,
|
||||
network['id'])
|
||||
resp = self.create_subnet(network,
|
||||
|
@ -16,6 +16,7 @@
|
||||
from tempest.api.network import base
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
from tempest import test
|
||||
|
||||
@ -35,7 +36,7 @@ class DnsSearchDoaminsNegativeTest(base.BaseAdminNetworkTest):
|
||||
network_name = data_utils.rand_name('dns-sear-negative')
|
||||
resp = networks_client.create_network(name=network_name)
|
||||
network = resp.get('network', resp)
|
||||
self.addCleanup(self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
network['id'])
|
||||
subnet_cfg = {
|
||||
|
@ -17,6 +17,7 @@ import re
|
||||
import six
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
import base_provider as base
|
||||
@ -94,7 +95,7 @@ class MultipleTransportZonesTest(base.BaseAdminNetworkTest):
|
||||
resp = self.create_network(network_name, **create_kwargs)
|
||||
network = resp.get('network', resp)
|
||||
net_id = network['id']
|
||||
self.addCleanup(self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
self.delete_network, net_id)
|
||||
self.assertEqual(scope_id,
|
||||
network['provider:physical_network'])
|
||||
@ -150,7 +151,7 @@ class MultipleTransportZonesTest(base.BaseAdminNetworkTest):
|
||||
kwargs.update(create_kwargs)
|
||||
router = routers_client.create_router(**kwargs)
|
||||
router = router['router'] if 'router' in router else router
|
||||
self.addCleanup(self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.delete_router, router['id'])
|
||||
self.assertEqual(router['name'], router_name)
|
||||
return (routers_client, router)
|
||||
@ -165,8 +166,7 @@ class MultipleTransportZonesTest(base.BaseAdminNetworkTest):
|
||||
for net_id, (s_id, network, subnet) in six.iteritems(nets):
|
||||
# register to cleanup before adding interfaces so interfaces
|
||||
# and router can be deleted if test is aborted.
|
||||
self.addCleanup(
|
||||
self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.remove_router_interface,
|
||||
router['id'], subnet_id=subnet['id'])
|
||||
routers_client.add_router_interface(
|
||||
|
@ -15,6 +15,7 @@
|
||||
# under the License.
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from tempest.api.network import base
|
||||
@ -42,7 +43,7 @@ class MultipleTransportZonesNegativeTest(base.BaseAdminNetworkTest):
|
||||
resp = networks_client.create_network(name=network_name,
|
||||
**create_kwargs)
|
||||
network = resp['network'] if 'network' in resp else resp
|
||||
self.addCleanup(self._try_delete_resource,
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
network['id'])
|
||||
return network
|
||||
|
@ -17,6 +17,7 @@
|
||||
import six
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from tempest.api.network import base
|
||||
@ -99,26 +100,27 @@ class LoadBalancerTestJSON(base.BaseNetworkTest):
|
||||
"""
|
||||
# Cleanup lb health monitors
|
||||
if cls.health_monitor:
|
||||
cls._try_delete_resource(cls.lbv1_client.delete_health_monitor,
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_health_monitor,
|
||||
cls.health_monitor['id'])
|
||||
cls.health_monitor = None
|
||||
|
||||
# Cleanup members
|
||||
if cls.member:
|
||||
cls._try_delete_resource(cls.lbv1_client.delete_member,
|
||||
cls.member['id'])
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_member, cls.member['id'])
|
||||
cls.member = None
|
||||
|
||||
# Cleanup vips
|
||||
if cls.vip:
|
||||
cls._try_delete_resource(cls.lbv1_client.delete_vip,
|
||||
cls.vip['id'])
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_vip, cls.vip['id'])
|
||||
cls.vip = None
|
||||
|
||||
# Cleanup pool
|
||||
if cls.pool:
|
||||
cls._try_delete_resource(cls.lbv1_client.delete_pool,
|
||||
cls.pool['id'])
|
||||
test_utils.call_and_ignore_notfound_exc(
|
||||
cls.lbv1_client.delete_pool, cls.pool['id'])
|
||||
cls.pool = None
|
||||
|
||||
super(LoadBalancerTestJSON, cls).resource_cleanup()
|
||||
|
@ -14,24 +14,22 @@
|
||||
# under the License.
|
||||
|
||||
import collections
|
||||
from fixtures._fixtures import timeout as fixture_timeout
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import net_resources
|
||||
import netaddr
|
||||
|
||||
from tempest.common.utils.linux import remote_client
|
||||
from tempest.common import waiters
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.scenario import manager
|
||||
from tempest import test
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import exceptions
|
||||
from vmware_nsx_tempest.tests.nsxv.scenario import (
|
||||
network_addon_methods as HELO)
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = manager.log.getLogger(__name__)
|
||||
@ -65,6 +63,9 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
3. net_resources.py overwrite resourses.py so the method to add
|
||||
interfaces to routers are inline with CLI, and support router
|
||||
owned by admin, but subnets are primary/alt clients.
|
||||
-- mechanism removed with patch 320495
|
||||
-- we are relaying on the test framework to delete resources
|
||||
in the reverse order of creating.
|
||||
4. Ping is used for Data-plane testing. OUTSIDE_WORLD_SERVERS ping
|
||||
test make sense when tenant's DNS is pirvate to provider.
|
||||
5. Teardown is high cost, each test should perform its un-config to
|
||||
@ -124,56 +125,80 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
def tearDown(self):
|
||||
super(TopoDeployScenarioManager, self).tearDown()
|
||||
|
||||
# bypass pareant _create_router() to use the net_resources module.
|
||||
# Scenario: routers belong to admin, subnets belon to tenent
|
||||
# overwrite parent class which does not accept NSX-v extension
|
||||
def _create_router(self, client_mgr=None, tenant_id=None,
|
||||
namestart='topo-deploy', **kwargs):
|
||||
client_mgr = client_mgr or self.manager
|
||||
routers_client = getattr(client_mgr, "routers_client")
|
||||
if not tenant_id:
|
||||
tenant_id = routers_client.tenant_id
|
||||
distributed = kwargs.pop('distributed', None)
|
||||
router_type = kwargs.pop('router_type', None)
|
||||
if distributed:
|
||||
kwargs['distributed'] = True
|
||||
elif router_type in ('shared', 'exclusive'):
|
||||
kwargs['router_type'] = router_type
|
||||
name = data_utils.rand_name(namestart)
|
||||
result = routers_client.create_router(name=name,
|
||||
admin_state_up=True,
|
||||
router = HELO.router_create(self, client=routers_client,
|
||||
tenant_id=tenant_id,
|
||||
namestart=namestart,
|
||||
**kwargs)
|
||||
router = net_resources.DeletableRouter(client=routers_client,
|
||||
routers_client=routers_client,
|
||||
**result['router'])
|
||||
self.assertEqual(router.name, name)
|
||||
self.addCleanup(self.delete_wrapper, router.delete)
|
||||
return router
|
||||
|
||||
def _router_set_gateway(self, router_id, network_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_gateway_set(self, router_id, network_id,
|
||||
client=routers_client)
|
||||
|
||||
def _router_clear_gateway(self, router_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_gateway_clear(self, router_id,
|
||||
client=routers_client)
|
||||
|
||||
def _router_update_extra_routes(self, router_id, routes, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
router = routers_client.update_route(self, router_id,
|
||||
routes=routes)
|
||||
return router['router']
|
||||
|
||||
def _router_delete_extra_routes(self, router_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_delete_extra_routes(self, router_id,
|
||||
routers_client)
|
||||
|
||||
def _router_add_interface(self, net_router, net_subnet, client_mgr):
|
||||
routers_client = client_mgr.routers_client
|
||||
return HELO.router_interface_add(self, net_router['id'],
|
||||
net_subnet['id'], routers_client)
|
||||
|
||||
def router_interface_add(self, router_id, subnet_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_interface_add(self, router_id, subnet_id,
|
||||
routers_client)
|
||||
|
||||
def router_interface_delete(self, router_id, subnet_id, client=None):
|
||||
routers_client = client or self.routers_client
|
||||
return HELO.router_interface_delete(self, router_id, subnet_id,
|
||||
routers_client)
|
||||
|
||||
def create_server_on_network(self, networks, security_groups=None,
|
||||
name=None, image=None, wait_on_boot=True,
|
||||
flavor=None, servers_client=None,
|
||||
key_name=None):
|
||||
key_name=None, tenant_id=None):
|
||||
name = name or data_utils.rand_name('topo-deploy-vm')
|
||||
if security_groups is None:
|
||||
security_groups = [{'name': 'default'}]
|
||||
if type(networks) in (list, tuple):
|
||||
network_ifs = [{'uuid': nw.id} for nw in networks]
|
||||
network_ifs = [{'uuid': nw['id']} for nw in networks]
|
||||
else:
|
||||
network_ifs = [{'uuid': networks.id}]
|
||||
network_ifs = [{'uuid': networks['id']}]
|
||||
create_kwargs = {
|
||||
'networks': network_ifs,
|
||||
'security_groups': security_groups,
|
||||
}
|
||||
if key_name:
|
||||
create_kwargs['key_name'] = key_name
|
||||
if tenant_id:
|
||||
if not (servers_client and servers_client.tenant_id == tenant_id):
|
||||
create_kwargs['tenant_id'] = tenant_id
|
||||
LOG.debug("TopoDeploy Create server name=%(name)s"
|
||||
", create_kwargs=%(create_kwargs)s",
|
||||
{'name': name, 'create_kwargs': str(create_kwargs)})
|
||||
server = self.create_server(
|
||||
name=name, image=image, wait_on_boot=wait_on_boot,
|
||||
servers_client=servers_client, flavor=flavor,
|
||||
create_kwargs=create_kwargs)
|
||||
tenant_id=tenant_id, create_kwargs=create_kwargs)
|
||||
return server
|
||||
|
||||
# overwrite parent classes; add servers_client
|
||||
@ -197,6 +222,7 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
servers_client = servers_client or self.servers_client
|
||||
create_kwargs = create_kwargs or {}
|
||||
if type(tenant_id) in (str, unicode):
|
||||
if servers_client.tenant_id != tenant_id:
|
||||
create_kwargs['tenant_id'] = tenant_id
|
||||
|
||||
xmsg = ("Creating a server name=%(name)s, image=%(image)s"
|
||||
@ -215,7 +241,7 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
waiter_callable=waiters.wait_for_server_termination,
|
||||
thing_id=server['id'], thing_id_param='server_id',
|
||||
waiter_client=servers_client,
|
||||
cleanup_callable=self.delete_wrapper,
|
||||
cleanup_callable=test_utils.call_and_ignore_notfound_exc,
|
||||
cleanup_args=[servers_client.delete_server, server['id']])
|
||||
if wait_on_boot:
|
||||
waiters.wait_for_server_status(
|
||||
@ -234,21 +260,18 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
name = create_body.get('name', None) or data_utils.rand_name('P-net')
|
||||
create_body['name'] = name
|
||||
client_mgr = client_mgr or self.admin_manager
|
||||
networks_client = client_mgr.networks_client
|
||||
body = networks_client.create_network(**create_body)
|
||||
net_network = net_resources.DeletableNetwork(
|
||||
networks_client=networks_client, **body['network'])
|
||||
self.assertEqual(net_network.name, name)
|
||||
self.addCleanup(self.delete_wrapper, net_network.delete)
|
||||
net_network = HELO.create_network(
|
||||
self, client=client_mgr.networks_client, **create_body)
|
||||
return net_network
|
||||
|
||||
def create_provider_subnet(self, client_mgr=None, create_body=None):
|
||||
client_mgr = client_mgr or self.admin_manager
|
||||
subnets_client = client_mgr.subnets_client
|
||||
body = subnets_client.create_subnet(**create_body)
|
||||
net_subnet = net_resources.DeletableSubnet(
|
||||
subnets_client=subnets_client, **body['subnet'])
|
||||
self.addCleanup(self.delete_wrapper, net_subnet.delete)
|
||||
net_subnet = body['subnet']
|
||||
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
subnets_client.delete_subnet,
|
||||
net_subnet['id'])
|
||||
return net_subnet
|
||||
|
||||
def setup_project_network(self, external_network_id,
|
||||
@ -277,13 +300,12 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
client_mgr=client_mgr, tenant_id=tenant_id,
|
||||
namestart=name,
|
||||
distributed=distributed, router_type=router_type)
|
||||
net_router.set_gateway(external_network_id)
|
||||
self._router_set_gateway(net_router['id'], external_network_id,
|
||||
client=client_mgr.routers_client)
|
||||
net_network, net_subnet = self.create_network_subnet(
|
||||
client_mgr=client_mgr,
|
||||
tenant_id=tenant_id, name=net_router.name,
|
||||
cidr_offset=cidr_offset)
|
||||
# different from the resources.py
|
||||
net_router.add_interface(net_subnet)
|
||||
client_mgr=client_mgr, name=net_router['name'],
|
||||
tenant_id=tenant_id, cidr_offset=cidr_offset)
|
||||
self._router_add_interface(net_router, net_subnet, client_mgr)
|
||||
return net_network, net_subnet, net_router
|
||||
|
||||
def create_network_subnet(self, client_mgr=None,
|
||||
@ -303,40 +325,25 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
# cloned from _create_network@manager.py. Allow name parameter
|
||||
def create_network(self, client=None, tenant_id=None, name=None,
|
||||
**kwargs):
|
||||
client = client or self.networks_client
|
||||
tenant_id = tenant_id or _g_tenant_id(client)
|
||||
networks_client = client or self.networks_client
|
||||
tenant_id = tenant_id or _g_tenant_id(networks_client)
|
||||
name = name or data_utils.rand_name('topo-deploy-network')
|
||||
result = client.create_network(name=name, tenant_id=tenant_id,
|
||||
return HELO.create_network(self, client=networks_client,
|
||||
tenant_id=tenant_id, name=name,
|
||||
**kwargs)
|
||||
net_network = net_resources.DeletableNetwork(
|
||||
client=client, networks_client=client,
|
||||
**result['network'])
|
||||
self.assertEqual(net_network.name, name)
|
||||
self.addCleanup(self.delete_wrapper, net_network.delete)
|
||||
return net_network
|
||||
|
||||
def create_subnet(self, network, client=None,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0,
|
||||
allocation_pools=None, dns_nameservers=None,
|
||||
**kwargs):
|
||||
client = client or self.subnets_client
|
||||
ip_version = ip_version or 4
|
||||
post_body = get_subnet_create_options(
|
||||
network['id'], ip_version,
|
||||
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
if allocation_pools:
|
||||
post_body['allocation_pools'] = allocation_pools
|
||||
if dns_nameservers:
|
||||
post_body['dns_nameservers'] = dns_nameservers
|
||||
LOG.debug("create_subnet args: %s", post_body)
|
||||
body = client.create_subnet(**post_body)
|
||||
net_subnet = net_resources.DeletableSubnet(
|
||||
client=client, subnets_client=client,
|
||||
**body['subnet'])
|
||||
self.addCleanup(self.delete_wrapper, net_subnet.delete)
|
||||
return net_subnet
|
||||
subnets_client = client or self.subnets_client
|
||||
kwargs.update(client=subnets_client, gateway=gateway,
|
||||
cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, ip_version=ip_version,
|
||||
allocation_pools=allocation_pools,
|
||||
dns_nameservers=dns_nameservers)
|
||||
return HELO.create_subnet(self, network, **kwargs)
|
||||
|
||||
def create_floatingip_for_server(self, server, external_network_id=None,
|
||||
port_id=None, client_mgr=None):
|
||||
@ -391,34 +398,29 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
LOG.debug(xmsg)
|
||||
return server_pingable
|
||||
|
||||
def disassociate_floatingip(self, net_floatingip, and_delete=False):
|
||||
self._disassociate_floating_ip(net_floatingip)
|
||||
def disassociate_floatingip(self, net_floatingip, client=None,
|
||||
and_delete=False):
|
||||
floating_ips_client = client or self.floating_ips_client
|
||||
kwargs = dict(port_id=None)
|
||||
floating_ip = floating_ips_client.update_floatingip(
|
||||
net_floatingip['id'], **kwargs)
|
||||
floating_ip = floating_ip.get('floatingip', floating_ip)
|
||||
self.assertIsNone(floating_ip['port_id'])
|
||||
if and_delete:
|
||||
net_floatingip.delete()
|
||||
floating_ips_client.delete_floatingip(floating_ip['id'])
|
||||
return floating_ip
|
||||
|
||||
def associate_floatingip(self, net_floatingip, to_server):
|
||||
self._associate_floating_ip(net_floatingip, to_server)
|
||||
def associate_floatingip(self, net_floatingip, to_server, client=None):
|
||||
floating_ips_client = client or self.floating_ips_client
|
||||
port_id, _ = self._get_server_port_id_and_ip4(to_server)
|
||||
kwargs = dict(port_id=port_id)
|
||||
floating_ip = floating_ips_client.update_floatingip(
|
||||
net_floatingip['id'], **kwargs)['floatingip']
|
||||
self.assertEqual(port_id, floating_ip['port_id'])
|
||||
return floating_ip
|
||||
|
||||
def check_networks(self, net_network, net_subnet=None, net_router=None):
|
||||
seen_nets = self._list_networks()
|
||||
seen_names = [n['name'] for n in seen_nets]
|
||||
seen_ids = [n['id'] for n in seen_nets]
|
||||
self.assertIn(net_network.name, seen_names)
|
||||
self.assertIn(net_network.id, seen_ids)
|
||||
|
||||
if net_subnet:
|
||||
seen_subnets = self._list_subnets()
|
||||
seen_net_ids = [n['network_id'] for n in seen_subnets]
|
||||
seen_subnet_ids = [n['id'] for n in seen_subnets]
|
||||
self.assertIn(net_network.id, seen_net_ids)
|
||||
self.assertIn(net_subnet.id, seen_subnet_ids)
|
||||
|
||||
if net_router:
|
||||
seen_routers = self._list_routers()
|
||||
seen_router_ids = [n['id'] for n in seen_routers]
|
||||
seen_router_names = [n['name'] for n in seen_routers]
|
||||
self.assertIn(net_router.name, seen_router_names)
|
||||
self.assertIn(net_router.id, seen_router_ids)
|
||||
return HELO.check_networks(self, net_network, net_subnet, net_router)
|
||||
|
||||
# use this carefully, as it expect existence of floating_ip_tuple
|
||||
def check_public_network_connectivity(self, should_connect=True,
|
||||
@ -441,7 +443,7 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
def _check_floatingip_connectivity(self, floating_ip, server,
|
||||
should_connect=True,
|
||||
msg=None, ping_timeout=30):
|
||||
ip_address = floating_ip.floating_ip_address
|
||||
ip_address = floating_ip['floating_ip_address']
|
||||
floatingip_status = 'ACTIVE' if should_connect else 'DOWN'
|
||||
is_pingable = self.ping_ip_address(ip_address,
|
||||
ping_timeout=ping_timeout)
|
||||
@ -463,36 +465,17 @@ class TopoDeployScenarioManager(manager.NetworkScenarioTest):
|
||||
def get_server_flavor(self):
|
||||
return CONF.compute.flavor_ref
|
||||
|
||||
# replaced by call_and_ignore_notfound_exc method
|
||||
# at tempest/lib/common/utils/test_utils.py
|
||||
def delete_wrapper(self, delete_thing, *args, **kwargs):
|
||||
"""Ignores NotFound exceptions for delete operations.
|
||||
|
||||
@param delete_thing: delete method of a resource. method will be
|
||||
executed as delete_thing(*args, **kwargs)
|
||||
|
||||
"""
|
||||
try:
|
||||
delete_thing(*args, **kwargs)
|
||||
except exceptions.NotFound:
|
||||
# If the resource is already missing, mission accomplished.
|
||||
pass
|
||||
except fixture_timeout.TimeoutException:
|
||||
# one more time
|
||||
try:
|
||||
delete_thing(*args, **kwargs)
|
||||
except exceptions.NotFound:
|
||||
pass
|
||||
|
||||
|
||||
# common utilities
|
||||
def make_node_info(net_floatingip, username, password,
|
||||
include_outside_servers=False):
|
||||
node = dict(ipaddr=net_floatingip.floating_ip_address,
|
||||
floating_ip_address = net_floatingip['floating_ip_address']
|
||||
fixed_ip_address = net_floatingip['fixed_ip_address']
|
||||
node = dict(ipaddr=floating_ip_address,
|
||||
username=username, password=password)
|
||||
node['dest'] = [dict(ipaddr=net_floatingip.floating_ip_address,
|
||||
node['dest'] = [dict(ipaddr=floating_ip_address,
|
||||
reachable=None, helper=IPTYPE_FLOATING),
|
||||
dict(ipaddr=net_floatingip.fixed_ip_address,
|
||||
dict(ipaddr=fixed_ip_address,
|
||||
reachable=None, helper=IPTYPE_FIXED)]
|
||||
if include_outside_servers:
|
||||
outside_servers = dict(ipaddr=OUTSIDE_WORLD_SERVERS[0],
|
||||
@ -656,59 +639,6 @@ def _g_tenant_id(os_client):
|
||||
return os_client.rest_client.tenant_id
|
||||
|
||||
|
||||
def get_subnet_create_options(network_id, ip_version=4,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
num_subnet=1, gateway_offset=1, cidr_offset=0,
|
||||
**kwargs):
|
||||
"""When cidr_offset>0 it request only one subnet-options:
|
||||
|
||||
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
|
||||
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
|
||||
"""
|
||||
|
||||
gateway_not_set = gateway == ''
|
||||
if ip_version == 4:
|
||||
cidr = cidr or netaddr.IPNetwork(CONF.network.project_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.project_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = (
|
||||
cidr or netaddr.IPNetwork(CONF.network.project_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet_list = []
|
||||
if cidr_offset > 0:
|
||||
num_subnet = cidr_offset + 1
|
||||
for subnet_cidr in cidr.subnet(mask_bits):
|
||||
if gateway_not_set:
|
||||
gateway_ip = gateway or (
|
||||
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
|
||||
else:
|
||||
gateway_ip = gateway
|
||||
try:
|
||||
subnet_body = dict(network_id=network_id,
|
||||
cidr=str(subnet_cidr),
|
||||
ip_version=ip_version,
|
||||
gateway_ip=gateway_ip,
|
||||
**kwargs)
|
||||
if num_subnet <= 1:
|
||||
return subnet_body
|
||||
subnet_list.append(subnet_body)
|
||||
if len(subnet_list) >= num_subnet:
|
||||
if cidr_offset > 0:
|
||||
# user request the 'cidr_offset'th of cidr
|
||||
return subnet_list[cidr_offset]
|
||||
# user request list of cidr
|
||||
return subnet_list
|
||||
except exceptions.BadRequest as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
else:
|
||||
message = 'Available CIDR for subnet creation could not be found'
|
||||
raise exceptions.BuildErrorException(message)
|
||||
return {}
|
||||
|
||||
|
||||
def get_remote_client_by_password(client_ip, username, password):
|
||||
ssh_client = remote_client.RemoteClient(client_ip, username, password)
|
||||
return ssh_client
|
||||
|
@ -1,101 +0,0 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# This module inherents from resources module and enhances router functions
|
||||
# and block subnet's add_to/delete_from_router so it is more similar to CLI.
|
||||
|
||||
# from tempest.scenario import network_resources as n_resources
|
||||
import network_resources as n_resources
|
||||
|
||||
|
||||
DELETABLE_CLASS_DEF = """class %(cls_name)s(n_resources.%(cls_name)s):
|
||||
pass
|
||||
"""
|
||||
IGNORE_LIST = ['DeletableSubnet', 'DeletableRouter']
|
||||
|
||||
|
||||
# inhere Deletable<Class> from parent module
|
||||
for cls_name in [x for x in dir(n_resources)
|
||||
if x.startswith('Deletable') and x not in IGNORE_LIST]:
|
||||
class_def = DELETABLE_CLASS_DEF % dict(cls_name=cls_name)
|
||||
exec class_def
|
||||
|
||||
|
||||
# Add/mod methods so we can use it while sustain original functions.
|
||||
MSG_BLOCK_BY_ADMIN = "Block %s as router might be owned by ADMIN. " \
|
||||
"Use DeletableRouter instead."
|
||||
|
||||
|
||||
class DeletableSubnet(n_resources.DeletableSubnet):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DeletableSubnet, self).__init__(*args, **kwargs)
|
||||
|
||||
def add_to_router(self, router_id):
|
||||
raise Exception(MSG_BLOCK_BY_ADMIN % "add_to_router()")
|
||||
|
||||
def delete_from_router(self, router_id):
|
||||
raise Exception(MSG_BLOCK_BY_ADMIN % "delete_from_router()")
|
||||
|
||||
|
||||
# DeletableSubnet should not deal with router which when owned by ADMIN
|
||||
# will raise privilege issue. Always let the router deals with interfaces.
|
||||
class DeletableRouter(n_resources.DeletableRouter):
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DeletableRouter, self).__init__(*args, **kwargs)
|
||||
self._subnets = set()
|
||||
|
||||
def set_gateway(self, network_id):
|
||||
return self.client.update_router(
|
||||
self.id,
|
||||
external_gateway_info=dict(network_id=network_id))
|
||||
|
||||
def unset_gateway(self):
|
||||
return self.client.update_router(
|
||||
self.id,
|
||||
external_gateway_info=dict())
|
||||
|
||||
def add_subnet(self, subnet):
|
||||
return self.add_interface(subnet)
|
||||
|
||||
def add_interface(self, subnet):
|
||||
self.client.add_router_interface(self.id, subnet_id=subnet.id)
|
||||
self._subnets.add(subnet)
|
||||
|
||||
def delete_subnet(self, subnet):
|
||||
return self.delete_interface(subnet)
|
||||
|
||||
def delete_interface(self, subnet):
|
||||
self.client.remove_router_interface(self.id, subnet_id=subnet.id)
|
||||
self._subnets.remove(subnet)
|
||||
|
||||
def update_extra_routes(self, nexthop, destination):
|
||||
return self.client.update_extra_routes(self.id, nexthop, destination)
|
||||
|
||||
# to-be-fixed by https://bugs.launchpad.net/tempest/+bug/1468600
|
||||
def update_extra_routes_future(self, routes):
|
||||
return self.client.update_extra_routes(self.id, routes)
|
||||
|
||||
def delete_extra_routes(self):
|
||||
return self.client.delete_extra_routes(self.id)
|
||||
|
||||
def delete(self):
|
||||
try:
|
||||
self.delete_extra_routes()
|
||||
except Exception:
|
||||
pass
|
||||
self.unset_gateway()
|
||||
for subnet in self._subnets.copy():
|
||||
self.delete_interface(subnet)
|
||||
super(DeletableRouter, self).delete()
|
vmware_nsx_tempest/tests/nsxv/scenario/network_addon_methods.py (new file, 258 lines)
@@ -0,0 +1,258 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# This module contains the methods added to test class that to be shared by
|
||||
# scenario tests that are inherent from tempest/scneario/manager.py or
|
||||
# manager_topo_deployment.py
|
||||
|
||||
import netaddr
|
||||
from oslo_log import log
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib.common.utils import test_utils
|
||||
from tempest.lib import exceptions
|
||||
|
||||
CONF = config.CONF
|
||||
LOG = log.getLogger(__name__)
|
||||
NO_ROUTER_TYPE = CONF.nsxv.no_router_type
|
||||
|
||||
|
||||
# following router methods are not support by upstream tempest,
|
||||
def router_create(SELF, client=None, tenant_id=None,
|
||||
namestart='nsxv-router',
|
||||
admin_state_up=True, **kwargs):
|
||||
routers_client = client or SELF.routers_client
|
||||
no_router_type = kwargs.pop('no_router_type', False)
|
||||
if tenant_id:
|
||||
if routers_client.tenant_id != tenant_id:
|
||||
kwargs['tenant_id'] = tenant_id
|
||||
distributed = kwargs.pop('distributed', None)
|
||||
router_type = kwargs.pop('router_type', None)
|
||||
if distributed:
|
||||
kwargs['distributed'] = True
|
||||
elif router_type in ('shared', 'exclusive'):
|
||||
kwargs['router_type'] = router_type
|
||||
name = kwargs.pop('name', None) or data_utils.rand_name(namestart)
|
||||
kwargs['name'] = name
|
||||
kwargs['admin_state_up'] = admin_state_up
|
||||
if NO_ROUTER_TYPE or no_router_type:
|
||||
# router_type is NSX-v extension.
|
||||
# caller can set no_router_type=True to remove it
|
||||
kwargs.pop('router_type', None)
|
||||
result = routers_client.create_router(**kwargs)
|
||||
router = result['router']
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.delete_router, router['id'])
|
||||
SELF.assertEqual(router['name'], name)
|
||||
return router
|
||||
|
||||
|
||||
def router_delete(SELF, router_id):
|
||||
routers_client = SELF.routers_client
|
||||
routers_client.delete_router(router_id)
|
||||
|
||||
|
||||
def router_gateway_set(SELF, router_id, network_id, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
routers_client.update_router(
|
||||
router_id,
|
||||
external_gateway_info=dict(network_id=network_id))
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
router_gateway_clear, SELF,
|
||||
router_id, client=routers_client)
|
||||
router = routers_client.show_router(router_id)
|
||||
return router.get('router', router)
|
||||
|
||||
|
||||
def router_gateway_clear(SELF, router_id, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
routers_client.update_router(
|
||||
router_id,
|
||||
external_gateway_info=dict())
|
||||
router = routers_client.show_router(router_id)
|
||||
return router.get('router', router)
|
||||
|
||||
|
||||
def router_update_extra_routes(SELF, router_id, routes, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
router = routers_client.update_route(router_id, routes=routes)
|
||||
return router.get('router', router)
|
||||
|
||||
|
||||
def router_delete_extra_routes(SELF, router_id, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
router = routers_client.update_route(router_id, routes=None)
|
||||
return router.get('router', router)
|
||||
|
||||
|
||||
def router_interface_add(SELF, router_id, subnet_id, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
routers_client.add_router_interface(router_id,
|
||||
subnet_id=subnet_id)
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
routers_client.remove_router_interface,
|
||||
router_id, subnet_id=subnet_id)
|
||||
|
||||
|
||||
def router_interface_delete(SELF, router_id, subnet_id, client=None):
|
||||
routers_client = client or SELF.routers_client
|
||||
routers_client.remove_router_interface(router_id, subnet_id=subnet_id)
|
||||
|
||||
|
||||
def router_add_interface(SELF, net_router, net_subnet, client_mgr):
|
||||
routers_client = client_mgr.routers_client
|
||||
return router_interface_add(SELF, net_router['id'], net_subnet['id'],
|
||||
routers_client)
|
||||
|
||||
|
||||
def check_networks(SELF, t_network, t_subnet=None, t_router=None):
|
||||
"""Checks that we see the newly created network/subnet/router.
|
||||
|
||||
checking the result of list_[networks,routers,subnets]
|
||||
"""
|
||||
|
||||
seen_nets = SELF._list_networks()
|
||||
seen_names = [n['name'] for n in seen_nets]
|
||||
seen_ids = [n['id'] for n in seen_nets]
|
||||
SELF.assertIn(t_network['name'], seen_names)
|
||||
SELF.assertIn(t_network['id'], seen_ids)
|
||||
|
||||
if t_subnet:
|
||||
seen_subnets = SELF._list_subnets()
|
||||
seen_net_ids = [n['network_id'] for n in seen_subnets]
|
||||
seen_subnet_ids = [n['id'] for n in seen_subnets]
|
||||
SELF.assertIn(t_network['id'], seen_net_ids)
|
||||
SELF.assertIn(t_subnet['id'], seen_subnet_ids)
|
||||
|
||||
if t_router:
|
||||
seen_routers = SELF._list_routers()
|
||||
seen_router_ids = [n['id'] for n in seen_routers]
|
||||
seen_router_names = [n['name'] for n in seen_routers]
|
||||
SELF.assertIn(t_router['name'],
|
||||
seen_router_names)
|
||||
SELF.assertIn(t_router['id'],
|
||||
seen_router_ids)
|
||||
|
||||
|
||||
def create_network_subnet(SELF, client_mgr=None, name=None,
|
||||
tenant_id=None, cidr_offset=0):
|
||||
client_mgr = client_mgr or SELF.manager
|
||||
networks_client = client_mgr.networks_client
|
||||
subnets_client = client_mgr.subnets_client
|
||||
tenant_id = tenant_id or networks_client.tenant_id
|
||||
name = name or data_utils.rand_name('network')
|
||||
net_network = create_network(SELF, client=networks_client,
|
||||
tenant_id=tenant_id, name=name)
|
||||
net_subnet = create_subnet(SELF, client=subnets_client,
|
||||
network=net_network,
|
||||
name=net_network['name'],
|
||||
cidr_offset=cidr_offset)
|
||||
return net_network, net_subnet
|
||||
|
||||
|
||||
# cloned from _create_network@manager.py. Allow name parameter
|
||||
def create_network(SELF, client=None, tenant_id=None, name=None, **kwargs):
|
||||
networks_client = client or SELF.networks_client
|
||||
tenant_id = tenant_id or networks_client.tenant_id
|
||||
name = name or data_utils.rand_name('network')
|
||||
body = networks_client.create_network(name=name,
|
||||
tenant_id=tenant_id,
|
||||
**kwargs)
|
||||
net_network = body['network']
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
networks_client.delete_network,
|
||||
net_network['id'])
|
||||
SELF.assertEqual(net_network['name'], name)
|
||||
return net_network
|
||||
|
||||
|
||||
# gateway=None means don't set gateway_ip in subnet
|
||||
def create_subnet(SELF, network, client=None,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
ip_version=None, cidr_offset=0,
|
||||
allocation_pools=None, dns_nameservers=None,
|
||||
**kwargs):
|
||||
subnets_client = client or SELF.subnets_client
|
||||
network_id = network['id']
|
||||
ip_version = ip_version or 4
|
||||
post_body = get_subnet_create_options(
|
||||
network_id, ip_version,
|
||||
gateway=gateway, cidr=cidr, cidr_offset=cidr_offset,
|
||||
mask_bits=mask_bits, **kwargs)
|
||||
if allocation_pools:
|
||||
post_body['allocation_pools'] = allocation_pools
|
||||
if dns_nameservers:
|
||||
post_body['dns_nameservers'] = dns_nameservers
|
||||
LOG.debug("create_subnet args: %s", post_body)
|
||||
body = subnets_client.create_subnet(**post_body)
|
||||
net_subnet = body['subnet']
|
||||
SELF.addCleanup(test_utils.call_and_ignore_notfound_exc,
|
||||
subnets_client.delete_subnet,
|
||||
net_subnet['id'])
|
||||
return net_subnet
|
||||
|
||||
|
||||
# utilities
|
||||
def get_subnet_create_options(network_id, ip_version=4,
|
||||
gateway='', cidr=None, mask_bits=None,
|
||||
num_subnet=1, gateway_offset=1, cidr_offset=0,
|
||||
**kwargs):
|
||||
"""When cidr_offset>0 it request only one subnet-options:
|
||||
|
||||
subnet = get_subnet_create_options('abcdefg', 4, num_subnet=4)[3]
|
||||
subnet = get_subnet_create_options('abcdefg', 4, cidr_offset=3)
|
||||
"""
|
||||
|
||||
gateway_not_set = (gateway == '')
|
||||
if ip_version == 4:
|
||||
cidr = cidr or netaddr.IPNetwork(CONF.network.project_network_cidr)
|
||||
mask_bits = mask_bits or CONF.network.project_network_mask_bits
|
||||
elif ip_version == 6:
|
||||
cidr = (
|
||||
cidr or netaddr.IPNetwork(CONF.network.project_network_v6_cidr))
|
||||
mask_bits = mask_bits or CONF.network.project_network_v6_mask_bits
|
||||
# Find a cidr that is not in use yet and create a subnet with it
|
||||
subnet_list = []
|
||||
if cidr_offset > 0:
|
||||
num_subnet = cidr_offset + 1
|
||||
for subnet_cidr in cidr.subnet(mask_bits):
|
||||
if gateway_not_set:
|
||||
gateway_ip = gateway or (
|
||||
str(netaddr.IPAddress(subnet_cidr) + gateway_offset))
|
||||
else:
|
||||
gateway_ip = gateway
|
||||
try:
|
||||
subnet_body = dict(network_id=network_id,
|
||||
cidr=str(subnet_cidr),
|
||||
ip_version=ip_version,
|
||||
gateway_ip=gateway_ip,
|
||||
**kwargs)
|
||||
if num_subnet <= 1:
|
||||
return subnet_body
|
||||
subnet_list.append(subnet_body)
|
||||
if len(subnet_list) >= num_subnet:
|
||||
if cidr_offset > 0:
|
||||
# user request the 'cidr_offset'th of cidr
|
||||
return subnet_list[cidr_offset]
|
||||
# user request list of cidr
|
||||
return subnet_list
|
||||
except exceptions.BadRequest as e:
|
||||
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
|
||||
if not is_overlapping_cidr:
|
||||
raise
|
||||
else:
|
||||
message = 'Available CIDR for subnet creation could not be found'
|
||||
raise exceptions.BuildErrorException(message)
|
||||
return {}
|
@ -1,220 +0,0 @@
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
import time
|
||||
|
||||
import six
|
||||
|
||||
from tempest import exceptions
|
||||
from tempest.lib.common.utils import misc
|
||||
|
||||
|
||||
class AttributeDict(dict):
|
||||
"""Provide attribute access (dict.key) to dictionary values."""
|
||||
|
||||
def __getattr__(self, name):
|
||||
"""Allow attribute access for all keys in the dict."""
|
||||
if name in self:
|
||||
return self[name]
|
||||
return super(AttributeDict, self).__getattribute__(name)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class DeletableResource(AttributeDict):
|
||||
"""Support deletion of neutron resources (networks, subnets)
|
||||
|
||||
via a delete() method, as is supported by keystone and nova resources.
|
||||
"""
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
self.client = kwargs.pop('client', None)
|
||||
self.networks_client = kwargs.pop('networks_client', None)
|
||||
self.routers_client = kwargs.pop('routers_client', None)
|
||||
self.subnets_client = kwargs.pop('subnets_client', None)
|
||||
self.ports_client = kwargs.pop('ports_client', None)
|
||||
super(DeletableResource, self).__init__(*args, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return '<%s id="%s" name="%s">' % (self.__class__.__name__,
|
||||
self.id, self.name)
|
||||
|
||||
@abc.abstractmethod
|
||||
def delete(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def refresh(self):
|
||||
return
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.id)
|
||||
|
||||
def wait_for_status(self, status):
|
||||
if not hasattr(self, 'status'):
|
||||
return
|
||||
|
||||
def helper_get():
|
||||
self.refresh()
|
||||
return self
|
||||
|
||||
return self.wait_for_resource_status(helper_get, status)
|
||||
|
||||
def wait_for_resource_status(self, fetch, status):
|
||||
"""Waits for a network resource to reach a status
|
||||
|
||||
@param fetch: the callable to be used to query the resource status
|
||||
@type fetch: callable that takes no parameters and returns the resource
|
||||
@param status: the status that the resource has to reach
|
||||
@type status: String
|
||||
"""
|
||||
interval = self.build_interval
|
||||
timeout = self.build_timeout
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time <= timeout:
|
||||
resource = fetch()
|
||||
if resource['status'] == status:
|
||||
return
|
||||
time.sleep(interval)
|
||||
|
||||
# At this point, the wait has timed out
|
||||
message = 'Resource %s' % (str(resource))
|
||||
message += ' failed to reach status %s' % status
|
||||
message += ' (current: %s)' % resource['status']
|
||||
message += ' within the required time %s' % timeout
|
||||
caller = misc.find_test_caller()
|
||||
if caller:
|
||||
message = '(%s) %s' % (caller, message)
|
||||
raise exceptions.TimeoutException(message)
|
||||
|
||||
|
||||
class DeletableNetwork(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.networks_client.delete_network(self.id)
|
||||
|
||||
|
||||
class DeletableSubnet(DeletableResource):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super(DeletableSubnet, self).__init__(*args, **kwargs)
|
||||
self._router_ids = set()
|
||||
|
||||
def update(self, *args, **kwargs):
|
||||
result = self.subnets_client.update_subnet(self.id,
|
||||
*args,
|
||||
**kwargs)
|
||||
return super(DeletableSubnet, self).update(**result['subnet'])
|
||||
|
||||
def add_to_router(self, router_id):
|
||||
self._router_ids.add(router_id)
|
||||
self.routers_client.add_router_interface(router_id,
|
||||
subnet_id=self.id)
|
||||
|
||||
def delete(self):
|
||||
for router_id in self._router_ids.copy():
|
||||
self.routers_client.remove_router_interface(router_id,
|
||||
subnet_id=self.id)
|
||||
self._router_ids.remove(router_id)
|
||||
self.subnets_client.delete_subnet(self.id)
|
||||
|
||||
|
||||
class DeletableRouter(DeletableResource):
|
||||
|
||||
def set_gateway(self, network_id):
|
||||
return self.update(external_gateway_info=dict(network_id=network_id))
|
||||
|
||||
def unset_gateway(self):
|
||||
return self.update(external_gateway_info=dict())
|
||||
|
||||
def update(self, *args, **kwargs):
|
||||
result = self.routers_client.update_router(self.id,
|
||||
*args,
|
||||
**kwargs)
|
||||
return super(DeletableRouter, self).update(**result['router'])
|
||||
|
||||
def delete(self):
|
||||
self.unset_gateway()
|
||||
self.routers_client.delete_router(self.id)
|
||||
|
||||
|
||||
class DeletableFloatingIp(DeletableResource):
|
||||
|
||||
def refresh(self, *args, **kwargs):
|
||||
result = self.client.show_floatingip(self.id,
|
||||
*args,
|
||||
**kwargs)
|
||||
super(DeletableFloatingIp, self).update(**result['floatingip'])
|
||||
|
||||
def update(self, *args, **kwargs):
|
||||
result = self.client.update_floatingip(self.id,
|
||||
*args,
|
||||
**kwargs)
|
||||
super(DeletableFloatingIp, self).update(**result['floatingip'])
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s addr="%s">' % (self.__class__.__name__,
|
||||
self.floating_ip_address)
|
||||
|
||||
def __str__(self):
|
||||
return '<"FloatingIP" addr="%s" id="%s">' % (self.floating_ip_address,
|
||||
self.id)
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_floatingip(self.id)
|
||||
|
||||
|
||||
class DeletablePort(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.ports_client.delete_port(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroup(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_security_group(self.id)
|
||||
|
||||
|
||||
class DeletableSecurityGroupRule(DeletableResource):
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s id="%s">' % (self.__class__.__name__, self.id)
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_security_group_rule(self.id)
|
||||
|
||||
|
||||
class DeletablePool(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_pool(self.id)
|
||||
|
||||
|
||||
class DeletableMember(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_member(self.id)
|
||||
|
||||
|
||||
class DeletableVip(DeletableResource):
|
||||
|
||||
def delete(self):
|
||||
self.client.delete_vip(self.id)
|
||||
|
||||
def refresh(self):
|
||||
result = self.client.show_vip(self.id)
|
||||
super(DeletableVip, self).update(**result['vip'])
|
@ -65,8 +65,8 @@ class TestSimpleFlatNetwork(dmgr.TopoDeployScenarioManager):
|
||||
def create_subnet(self, net_network, info_flat):
|
||||
alloc_pool = [{'start': info_flat['start'],
|
||||
'end': info_flat['end']}]
|
||||
post_body = {'name': net_network.name,
|
||||
'network_id': net_network.id,
|
||||
post_body = {'name': net_network['name'],
|
||||
'network_id': net_network['id'],
|
||||
'ip_version': 4,
|
||||
'gateway_ip': info_flat['gateway'],
|
||||
'cidr': info_flat['cidr'],
|
||||
@ -94,7 +94,7 @@ class TestSimpleFlatNetwork(dmgr.TopoDeployScenarioManager):
|
||||
security_groups_client=self.security_groups_client,
|
||||
security_group_rules_client=self.security_group_rules_client,
|
||||
namestart='FLAT-tenant')
|
||||
security_groups = [{'name': self.security_group['name']}]
|
||||
security_groups = [{'name': self.security_group['id']}]
|
||||
self.serv1 = self.create_server_on_network(
|
||||
self.net_network, security_groups,
|
||||
image=self.get_server_image(),
|
||||
@ -124,15 +124,6 @@ class TestTenantConnectivity(dmgr.TopoDeployScenarioManager):
|
||||
|
||||
def tearDown(self):
|
||||
# do mini teardown if test failed already
|
||||
try:
|
||||
self.disassociate_floatingip(self.fip1)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
self.router.unset_gateway()
|
||||
self.router.delete()
|
||||
except Exception:
|
||||
pass
|
||||
super(TestTenantConnectivity, self).tearDown()
|
||||
|
||||
@test.idempotent_id('3c6cd4fe-de25-47ef-b638-a6bbb312da09')
|
||||
@ -147,9 +138,10 @@ class TestTenantConnectivity(dmgr.TopoDeployScenarioManager):
|
||||
security_group_rules_client=client_mgr.security_group_rules_client,
|
||||
namestart='deploy-connect')
|
||||
self.network, self.subnet, self.router = self.setup_project_network(
|
||||
self.public_network_id, namestart='deploy-connect')
|
||||
self.public_network_id, client_mgr=client_mgr,
|
||||
namestart='deploy-connect')
|
||||
self.check_networks(self.network, self.subnet, self.router)
|
||||
security_groups = [{'name': self.security_group['name']}]
|
||||
security_groups = [{'name': self.security_group['id']}]
|
||||
self.serv1 = self.create_server_on_network(
|
||||
self.network, security_groups,
|
||||
image=self.get_server_image(),
|
||||
@ -169,7 +161,8 @@ class TestTenantConnectivity(dmgr.TopoDeployScenarioManager):
|
||||
"VM=%s CAN-NOT-REACH-OUTSIDE-WORLD" % (node1['ipaddr']))
|
||||
LOG.debug('tenant[%s] CAN-REACH-OUTSIDE-WORLD',
|
||||
node1['ipaddr'])
|
||||
self.disassociate_floatingip(self.fip1)
|
||||
self.disassociate_floatingip(self.fip1,
|
||||
client=self.manager.floating_ips_client)
|
||||
time.sleep(dmgr.WAITTIME_AFTER_DISASSOC_FLOATINGIP)
|
||||
msg = "after disassociate floatingip[%s] from server#1" % self.fip1
|
||||
self._check_floatingip_connectivity(
|
||||
@ -179,17 +172,19 @@ class TestTenantConnectivity(dmgr.TopoDeployScenarioManager):
|
||||
image=self.get_server_image(),
|
||||
flavor=self.get_server_flavor(),
|
||||
name=self.network['name'])
|
||||
self.associate_floatingip(self.fip1, self.serv2)
|
||||
self.associate_floatingip(self.fip1, self.serv2,
|
||||
client=self.manager.floating_ips_client)
|
||||
server_pingable = self._waitfor_associated_floatingip(self.fip1)
|
||||
self.assertTrue(
|
||||
server_pingable,
|
||||
msg="Expect server#2 to be reachable after floatingip assigned.")
|
||||
self.disassociate_floatingip(self.fip1)
|
||||
self.disassociate_floatingip(self.fip1,
|
||||
client=self.manager.floating_ips_client,
|
||||
and_delete=True)
|
||||
LOG.debug(Z_DEPLOY_DELETE_SERVER, "tenant connectivity")
|
||||
self.servers_client.delete_server(self.serv1['id'])
|
||||
self.servers_client.delete_server(self.serv2['id'])
|
||||
self.router.unset_gateway()
|
||||
self.router.delete()
|
||||
# self._router_unset_gateway(self.router['id'])
|
||||
LOG.debug(Z_DEPLOY_COMPLETED, "tenant connectivity")
|
||||
|
||||
|
||||
@ -223,22 +218,18 @@ class TestMultiTenantsNetwork(dmgr.TopoDeployScenarioManager):
|
||||
if tenant and 'fip1' in tenant:
|
||||
servers_client = tenant['client_mgr'].servers_client
|
||||
dmgr.delete_all_servers(servers_client)
|
||||
self.disassociate_floatingip(tenant['fip1'])
|
||||
self.disassociate_floatingip(tenant['fip2'])
|
||||
if from_test:
|
||||
time.sleep(dmgr.WAITTIME_AFTER_DISASSOC_FLOATINGIP)
|
||||
fip_client = tenant['client_mgr'].floating_ips_client
|
||||
fip_client.delete_floatingip(tenant['fip1'].id)
|
||||
fip_client.delete_floatingip(tenant['fip2'].id)
|
||||
self.disassociate_floatingip(tenant['fip1'],
|
||||
client=fip_client,
|
||||
and_delete=True)
|
||||
self.disassociate_floatingip(tenant['fip2'],
|
||||
client=fip_client,
|
||||
and_delete=True)
|
||||
tenant.pop('fip1')
|
||||
tenant['router'].delete()
|
||||
if from_test:
|
||||
time.sleep(dmgr.WAITTIME_AFTER_ASSOC_FLOATINGIP)
|
||||
tenant['network'].delete()
|
||||
|
||||
def create_project_network_env(self, client_mgr, t_id,
|
||||
check_outside_world=True,
|
||||
cidr_offset=0):
|
||||
cidr_offset=1):
|
||||
username, password = self.get_image_userpass()
|
||||
t_security_group = self._create_security_group(
|
||||
security_groups_client=client_mgr.security_groups_client,
|
||||
@ -352,36 +343,58 @@ class TestProviderRouterTenantNetwork(dmgr.TopoDeployScenarioManager):
super(TestProviderRouterTenantNetwork, self).tearDown()

def remove_project_network(self, from_test=True):
router_id = self.p_router['id']
for tn in ['yellow', 'blue']:
tenant = getattr(self, tn, None)
if tenant and 'fip' in tenant:
servers_client = tenant['client_mgr'].servers_client
dmgr.delete_all_servers(servers_client)
self.disassociate_floatingip(tenant['fip'])
if from_test:
time.sleep(dmgr.WAITTIME_AFTER_DISASSOC_FLOATINGIP)
fip_client = tenant['client_mgr'].floating_ips_client
fip_client.delete_floatingip(tenant['fip'].id)
self.disassociate_floatingip(tenant['fip'],
client=fip_client,
and_delete=True)
tenant.pop('fip')
tenant['router'].delete_subnet(tenant['subnet'])
tenant['network'].delete()
self.p_router.unset_gateway()
self.p_router.delete()
self.router_interface_delete(
router_id, tenant['subnet']['id'],
self.admin_manager.routers_client)
self.admin_manager.networks_client.delete_network(
tenant['network']['id'])
tenant.pop('subnet')
tenant.pop('network')
self._router_clear_gateway(
router_id, client=self.admin_manager.routers_client)

def create_project_network_env(self, to_router, t_id, client_mgr=None,
cidr_offset=0, **kwargs):
def create_project_network_env(self, t_id, client_mgr=None,
tenant_id=None, cidr_offset=0, **kwargs):
tenant = self.create_tenant_network(t_id, client_mgr, tenant_id,
cidr_offset, **kwargs)
tenant = self.create_server_and_assign_floatingip(tenant)
return tenant

def create_tenant_network(self, t_id, client_mgr=None,
tenant_id=None, cidr_offset=0, **kwargs):
namestart = "deploy-%s-tenant" % t_id
name = data_utils.rand_name(namestart)
client_mgr = client_mgr or self.manager
servers_client = client_mgr.servers_client
security_groups_client = client_mgr.security_groups_client
security_group_rules_client = client_mgr.security_group_rules_client
t_network, t_subnet = self.create_network_subnet(
client_mgr, name=name,
client_mgr, name=name, tenant_id=tenant_id,
cidr_offset=cidr_offset,)
to_router.add_subnet(t_subnet)
t_security_group = self._create_security_group(
security_groups_client=client_mgr.security_groups_client,
security_group_rules_client=client_mgr.security_group_rules_client,
namestart=namestart)
security_groups_client=security_groups_client,
security_group_rules_client=security_group_rules_client,
namestart=namestart, tenant_id=tenant_id)
self._router_add_interface(
self.p_router, t_subnet, self.admin_manager)
return dict(id=t_id, network=t_network, subnet=t_subnet,
client_mgr=client_mgr, security_group=t_security_group)

def create_server_and_assign_floatingip(self, tenant):
t_network = tenant['network']
t_security_group = tenant['security_group']
client_mgr = tenant['client_mgr']
servers_client = client_mgr.servers_client
security_groups = [{'name': t_security_group['name']}]
t_serv = self.create_server_on_network(
t_network, security_groups,
@ -391,12 +404,8 @@ class TestProviderRouterTenantNetwork(dmgr.TopoDeployScenarioManager):
servers_client=servers_client)
t_fip = self.create_floatingip_for_server(
t_serv, client_mgr=client_mgr)

return dict(network=t_network, subnet=t_subnet,
router=to_router,
client_mgr=client_mgr,
secuirty_group=t_security_group,
serv=t_serv, fip=t_fip)
tenant.update(serv=t_serv, fip=t_fip)
return tenant

@test.idempotent_id('a31712de-33ad-4dc2-9755-1a0631a4f66a')
@test.services('compute', 'network')
@ -406,11 +415,12 @@ class TestProviderRouterTenantNetwork(dmgr.TopoDeployScenarioManager):
client_mgr=self.admin_manager, namestart="deploy-provider-router",
distributed=self.tenant_router_attrs.get('distributed'),
router_type=self.tenant_router_attrs.get('router_type'))
self.p_router.set_gateway(self.public_network_id)
self._router_set_gateway(self.p_router['id'], self.public_network_id,
client=self.admin_manager.routers_client)
self.yellow = self.create_project_network_env(
self.p_router, 'yellow', self.manager, 0)
'yellow', self.manager, cidr_offset=1)
self.blue = self.create_project_network_env(
self.p_router, 'blue', self.alt_manager, 2)
'blue', self.alt_manager, cidr_offset=2)
username, password = self.get_image_userpass()
yellow = dmgr.make_node_info(self.yellow['fip'], username, password)
blue = dmgr.make_node_info(self.blue['fip'], username, password)

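A short sketch of the wrapper-to-method translation the hunk above applies, written as the calls would appear inside this test class; it is illustrative only and assumes the _router_* helpers shown in the hunk.

    # Old oo-wrapped call                     New helper form used above
    # self.p_router.set_gateway(net_id)   ->  self._router_set_gateway(...)
    # self.p_router.unset_gateway()       ->  self._router_clear_gateway(...)
    # router.add_subnet(subnet)           ->  self._router_add_interface(...)
    routers_client = self.admin_manager.routers_client
    self._router_set_gateway(self.p_router['id'], self.public_network_id,
                             client=routers_client)
    self._router_clear_gateway(self.p_router['id'], client=routers_client)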
@ -16,7 +16,6 @@

import re
import sys
import time

from tempest import config
from tempest.lib.common import ssh
@ -80,16 +79,181 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
def resource_cleanup(cls):
super(TestDHCP121BasicOps, cls).resource_cleanup()

def setUp(self):
super(TestDHCP121BasicOps, self).setUp()

def tearDown(self):
try:
self.remove_project_network(False)
except Exception:
pass
self.remove_project_network()
super(TestDHCP121BasicOps, self).tearDown()

def remove_project_network(self):
project_name = 'green'
tenant = getattr(self, project_name, None)
if tenant:
servers_client = tenant['client_mgr'].servers_client
dmgr.delete_all_servers(servers_client)
self.disassociate_floatingip(tenant['fip1'],
and_delete=True)

def check_server_connected(self, serv):
# Fetch tenant-network from where vm deployed
serv_net = list(serv['addresses'].keys())[0]
serv_addr = serv['addresses'][serv_net][0]
host_ip = serv_addr['addr']
self.waitfor_host_connected(host_ip)

def create_project_network_subnet(self,
name_prefix='dhcp-project'):
network_name = data_utils.rand_name(name_prefix)
network, subnet = self.create_network_subnet(
name=network_name)
return (network['id'], network, subnet)

def dhcp_121_metadata_hostroutes_check_on_vm_nsxv(self, vm_env):
self.serv_fip = vm_env['fip1']['floating_ip_address']
username, password = self.get_image_userpass()
# Connect to instance launched using ssh lib
client = ssh.Client(self.serv_fip, username=username,
password=password)
# Executes route over launched instance
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIn(Metadataserver_ip, out_data)
LOG.info(_LI("Metadata routes available on vm"))
cmd = ('wget http://169.254.169.254 -O sample.txt')
client.exec_command(cmd)
cmd = ('cat sample.txt')
out_data = client.exec_command(cmd)
# Check metadata server information available or not
self.assertIn('latest', out_data)
LOG.info(_LI("metadata server is acessible"))
# Fetch dhcp edge info from nsx-v
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
# Fetch host-route and metadata info from nsx-v
dhcp_options_info = {}
dhcp_options_info = \
exc_edge['staticBindings']['staticBindings'][0]['dhcpOptions']
# Check Host Route information available at backend
self.assertIn(
Metadataserver_ip,
dhcp_options_info['option121'][
'staticRoutes'][0]['destinationSubnet'])
# Storing sec-group, network, subnet, router, server info in dict
project_dict = dict(security_group=vm_env['security_group'],
network=vm_env['network'], subnet=vm_env['subnet'],
router=vm_env['router'],
client_mgr=vm_env['client_mgr'],
serv1=vm_env['serv1'], fip1=vm_env['fip1'])
return project_dict

def dhcp_121_hostroutes_clear(self, vm_env):
# Fetch next hop information from tempest.conf
next_hop = CONF.network.project_network_cidr
self.nexthop_host_route = next_hop.rsplit('.', 1)[0]
self.nexthop1 = self.nexthop_host_route + ".2"
# Floating-ip of VM
self.serv_fip = vm_env['fip1']['floating_ip_address']
username, password = self.get_image_userpass()
# Update subnet with host routes
_subnet_data = {'host_routes': [{'destination': '10.20.0.0/32',
'nexthop': '10.100.1.1'}],
'new_host_routes': [{'destination': '10.20.0.0/32',
'nexthop': self.nexthop1}]}
new_host_routes = _subnet_data['new_host_routes']
kwargs = {'host_routes': new_host_routes}
new_name = "New_subnet"
subnet_id = vm_env['subnet']['id']
# Update subnet with host-route info
self.subnets_client.update_subnet(
subnet_id, name=new_name, **kwargs)
# Connect to instance launched using ssh lib
client = ssh.Client(self.serv_fip, username=username,
password=password)
# Executes route over instance launched
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIn(
_subnet_data['new_host_routes'][0]['nexthop'], out_data)
self.assertIn(self.nexthop_host_route, out_data)
LOG.info(_LI("Host routes available on vm"))
# Check Host route info at backend
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
# Fetch host-route and metadata info from nsx-v
dhcp_options_info = {}
dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
'dhcpOptions']['option121']['staticRoutes']
# Check Host Route information available at backend
for destination_net in dhcp_options_info:
dest = _subnet_data['new_host_routes'][0]['destination']
dest_subnet = destination_net['destinationSubnet']
dest_router = destination_net['router']
if (dest in dest_subnet and self.nexthop1 in dest_router):
LOG.info(_LI("Host routes available on nsxv"))
# Update subnet with no host-routes
_subnet_data1 = {'new_host_routes': []}
new_host_routes = _subnet_data1['new_host_routes']
kwargs = {'host_routes': new_host_routes}
new_name = "New_subnet"
self.subnets_client.update_subnet(
subnet_id, name=new_name, **kwargs)
# Executes route over instance launched
cmd = ('dhclient eth0')
client.exec_command(cmd)
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIsNotNone(out_data)
# Check Host routes on VM shouldn't be available
self.assertNotIn(
_subnet_data['new_host_routes'][0]['destination'], out_data)
# Check Host-routes at backend after deletion
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
dhcp_options_info = []
dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
'dhcpOptions']['option121']['staticRoutes']
# Check Host Route information available at backend
for destination_net in dhcp_options_info:
if (_subnet_data['new_host_routes'][0]['destination']
not in destination_net['destinationSubnet']):
LOG.info(_LI("Host routes not available on nsxv"))
project_dict = dict(security_group=vm_env['security_group'],
network=vm_env['network'], subnet=vm_env['subnet'],
router=vm_env['router'],
client_mgr=vm_env['client_mgr'],
serv1=vm_env['serv1'], fip1=vm_env['fip1'])
return project_dict

def setup_vm_enviornment(self, client_mgr, t_id,
check_outside_world=True,
cidr_offset=0):
t_network, t_subnet, t_router = self.setup_project_network(
self.public_network_id, namestart=("deploy-%s-dhcp" % t_id),
cidr_offset=1)
t_security_group = self._create_security_group(
security_groups_client=self.security_groups_client,
security_group_rules_client=self.security_group_rules_client,
namestart='adm')
username, password = self.get_image_userpass()
security_groups = [{'name': t_security_group['id']}]
t_serv1 = self.create_server_on_network(
t_network, security_groups,
image=self.get_server_image(),
flavor=self.get_server_flavor(),
name=t_network['name'])
self.check_server_connected(t_serv1)
t_floatingip = self.create_floatingip_for_server(
t_serv1, client_mgr=self.admin_manager)
msg = ("Associate t_floatingip[%s] to server[%s]"
% (t_floatingip, t_serv1['name']))
self._check_floatingip_connectivity(
t_floatingip, t_serv1, should_connect=True, msg=msg)
vm_enviornment = dict(security_group=t_security_group,
network=t_network, subnet=t_subnet,
router=t_router, client_mgr=client_mgr,
serv1=t_serv1, fip1=t_floatingip)
return vm_enviornment


class TestDhcpMetadata(TestDHCP121BasicOps):
@test.attr(type='nsxv')
@test.idempotent_id('95d06aba-895f-47f8-b47d-ae48c6853a85')
def test_dhcp_121_metadata_check_on_vm_nsxv(self):
@ -98,23 +262,21 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
self.green = self.dhcp_121_metadata_hostroutes_check_on_vm_nsxv(
self.vm_env)
self.remove_project_network()
self.green['router'].unset_gateway()
self.green['router'].delete()
LOG.info(_LI("Testcase DHCP-121 option metadata check on vm and on \
nsx completed"))


class TestDhcpHostroutesClear(TestDHCP121BasicOps):
@test.attr(type='nsxv')
@test.idempotent_id('6bec6eb4-8632-493d-a895-a3ee87cb3002')
def test_dhcp_121_hostroutes_clear(self):
LOG.info(_LI("Testcase DHCP-121 option host routes clear deploying"))
self.vm_env = self.setup_vm_enviornment(self.manager, 'green', True)
self.green = self.dhcp_121_hostroutes_clear(self.vm_env)
self.remove_project_network()
self.green['router'].unset_gateway()
self.green['router'].delete()
LOG.info(_LI("Testcase DHCP-121 option host routes clear completed"))


class TestDhcpNegative(TestDHCP121BasicOps):
@test.attr(type='nsxv')
@test.idempotent_id('a58dc6c5-9f28-4184-baf7-37ded52593c4')
def test_dhcp121_negative_test(self):
@ -181,6 +343,8 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
pass
LOG.info(_LI("Testcase DHCP-121 option negative test completed"))


class TestDhcpMultiHostRoute(TestDHCP121BasicOps):
@test.attr(type='nsxv')
@test.idempotent_id('c3ca96d7-b704-4d94-b42d-e7bae94b82cd')
def test_dhcp121_multi_host_route(self):
@ -246,176 +410,3 @@ class TestDHCP121BasicOps(dmgr.TopoDeployScenarioManager):
if (len(subnet['subnet']['host_routes']) == 19):
LOG.info(_LI("Multiple entries for host routes available"))
LOG.info(_LI("Testcase DHCP-121 option multi host routes completed"))

def remove_project_network(self, from_test=True):
project_name = 'green'
tenant = getattr(self, project_name, None)
servers_client = tenant['client_mgr'].servers_client
dmgr.delete_all_servers(servers_client)
self.disassociate_floatingip(tenant['fip1'])
if from_test:
time.sleep(dmgr.WAITTIME_AFTER_DISASSOC_FLOATINGIP)
fip_client = tenant['client_mgr'].floating_ips_client
fip_client.delete_floatingip(tenant['fip1'].id)
tenant.pop('fip1')
tenant['router'].delete_subnet(tenant['subnet'])
tenant['subnet'].delete()
tenant['network'].delete()

def check_server_connected(self, serv):
# Fetch tenant-network from where vm deployed
serv_net = list(serv['addresses'].keys())[0]
serv_addr = serv['addresses'][serv_net][0]
host_ip = serv_addr['addr']
self.waitfor_host_connected(host_ip)

def create_project_network_subnet(self,
name_prefix='dhcp-project'):
network_name = data_utils.rand_name(name_prefix)
network, subnet = self.create_network_subnet(
name=network_name)
return (network.id, network, subnet)

def dhcp_121_metadata_hostroutes_check_on_vm_nsxv(self, vm_env):
self.serv_fip = vm_env['fip1'].floating_ip_address
username, password = self.get_image_userpass()
# Connect to instance launched using ssh lib
client = ssh.Client(self.serv_fip, username=username,
password=password)
# Executes route over launched instance
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIn(Metadataserver_ip, out_data)
LOG.info(_LI("Metadata routes available on vm"))
cmd = ('wget http://169.254.169.254 -O sample.txt')
client.exec_command(cmd)
cmd = ('cat sample.txt')
out_data = client.exec_command(cmd)
# Check metadata server inforamtion available or not
self.assertIn('latest', out_data)
LOG.info(_LI("metadata server is acessible"))
# Fetch dhcp edge infor from nsx-v
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
# Fetch host-route and metadata info from nsx-v
dhcp_options_info = {}
dhcp_options_info = \
exc_edge['staticBindings']['staticBindings'][0]['dhcpOptions']
# Check Host Route information avaialable at beckend
self.assertIn(
Metadataserver_ip,
dhcp_options_info['option121'][
'staticRoutes'][0]['destinationSubnet'])
# Storing sec-group, network, subnet, router, server info in dict
project_dict = dict(security_group=vm_env['security_group'],
network=vm_env['network'], subnet=vm_env['subnet'],
router=vm_env['router'],
client_mgr=vm_env['client_mgr'],
serv1=vm_env['serv1'], fip1=vm_env['fip1'])
return project_dict

def dhcp_121_hostroutes_clear(self, vm_env):
# Fetch next hop information from tempest.conf
next_hop = CONF.network.project_network_cidr
self.nexthop_host_route = next_hop.rsplit('.', 1)[0]
self.nexthop1 = self.nexthop_host_route + ".2"
# Floating-ip of VM
self.serv_fip = vm_env['fip1'].floating_ip_address
username, password = self.get_image_userpass()
# Update subnet with host routes
_subnet_data = {'host_routes': [{'destination': '10.20.0.0/32',
'nexthop': '10.100.1.1'}],
'new_host_routes': [{'destination': '10.20.0.0/32',
'nexthop': self.nexthop1}]}
new_host_routes = _subnet_data['new_host_routes']
kwargs = {'host_routes': new_host_routes}
new_name = "New_subnet"
subnet_id = vm_env['subnet']['id']
# Update subnet with host-route info
self.subnets_client.update_subnet(
subnet_id, name=new_name, **kwargs)
# Connect to instance launched using ssh lib
client = ssh.Client(self.serv_fip, username=username,
password=password)
# Executes route over instance launched
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIn(
_subnet_data['new_host_routes'][0]['nexthop'], out_data)
self.assertIn(self.nexthop_host_route, out_data)
LOG.info(_LI("Host routes available on vm"))
# Check Host route info at beckend
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
# Fetch host-route and metadata info from nsx-v
dhcp_options_info = {}
dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
'dhcpOptions']['option121']['staticRoutes']
# Check Host Route information avaialable at beckend
for destination_net in dhcp_options_info:
if _subnet_data['new_host_routes'][0]['destination']\
in destination_net['destinationSubnet'] and\
self.nexthop1 in destination_net['router']:
LOG.info(_LI("Host routes available on nsxv"))
# Update subnet with no host-routes
_subnet_data1 = {'new_host_routes': []}
new_host_routes = _subnet_data1['new_host_routes']
kwargs = {'host_routes': new_host_routes}
new_name = "New_subnet"
self.subnets_client.update_subnet(
subnet_id, name=new_name, **kwargs)
# Executes route over instance launched
cmd = ('dhclient eth0')
client.exec_command(cmd)
cmd = ('route -n')
out_data = client.exec_command(cmd)
self.assertIsNotNone(out_data)
# Check Host routes on VM shouldn't be avialable
self.assertNotIn(
_subnet_data['new_host_routes'][0]['destination'], out_data)
# Check Host-routes at beckend after deletion
exc_edge = self.vsm.get_dhcp_edge_info()
self.assertIsNotNone(exc_edge)
dhcp_options_info = []
dhcp_options_info = exc_edge['staticBindings']['staticBindings'][0][
'dhcpOptions']['option121']['staticRoutes']
# Check Host Route information avaialable at beckend
for destination_net in dhcp_options_info:
if (_subnet_data['new_host_routes'][0]['destination']
not in destination_net['destinationSubnet']):
LOG.info(_LI("Host routes not available on nsxv"))
project_dict = dict(security_group=vm_env['security_group'],
network=vm_env['network'], subnet=vm_env['subnet'],
router=vm_env['router'],
client_mgr=vm_env['client_mgr'],
serv1=vm_env['serv1'], fip1=vm_env['fip1'])
return project_dict

def setup_vm_enviornment(self, client_mgr, t_id,
check_outside_world=True,
cidr_offset=0):
t_network, t_subnet, t_router = self.setup_project_network(
self.public_network_id, namestart=("deploy-%s-tenant" % t_id))
t_security_group = self._create_security_group(
security_groups_client=self.security_groups_client,
security_group_rules_client=self.security_group_rules_client,
namestart='adm')
username, password = self.get_image_userpass()
security_groups = [{'name': t_security_group['name']}]
t_serv1 = self.create_server_on_network(
t_network, security_groups,
image=self.get_server_image(),
flavor=self.get_server_flavor(),
name=t_network['name'])
self.check_server_connected(t_serv1)
t_floatingip = self.create_floatingip_for_server(
t_serv1, client_mgr=self.admin_manager)
msg = ("Associate t_floatingip[%s] to server[%s]"
% (t_floatingip, t_serv1['name']))
self._check_floatingip_connectivity(
t_floatingip, t_serv1, should_connect=True, msg=msg)
vm_enviornment = dict(security_group=t_security_group,
network=t_network, subnet=t_subnet,
router=t_router, client_mgr=client_mgr,
serv1=t_serv1, fip1=t_floatingip)
return vm_enviornment

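A minimal sketch of the subnet update that drives the DHCP option-121 checks above. It assumes a Neutron subnets client and an existing subnet_id, as in the test; the addresses are the same illustrative values used there.

    # push host routes to the subnet; NSXv renders them as option-121 static
    # routes on the DHCP edge
    host_routes = [{'destination': '10.20.0.0/32', 'nexthop': '10.100.1.2'}]
    subnets_client.update_subnet(subnet_id, host_routes=host_routes)
    # clearing the routes is the same call with an empty list
    subnets_client.update_subnet(subnet_id, host_routes=[])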
@ -21,6 +21,8 @@ from tempest import test

from vmware_nsx_tempest.tests.nsxv.scenario import (
manager_topo_deployment as dmgr)
from vmware_nsx_tempest.tests.nsxv.scenario import (
network_addon_methods as HELO)

CONF = config.CONF
DNS_SEARCH_DOMAIN = 'dns_search_domain'
@ -71,22 +73,29 @@ class TestDnsSearchDomainBasicOps(dmgr.TopoDeployScenarioManager):
self.assertEqual(dns_search_domain, subnet[DNS_SEARCH_DOMAIN])
return (network, subnet, dns_search_domain)

def create_router_by_type(self, router_type, name=None, **kwargs):
def create_router_by_type(self, router_type, client=None,
name=None, **kwargs):
routers_client = client or self.admin_manager.routers_client
create_kwargs = dict(namestart='dns-search', external_gateway_info={
"network_id": CONF.network.public_network_id})
if router_type in ('shared', 'exclusive'):
create_kwargs['router_type'] = router_type
elif router_type in ('distributed'):
create_kwargs['distributed'] = True
kwargs.update(create_kwargs)
router = self._create_router(client_mgr=self.admin_manager,
**kwargs)
create_kwargs.update(**kwargs)
router = HELO.router_create(self, client=routers_client,
**create_kwargs)
return router

def create_router_and_add_interfaces(self, router_type, net_list):
router = self.create_router_by_type(router_type)
def create_router_and_add_interfaces(self, router_type, net_list,
client_mgr=None):
client_mgr = client_mgr or self.admin_manager
routers_client = client_mgr.routers_client
router = self.create_router_by_type(router_type,
client=routers_client)
for (network, subnet, dns_search_domain) in net_list:
router.add_subnet(subnet)
HELO.router_interface_add(self, router['id'], subnet['id'],
client=routers_client)
return router

def setup_tenant_networks(self, router_type):
@ -135,7 +144,7 @@ class TestDnsSearchDomainBasicOps(dmgr.TopoDeployScenarioManager):
% (floatingip, server['name']))
self._check_floatingip_connectivity(
floatingip, server, should_connect=True, msg=msg)
serv_fip = floatingip.floating_ip_address
serv_fip = floatingip['floating_ip_address']
dmgr.rm_sshkey(serv_fip)
ssh_client = dmgr.get_remote_client_by_password(
serv_fip, username, password)
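A sketch of the router-type dispatch used in create_router_by_type above: NSXv router flavors are selected through kwargs before a single HELO.router_create() call. The routers_client variable is assumed to be resolved exactly as the hunk does.

    create_kwargs = {'external_gateway_info': {
        'network_id': CONF.network.public_network_id}}
    if router_type in ('shared', 'exclusive'):
        create_kwargs['router_type'] = router_type   # NSXv extension attribute
    elif router_type == 'distributed':
        create_kwargs['distributed'] = True
    router = HELO.router_create(self, client=routers_client, **create_kwargs)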
@ -21,12 +21,12 @@ from oslo_log import log as logging
import testtools

from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.scenario import manager
from tempest import test

from vmware_nsx_tempest.tests.nsxv.scenario import net_resources
from vmware_nsx_tempest.tests.nsxv.scenario import (
network_addon_methods as HELO)

CONF = config.CONF
FIP_OPS_TIMEOUT = 10
@ -113,7 +113,7 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
self.port_id = None
if boot_with_port:
# create a port on the network and boot with that
self.port_id = self._create_port(self.network['id']).id
self.port_id = self._create_port(self.network['id'])['id']

name = data_utils.rand_name('server-smoke')
server = self._create_server(name, self.network, self.port_id)
@ -139,7 +139,9 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
if k in ('distributed', 'router_type', 'router_size'):
router_kwargs[k] = kwargs.pop(k)
router = self._create_router(**router_kwargs)
router.set_gateway(CONF.network.public_network_id)
HELO.router_gateway_set(self, router['id'],
CONF.network.public_network_id,
routers_client)

subnet_kwargs = dict(network=network,
namestart=namestart,
@ -148,54 +150,21 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
subnet.add_to_router(router.id)
HELO.router_interface_add(self, router['id'], subnet['id'],
routers_client)
return network, subnet, router

# overwrite super class
def _create_router(self, client=None, tenant_id=None,
namestart='dvr-ops', **kwargs):
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
return HELO.router_create(self, client,
tenant_id=tenant_id,
namestart=namestart,
admin_state_up=True,
**kwargs)
router = net_resources.DeletableRouter(
routers_client=client, **result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router

def check_networks(self):
"""
Checks that we see the newly created network/subnet/router via
checking the result of list_[networks,routers,subnets]
"""

seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)

if self.subnet:
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network.id, seen_net_ids)
self.assertIn(self.subnet.id, seen_subnet_ids)

if self.router:
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router.name,
seen_router_names)
self.assertIn(self.router.id,
seen_router_ids)
HELO.check_networks(self, self.network, self.subnet, self.router)

def _create_server(self, name, network, port_id=None):
keypair = self.create_keypair()
@ -203,7 +172,7 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
security_groups = [{'name': self.security_group['name']}]
create_kwargs = {
'networks': [
{'uuid': network.id},
{'uuid': network['id']},
],
'key_name': keypair['name'],
'security_groups': security_groups,
@ -243,7 +212,7 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
"""
ssh_login = CONF.validation.image_ssh_user
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip.floating_ip_address
ip_address = floating_ip['floating_ip_address']
private_key = None
floatingip_status = 'DOWN'
if should_connect:
@ -282,58 +251,6 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
network=self.new_net,
gateway_ip=None)

def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip.floating_ip_address
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(ip_address,
private_key=private_key)
old_nic_list = self._get_server_nics(ssh_client)
# get a port from a list of one item
port_list = self._list_ports(device_id=server['id'])
self.assertEqual(1, len(port_list))
old_port = port_list[0]
interface = self.interface_client.create_interface(
server=server['id'],
network_id=self.new_net.id)
self.addCleanup(self.ports_client.wait_for_resource_deletion,
interface['port_id'])
self.addCleanup(self.delete_wrapper,
self.interface_client.delete_interface,
server['id'], interface['port_id'])

def check_ports():
self.new_port_list = [port for port in
self._list_ports(device_id=server['id'])
if port['id'] != old_port['id']]
return len(self.new_port_list) == 1

if not test.call_until_true(check_ports, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException(
"No new port attached to the server in time (%s sec)! "
"Old port: %s. Number of new ports: %d" % (
CONF.network.build_timeout, old_port,
len(self.new_port_list)))
new_port = net_resources.DeletablePort(client=self.ports_client,
**self.new_port_list[0])

def check_new_nic():
new_nic_list = self._get_server_nics(ssh_client)
self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
return len(self.diff_list) == 1

if not test.call_until_true(check_new_nic, CONF.network.build_timeout,
CONF.network.build_interval):
raise exceptions.TimeoutException("Interface not visible on the "
"guest after %s sec"
% CONF.network.build_timeout)

num, new_nic = self.diff_list[0]
ssh_client.assign_static_ip(nic=new_nic,
addr=new_port.fixed_ips[0]['ip_address'])
ssh_client.turn_nic_on(nic=new_nic)

def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
ipatxt = ssh_client.get_ip_list()
@ -351,7 +268,7 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
# get all network ports in the new network
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
self._list_ports(tenant_id=server['tenant_id'],
network_id=network.id)
network_id=network['id'])
if (p['device_owner'].startswith('network') and
not p['device_owner'].endswith('dhcp')))

@ -383,7 +300,7 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):

def _check_server_connectivity(self, floating_ip, address_list,
should_connect=True):
ip_address = floating_ip.floating_ip_address
ip_address = floating_ip['floating_ip_address']
private_key = self._get_server_key(self.floating_ip_tuple.server)
# ssh_source = self._ssh_to_server(ip_address, private_key)
ssh_source = self.get_remote_client(ip_address,
@ -506,7 +423,8 @@ class TestDvrBasicOps(manager.NetworkScenarioTest):
self._create_server(name, self.new_net)
self._check_network_internal_connectivity(network=self.new_net,
should_connect=False)
self.new_subnet.add_to_router(self.router.id)
HELO.router_interface_add(self, self.router['id'],
self.new_subnet['id'])
self._check_network_internal_connectivity(network=self.new_net,
should_connect=True)

@ -13,10 +13,10 @@ import tempfile
import time
import urllib2

from tempest.lib.common.utils import data_utils

from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest import test

from vmware_nsx_tempest.services.lbaas import health_monitors_client
@ -120,10 +120,6 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
waiters.wait_for_server_termination(
self.manager.servers_client, server_id)
# delete lbaas network before handing back to framework
LOG.debug("tearDown lbaas network")
self.delete_wrapper(self.router.delete)
self.delete_wrapper(self.subnet.delete)
self.delete_wrapper(self.network.delete)
super(TestLBaasRoundRobinOps, self).tearDown()
LOG.debug("tearDown lbaas exiting...")

@ -137,21 +133,24 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
pool_id = pool.get('id')
hm = pool.get('healthmonitor')
if hm:
self.delete_wrapper(
test_utils.call_and_ignore_notfound_exc(
self.health_monitors_client.delete_health_monitor,
pool.get('healthmonitor').get('id'))
self.wait_for_load_balancer_status(lb_id)
self.delete_wrapper(self.pools_client.delete_pool,
pool.get('id'))
test_utils.call_and_ignore_notfound_exc(
self.pools_client.delete_pool, pool.get('id'))
self.wait_for_load_balancer_status(lb_id)
for member in pool.get('members', []):
self.delete_wrapper(self.members_client.delete_member,
test_utils.call_and_ignore_notfound_exc(
self.members_client.delete_member,
pool_id, member.get('id'))
self.wait_for_load_balancer_status(lb_id)
self.delete_wrapper(self.listeners_client.delete_listener,
test_utils.call_and_ignore_notfound_exc(
self.listeners_client.delete_listener,
listener.get('id'))
self.wait_for_load_balancer_status(lb_id)
self.delete_wrapper(lb_client.delete_load_balancer, lb_id)
test_utils.call_and_ignore_notfound_exc(
lb_client.delete_load_balancer, lb_id)
self.load_balancers_client.wait_for_load_balancers_status(
lb_id, is_delete_op=True)
lbs = lb_client.list_load_balancers()['loadbalancers']
@ -170,13 +169,14 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
security_groups = [{'name': self.security_group['id']}]
self.keypair = self.create_keypair()
key_name = self.keypair['name']
network_name = self.network['name']
self.server1 = self.create_server_on_network(
self.network, name=(self.network.name + "-1"),
self.network, name=(network_name + "-1"),
security_groups=security_groups,
key_name=key_name, wait_on_boot=False,
servers_client=self.manager.servers_client)
self.server2 = self.create_server_on_network(
self.network, name=(self.network.name + "-2"),
self.network, name=(network_name + "-2"),
security_groups=security_groups,
key_name=key_name,
servers_client=self.manager.servers_client)
@ -269,7 +269,7 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
return None

def create_project_lbaas(self):
vip_subnet_id = self.subnet.id
vip_subnet_id = self.subnet['id']
lb_name = data_utils.rand_name(self.namestart)
self.loadbalancer = self.load_balancers_client.create_load_balancer(
name=lb_name, vip_subnet_id=vip_subnet_id)['loadbalancer']
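A minimal sketch of the cleanup idiom adopted above: best-effort deletes go through tempest.lib's test_utils instead of the old delete_wrapper. listener_id, lb_id and lb_client are placeholders for the values the teardown already holds.

    from tempest.lib.common.utils import test_utils

    # delete the listener, ignoring a 404 if it is already gone
    test_utils.call_and_ignore_notfound_exc(
        self.listeners_client.delete_listener, listener_id)
    self.wait_for_load_balancer_status(lb_id)
    # finally remove the load balancer itself
    test_utils.call_and_ignore_notfound_exc(
        lb_client.delete_load_balancer, lb_id)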
@ -16,15 +16,17 @@
import re
import six

from tempest.lib.common.utils import data_utils

from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest import test

from vmware_nsx_tempest.services import nsxv_client
from vmware_nsx_tempest.tests.nsxv.scenario \
import manager_topo_deployment as dmgr
from vmware_nsx_tempest.tests.nsxv.scenario import (
manager_topo_deployment as dmgr)
from vmware_nsx_tempest.tests.nsxv.scenario import (
network_addon_methods as HELO)

CONF = config.CONF

@ -69,9 +71,6 @@ class TestMultipleTransportZonesBasicOps(dmgr.TopoDeployScenarioManager):
"provider_network_type",
'vxlan')
cls.MAX_MTZ = getattr(CONF.nsxv, 'max_mtz', 0) or 3
cls.admin_networks_client = cls.admin_manager.networks_client
cls.admin_subnets_client = cls.admin_manager.subnets_client
cls.admin_routers_client = cls.admin_manager.routers_client

@classmethod
def resource_cleanup(cls):
@ -90,91 +89,109 @@ class TestMultipleTransportZonesBasicOps(dmgr.TopoDeployScenarioManager):
pass
return scope_id_list

def create_project_network_subnet(self,
name_prefix='mtz-project'):
network_name = data_utils.rand_name(name_prefix)
network, subnet = self.create_network_subnet(
name=network_name)
return (network.id, network, subnet)
def setup(self):
super(TestMultipleTransportZonesBasicOps, self).setUp()
self.tp_svrs = {}

def create_mtz_network_subnet(self, scope_id,
def tearDown(self):
self.delete_all_servers()
super(TestMultipleTransportZonesBasicOps, self).tearDown()

def create_project_network_subnet(self, name_prefix='mtz-project',
client_mgr=None):
client_mgr = client_mgr or self.manager
network_name = data_utils.rand_name(name_prefix)
network, subnet = HELO.create_network_subnet(
self, client_mgr=client_mgr, name=network_name)
return (network['id'], network, subnet)

def create_mtz_network_subnet(self, scope_id, tenant_project_id,
cidr=None, cidr_offset=0):
"""MTZ networks can only be created by ADMIN

All tenant network resources will be created by ADMIN.
"""
networks_client = self.admin_manager.networks_client
subnets_client = self.admin_manager.subnets_client
network_name = data_utils.rand_name('mtz-net')
create_body = {'name': network_name,
'provider:network_type': self.provider_network_type,
'provider:physical_network': scope_id}
network = self.create_network(
client=self.admin_manager.networks_client,
network = HELO.create_network(self, client=networks_client,
tenant_id=tenant_project_id,
**create_body)
subnet = self.create_subnet(
network,
client=self.admin_manager.subnets_client,
subnet = HELO.create_subnet(self, network, client=subnets_client,
name=network_name,
tenant_id=tenant_project_id,
cidr=cidr, cidr_offset=cidr_offset)
lswitch_list = self.vsm.get_all_logical_switches(scope_id)
lswitch_list = [x for x in lswitch_list if x['name'] == network.id]
lswitch_list = [x for x in lswitch_list if x['name'] == network['id']]
msg = ("network=%s is not configured by specified vdn_scope_id=%s"
% (network.id, scope_id))
% (network['id'], scope_id))
self.assertTrue(len(lswitch_list) == 1, msg=msg)
return (network.id, network, subnet)
return (network['id'], network, subnet)

def create_router_by_type(self, router_type, name=None, **kwargs):
def create_router_by_type(self, router_type, client=None,
name=None, **kwargs):
routers_client = client or self.manager.routers_client
create_kwargs = dict(namestart='mtz-', external_gateway_info={
"network_id": CONF.network.public_network_id})
if router_type in ('shared', 'exclusive'):
create_kwargs['router_type'] = router_type
elif router_type in ('distributed'):
create_kwargs['distributed'] = True
kwargs.update(create_kwargs)
router = self._create_router(client_mgr=self.admin_manager,
**kwargs)
create_kwargs.update(**kwargs)
router = HELO.router_create(self, client=routers_client,
**create_kwargs)
return router

def create_router_and_add_interfaces(self, router_type, nets):
router = self.create_router_by_type(router_type)
def create_router_and_add_interfaces(self, router_type, nets,
client_mgr=None):
client_mgr = client_mgr or self.admin_manager
routers_client = client_mgr.routers_client
router = self.create_router_by_type(router_type,
client=routers_client)
if router_type == 'exclusive':
router_nsxv_name = '%s-%s' % (router.name, router.id)
router_nsxv_name = '%s-%s' % (router['name'], router['id'])
exc_edge = self.vsm.get_edge(router_nsxv_name)
self.assertIsNotNone(exc_edge)
self.assertEqual(exc_edge['edgeType'], 'gatewayServices')
for net_id, (s_id, network, subnet, sg) in six.iteritems(nets):
router.add_subnet(subnet)
# import pdb; pdb.set_trace()
HELO.router_interface_add(self, router['id'], subnet['id'],
client=routers_client)
return router

def clear_router_gateway_and_interfaces(self, router, nets):
routers_client = self.admin_routers_client
routers_client.update_router(router['id'],
external_gateway_info=dict())
def clear_router_gateway_and_interfaces(self, router, nets, client=None):
routers_client = client or self.manager.routers_client
HELO.router_gateway_clear(self, router['id'],
client=routers_client)
for net_id, (s_id, network, subnet, sg) in six.iteritems(nets):
try:
routers_client.remove_router_interface_with_subnet_id(
router['id'], subnet['id'])
except Exception:
pass
test_utils.call_and_ignore_notfound_exc(
HELO.router_interface_delete,
self, router['id'], subnet['id'], client=routers_client)

def _test_router_with_network_and_mtz_networks(self, router_type):
"""router attached with multiple TZs and one tenant network."""
client_mgr = self.manager
scope_id_list = self.get_all_scope_id_list(with_default_scope=True)
nets = {}
net_id, network, subnet = self.create_project_network_subnet(
'mtz-tenant')
'mtz-tenant', client_mgr=client_mgr)
tenant_project_id = client_mgr.networks_client.tenant_id
# create security_group with loginable rules
security_group = self._create_security_group(
security_groups_client=self.security_groups_client,
security_group_rules_client=self.security_group_rules_client,
security_groups_client=client_mgr.security_groups_client,
security_group_rules_client=client_mgr.security_group_rules_client,
namestart='mtz-tenant')
nets[net_id] = [None, network, subnet, security_group]
admin_security_group = self._create_security_group(
security_groups_client=self.admin_manager.security_groups_client,
security_group_rules_client=(
self.admin_manager.security_group_rules_client),
namestart='mtz-admin')
for cidr_step in range(0, self.MAX_MTZ):
s_id = scope_id_list[cidr_step % len(scope_id_list)]
net_id, network, subnet = self.create_mtz_network_subnet(
s_id, cidr_offset=(cidr_step + 2))
nets[net_id] = [s_id, network, subnet, admin_security_group]
router = self.create_router_and_add_interfaces(router_type, nets)
s_id, tenant_project_id, cidr_offset=(cidr_step + 2))
nets[net_id] = [s_id, network, subnet, security_group]
router = self.create_router_and_add_interfaces(router_type, nets,
client_mgr=client_mgr)
return router, nets

def run_servers_connectivity_test(self, servers):
@ -183,15 +200,15 @@ class TestMultipleTransportZonesBasicOps(dmgr.TopoDeployScenarioManager):
net_id = net_id_list[0]
other_net_id_list = net_id_list[1:]
username, password = self.get_image_userpass()
nsv = self.servers[net_id]
nsv = self.tp_svrs[net_id]
serv = nsv['server']
floatingip = self.create_floatingip_for_server(
serv, client_mgr=self.admin_manager)
serv, client_mgr=self.manager)
msg = ("Associate floatingip[%s] to server[%s]"
% (floatingip, serv['name']))
self._check_floatingip_connectivity(
floatingip, serv, should_connect=True, msg=msg)
serv_fip = floatingip.floating_ip_address
serv_fip = floatingip['floating_ip_address']
dmgr.rm_sshkey(serv_fip)
ssh_client = dmgr.get_remote_client_by_password(
serv_fip, username, password)
@ -214,34 +231,51 @@ class TestMultipleTransportZonesBasicOps(dmgr.TopoDeployScenarioManager):
return addr['addr']
return None

def wait_for_servers_become_active(self, servers):
servers_client = self.admin_manager.servers_client
def wait_for_servers_become_active(self, servers, client=None):
servers_client = client or self.admin_manager.servers_client
net_id_list = servers.keys()
for net_id in net_id_list:
nsv = self.servers[net_id]
nsv = self.tp_svrs[net_id]
serv = nsv['server']
waiters.wait_for_server_status(
servers_client, serv['id'], 'ACTIVE')
# update server context. A server might not have ip address
# if not in running/active state
act_server = servers_client.show_server(serv['id'])
self.tp_svrs[net_id]['server'] = act_server.get('server',
act_server)

def delete_all_servers(self, client=None):
servers_client = client or self.admin_manager.servers_client
for net_id in six.iterkeys(self.tp_svrs):
server = self.tp_svrs[net_id]['server']
test_utils.call_and_ignore_notfound_exc(
servers_client.delete, server['id'])
dmgr.waitfor_servers_terminated(servers_client)

def run_mtz_basic_ops(self, router_type):
self.servers = {}
self.tp_svrs = {}
router, nets = self._test_router_with_network_and_mtz_networks(
router_type)
servers_client = self.manager.servers_client
for net_id in six.iterkeys(nets):
s_id, network, subnet, security_group = nets[net_id]
"""
servers_client = (self.manager.servers_client if s_id is None
else self.admin_manager.servers_client)
"""
security_groups = [{'name': security_group['id']}]
svr = self.create_server_on_network(
network, security_groups,
name=network['name'],
servers_client=servers_client)
self.servers[net_id] = dict(server=svr, s_id=s_id,
servers_client=servers_client,
wait_on_boot=False)
self.tp_svrs[net_id] = dict(server=svr, s_id=s_id,
network=network, subnet=subnet,
security_group=security_group,
servers_client=servers_client)
self.wait_for_servers_become_active(self.servers)
self.run_servers_connectivity_test(self.servers)
self.wait_for_servers_become_active(self.tp_svrs)
self.run_servers_connectivity_test(self.tp_svrs)


class TestMTZBasicOpsOverSharedRouter(TestMultipleTransportZonesBasicOps):

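A tiny sketch of the access-style change that recurs throughout these hunks: resources are now the plain dicts returned by the clients, so the old wrapper attribute access becomes key access. The variable names are illustrative.

    net_id = network['id']                       # was: network.id
    fip = floatingip['floating_ip_address']      # was: floatingip.floating_ip_address
    name_and_id = '%s-%s' % (router['name'], router['id'])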
@ -28,7 +28,7 @@ class TestNetworkBasicOps(network_ops.TestNetworkBasicOps):
# NSX-v: dhcp is not reachable
internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
self._list_ports(tenant_id=server['tenant_id'],
network_id=network.id)
network_id=network['id'])
if (p['device_owner'].startswith('network') and
not p['device_owner'].endswith('dhcp')))

@ -23,11 +23,13 @@ import urllib2
from tempest import config
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.scenario import manager
from tempest import test

from vmware_nsx_tempest.services import load_balancer_v1_client as LBV1C
from vmware_nsx_tempest.tests.nsxv.scenario import net_resources
from vmware_nsx_tempest.tests.nsxv.scenario import (
network_addon_methods as HELO)

CONF = config.CONF

@ -119,7 +121,8 @@ class TestLBaaSBasicOps(manager.NetworkScenarioTest):
if k in ('distributed', 'router_type', 'router_size'):
router_kwargs[k] = kwargs.pop(k)
router = self._create_router(**router_kwargs)
router.set_gateway(CONF.network.public_network_id)
HELO.router_gateway_set(self, router['id'],
CONF.network.public_network_id)

network = self._create_network(
routers_client=routers_client,
@ -134,54 +137,21 @@ class TestLBaaSBasicOps(manager.NetworkScenarioTest):
if dns_nameservers is not None:
subnet_kwargs['dns_nameservers'] = dns_nameservers
subnet = self._create_subnet(**subnet_kwargs)
subnet.add_to_router(router.id)
HELO.router_interface_add(self, router['id'], subnet['id'],
routers_client)
return network, subnet, router

# overwrite super class
def _create_router(self, client=None, tenant_id=None,
namestart='router-lbv1', **kwargs):
if not client:
client = self.routers_client
if not tenant_id:
tenant_id = client.tenant_id
name = data_utils.rand_name(namestart)
result = client.create_router(name=name,
admin_state_up=True,
return HELO.router_create(self, client,
tenant_id=tenant_id,
namestart=namestart,
admin_state_up=True,
**kwargs)
router = net_resources.DeletableRouter(
routers_client=client, **result['router'])
self.assertEqual(router.name, name)
self.addCleanup(self.delete_wrapper, router.delete)
return router

def check_networks(self):
"""Checks that we see the newly created network/subnet/router.

checking the result of list_[networks,routers,subnets]
"""

seen_nets = self._list_networks()
seen_names = [n['name'] for n in seen_nets]
seen_ids = [n['id'] for n in seen_nets]
self.assertIn(self.network.name, seen_names)
self.assertIn(self.network.id, seen_ids)

if self.subnet:
seen_subnets = self._list_subnets()
seen_net_ids = [n['network_id'] for n in seen_subnets]
seen_subnet_ids = [n['id'] for n in seen_subnets]
self.assertIn(self.network.id, seen_net_ids)
self.assertIn(self.subnet.id, seen_subnet_ids)

if self.router:
seen_routers = self._list_routers()
seen_router_ids = [n['id'] for n in seen_routers]
seen_router_names = [n['name'] for n in seen_routers]
self.assertIn(self.router.name,
seen_router_names)
self.assertIn(self.router.id,
seen_router_ids)
HELO.check_networks(self, self.network, self.subnet, self.router)

def _create_security_group_for_test(self):
self.security_group = self._create_security_group(
@ -222,7 +192,7 @@ class TestLBaaSBasicOps(manager.NetworkScenarioTest):
floating_ip = self.create_floating_ip(
server, public_network_id)
self.floating_ips[floating_ip] = server
self.server_ips[serv_id] = floating_ip.floating_ip_address
self.server_ips[serv_id] = floating_ip['floating_ip_address']
else:
self.server_ips[serv_id] = self._server_ip(server, net_name)
self.server_fixed_ips[serv_id] = self._server_ip(server, net_name)
@ -320,24 +290,29 @@ class TestLBaaSBasicOps(manager.NetworkScenarioTest):
pool_name,
lb_method='ROUND_ROBIN',
protocol='HTTP',
subnet_id=self.subnet.id)['pool']
self.pool = net_resources.DeletablePool(client=self.lbv1_client,
**pool)
subnet_id=self.subnet['id'])
self.pool = pool.get('pool', pool)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.lbv1_client.delete_pool,
self.pool['id'])
self.assertTrue(self.pool)
return self.pool

def _create_vip(self, pool_id, **kwargs):
result = self.lbv1_client.create_vip(pool_id, **kwargs)
vip = net_resources.DeletableVip(client=self.lbv1_client,
**result['vip'])
vip = result.get('vip', result)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.lbv1_client.delete_vip,
vip['id'])
return vip

def _create_member(self, protocol_port, pool_id, ip_version=4, **kwargs):
result = self.lbv1_client.create_member(protocol_port, pool_id,
ip_version, **kwargs)
member = net_resources.DeletableMember(client=self.lbv1_client,
**result['member'])
return member
member = result.get('member', result)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.lbv1_client.delete_member,
member['id'])

def _create_members(self):
|
||||
"""Create two members.
|
||||
@ -346,63 +321,68 @@ class TestLBaaSBasicOps(manager.NetworkScenarioTest):
|
||||
but with different ports to listen on.
|
||||
"""
|
||||
|
||||
pool_id = self.pool['id']
|
||||
for server_id, ip in six.iteritems(self.server_fixed_ips):
|
||||
if len(self.server_fixed_ips) == 1:
|
||||
member1 = self._create_member(address=ip,
|
||||
protocol_port=self.port1,
|
||||
pool_id=self.pool.id)
|
||||
pool_id=pool_id)
|
||||
member2 = self._create_member(address=ip,
|
||||
protocol_port=self.port2,
|
||||
pool_id=self.pool.id)
|
||||
pool_id=pool_id)
|
||||
self.members.extend([member1, member2])
|
||||
else:
|
||||
member = self._create_member(address=ip,
|
||||
protocol_port=self.port1,
|
||||
pool_id=self.pool.id)
|
||||
pool_id=pool_id)
|
||||
self.members.append(member)
|
||||
self.assertTrue(self.members)
|
||||
|
||||
def _assign_floating_ip_to_vip(self, vip):
|
||||
public_network_id = CONF.network.public_network_id
|
||||
port_id = vip.port_id
|
||||
vip_id = vip['id']
|
||||
port_id = vip['port_id']
|
||||
floating_ip = self.create_floating_ip(vip, public_network_id,
|
||||
port_id=port_id)
|
||||
self.floating_ips.setdefault(vip.id, [])
|
||||
self.floating_ips[vip.id].append(floating_ip)
|
||||
#?# self.floating_ips.setdefault(vip_id, [])
|
||||
self.floating_ips[vip_id].append(floating_ip)
|
||||
# Check for floating ip status before you check load-balancer
|
||||
self.check_floating_ip_status(floating_ip, "ACTIVE")
|
||||
|
||||
def _create_load_balancer(self):
|
||||
self._create_pool()
|
||||
self._create_members()
|
||||
vip_id = self.vip['id']
|
||||
self.vip = self._create_vip(protocol='HTTP',
|
||||
protocol_port=80,
|
||||
subnet_id=self.subnet.id,
|
||||
pool_id=self.pool.id)
|
||||
subnet_id=self.subnet['id'],
|
||||
pool_id=self.pool['id'])
|
||||
self.vip_wait_for_status(self.vip, 'ACTIVE')
|
||||
if (CONF.network.public_network_id and not
|
||||
CONF.network.project_networks_reachable):
|
||||
self._assign_floating_ip_to_vip(self.vip)
|
||||
self.vip_ip = self.floating_ips[
|
||||
self.vip.id][0]['floating_ip_address']
|
||||
vip_id][0]['floating_ip_address']
|
||||
else:
|
||||
self.vip_ip = self.vip.address
|
||||
self.vip_ip = self.vip['address']
|
||||
|
||||
# Currently the ovs-agent is not enforcing security groups on the
|
||||
# vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
|
||||
# However the linuxbridge-agent does, and it is necessary to add a
|
||||
# security group with a rule that allows tcp port 80 to the vip port.
|
||||
self.ports_client.update_port(
|
||||
self.vip.port_id, security_groups=[self.security_group.id])
|
||||
self.vip['port_id'],
|
||||
security_groups=[self.security_group['id']])
|
||||
|
||||
    def vip_wait_for_status(self, vip, status='ACTIVE'):
        # vip is DelatableVip
        interval = vip.client.build_interval
        timeout = vip.client.build_timeout
        interval = self.lbv1_client.build_interval
        timeout = self.lbv1_client.build_timeout
        start_time = time.time()

        vip_id = vip['id']
        while time.time() - start_time <= timeout:
            resource = vip.client.show_vip(vip.id)['vip']
            resource = self.lbv1_client.show_vip(vip_id)['vip']
            if resource['status'] == status:
                return
            time.sleep(interval)

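The vip_wait_for_status rewrite above is the usual poll-until-status loop, re-pointed from the removed DeletableVip wrapper to the lbv1 client. A standalone sketch of that polling pattern (illustrative only; the function name and the RuntimeError on timeout are assumptions, not part of this patch):

    import time

    def wait_for_status(show_fn, resource_id, key='vip',
                        status='ACTIVE', interval=1, timeout=60):
        # Poll show_fn(resource_id)[key] until the resource reports the
        # wanted status or the timeout expires.
        start = time.time()
        while time.time() - start <= timeout:
            resource = show_fn(resource_id)[key]
            if resource['status'] == status:
                return resource
            time.sleep(interval)
        raise RuntimeError('%s %s did not reach %s within %ss'
                           % (key, resource_id, status, timeout))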
@ -17,6 +17,7 @@ import time

from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest import test

from vmware_nsx_tempest.tests.nsxv.scenario import (
@ -61,7 +62,7 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):

    ATTENTION:
    Because, this test consumes floatingip's so both subnets ip-ranges
    will be used. NO OTHER TESTS should run when execute this test.
    will be used. NO OTHER TESTS should run when executing this test.

    Run this test module sequencially :

@ -181,15 +182,6 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):
            namestart='xnet-subnets')

    def tearDown(self):
        if self.my_network:
            self.delete_floatingips_and_servers()
            if self.my_network['router']:
                self.delete_wrapper(self.my_network['router'].delete)
            # Delete subnet - distributed router take longer time.
            if self.my_network['subnet']:
                self.delete_wrapper(self.my_network['subnet'].delete)
            if self.my_network['network']:
                self.delete_wrapper(self.my_network['network'].delete)
        super(TestXnetMultiSubnetsOps, self).tearDown()

    def create_user_servers(self, num_servers=5):
@ -220,7 +212,7 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):
        for sv in self.my_network['servers']:
            floatingip, sshc = self.create_floatingip_for_server(sv)
            self.my_network['floatingips'].append(floatingip)
            self.fixed_ip_addresses.append(floatingip.fixed_ip_address)
            self.fixed_ip_addresses.append(floatingip['fixed_ip_address'])
            # check inside this tenant network, all VMs are reachable.
            self.validate_all_servers_private_address_are_reachable(
                sshc, self.fixed_ip_addresses)
@ -241,7 +233,7 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):
                   % (floatingip, server['name']))
            self._check_floatingip_connectivity(
                floatingip, server, should_connect=True, msg=msg)
            serv_fip = floatingip.floating_ip_address
            serv_fip = floatingip['floating_ip_address']
            dmgr.rm_sshkey(serv_fip)
            ssh_client = dmgr.get_remote_client_by_password(
                serv_fip, username, password)
@ -249,7 +241,9 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):

    def delete_floatingips_and_servers(self):
        for net_floatingip in self.my_network['floatingips']:
            self.delete_wrapper(net_floatingip.delete)
            test_utils.call_and_ignore_notfound_exc(
                self.floating_ips_client.delete_floatingip,
                net_floatingip['id'])
        fip_list = self.floating_ips_client.list_floatingips()['floatingips']
        if len(fip_list) > 0:
            time.sleep(dmgr.WAITTIME_AFTER_DISASSOC_FLOATINGIP)
@ -264,14 +258,14 @@ class TestXnetMultiSubnetsOps(dmgr.TopoDeployScenarioManager):
            reachable = dmgr.is_reachable(ssh_client, ip_addr)
            self.assertTrue(reachable, msg)

    def _test_xnet_multiple_subnets_basic_ops(self,
                                              router_type='exclusive',
    def _test_xnet_multiple_subnets_basic_ops(self, router_type='exclusive',
                                              namestart='xnet-subnets',
                                              distributed=None):
        network, subnet, router = self.setup_project_network(
            self.public_network_id,
            client_mgr=self.admin_manager,
            tenant_id=self.primary_tenant_id,
            namestart='xnet-subnets',
            namestart=namestart,
            router_type=router_type, distributed=distributed)
        self.my_network = {'router': router,
                           'subnet': subnet,
@ -288,7 +282,7 @@ class TestXnetMultiSubnetsOpsOnSharedRouter(TestXnetMultiSubnetsOps):
    @test.idempotent_id('e25d030f-7fdf-4500-bd55-4ed6f62c0a5c')
    def test_xnet_multiple_subnets_basic_ops_on_shared_router(self):
        return self._test_xnet_multiple_subnets_basic_ops(
            'shared', False)
            'shared', 'xnet-shared', False)


class TestXnetMultiSubnetsOpsOnExclusiveRouter(TestXnetMultiSubnetsOps):
@ -296,7 +290,7 @@ class TestXnetMultiSubnetsOpsOnExclusiveRouter(TestXnetMultiSubnetsOps):
    @test.idempotent_id('5b09351a-0560-4555-99f0-a1f80d54d435')
    def test_xnet_multiple_subnets_basic_ops_on_exclusive_router(self):
        return self._test_xnet_multiple_subnets_basic_ops(
            'exclusive', False)
            'exclusive', 'xnet-exclusive', False)


class TestXnetMultiSubnetsOpsOnDistributedRouter(TestXnetMultiSubnetsOps):
@ -304,4 +298,4 @@ class TestXnetMultiSubnetsOpsOnDistributedRouter(TestXnetMultiSubnetsOps):
    @test.idempotent_id('9652d36b-8816-4212-a6e1-3a8b2580deee')
    def test_xnet_multiple_subnets_basic_ops_on_distributed_router(self):
        return self._test_xnet_multiple_subnets_basic_ops(
            '', True)
            '', 'xnet-distributed', True)
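Across both files above, the diff repeatedly swaps attribute access on the old Deletable* wrappers for dict access on the response the client returns (vip.id becomes vip['id'], floatingip.floating_ip_address becomes floatingip['floating_ip_address']). A tiny self-contained illustration of that access change (the values are made up, not taken from the patch):

    # Hypothetical values, for illustration only.
    vip = {'id': 'f1e2d3c4', 'port_id': 'a1b2c3d4', 'address': '10.0.0.5'}

    # Old style, via the removed DeletableVip wrapper:
    #     vip.id, vip.port_id, vip.address
    # New style, via the plain dict the client returns:
    print(vip['id'], vip['port_id'], vip['address'])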