Tempest: Support lbaas api & scenario tests to run against upstream

NSX-v lbaas requires the lbaas network to be attached to an
exclusive router. To run the lbaas tests against the upstream
implementation, we need to drop the router_type attribute when
creating the router.
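For reference, the switch is read from CONF.nsxv.no_router_type in the
code below; a sketch of the matching tempest.conf stanza (the option
name comes from the code, the value shown and its default are
assumptions):

    [nsxv]
    # assumed toggle: skip the NSX-v router_type extension so routers
    # are created the upstream way
    no_router_type = True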

Fixed lbaas API:
  deleting all load-balancer resources no longer skips listeners.
  the wait-for-status function now lives in the load_balancers client.

The lbaas scenario tests now use urllib3 instead of urllib2.
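A minimal sketch of the urllib3 pattern the scenario tests switch to
(the VIP address is a placeholder; retries=10 matches the diff below):

    import urllib3

    # one pool manager; retries absorb transient failures, since the
    # nc-based backend servers can be slow to answer
    http = urllib3.PoolManager(retries=10)
    resp = http.request('GET', 'http://192.0.2.10/')  # placeholder VIP
    body = resp.data.strip()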

Change-Id: Ief935eff8007c4c46115309f55ab3447ce146340
Alex Kang 2016-06-30 21:11:18 -07:00
parent b3f1ff4ba2
commit 8dca2a4031
3 changed files with 48 additions and 72 deletions


@@ -61,10 +61,10 @@ class LoadBalancersClient(base.BaseNetworkClient):
         uri = self.resource_base_path
         return self.list_resources(uri, **filters)

-    def wait_for_load_balancers_status(self, load_balancer_id,
-                                       provisioning_status='ACTIVE',
-                                       operating_status='ONLINE',
-                                       is_delete_op=False):
+    def wait_for_load_balancer_status(self, load_balancer_id,
+                                      provisioning_status='ACTIVE',
+                                      operating_status='ONLINE',
+                                      is_delete_op=False):
         """Must-have utility method for load-balancer CRUD operations.

         This is the method you must call to make sure load_balancer_id is
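The body of the renamed method is outside this hunk; a sketch of what
it plausibly contains, adapted from the polling loop removed from the
base test below (written as a standalone function here; the 1s interval
and 600s timeout are assumptions carried over from that loop):

    import time

    from tempest.lib import exceptions

    def wait_for_load_balancer_status(client, load_balancer_id,
                                      provisioning_status='ACTIVE',
                                      operating_status='ONLINE',
                                      is_delete_op=False):
        end_time = time.time() + 600
        lb = {}
        while time.time() < end_time:
            try:
                lb = client.show_load_balancer(load_balancer_id)
                lb = lb.get('loadbalancer', lb)
                if (lb.get('provisioning_status') == provisioning_status and
                        lb.get('operating_status') == operating_status):
                    return lb
                time.sleep(1)
            except exceptions.NotFound:
                if is_delete_op:  # NotFound is success when waiting on delete
                    return lb
                raise
        raise exceptions.TimeoutException(
            "load balancer %s did not reach %s/%s within 600 seconds" %
            (load_balancer_id, provisioning_status, operating_status))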


@@ -16,8 +16,6 @@
 # ported from neutron-lbaas to comply with the tempest framework
 # NSX-v requires the vip-subnet to be attached to an exclusive router

-import time
-
 from oslo_log import log as logging

 from tempest.api.network import base
@@ -28,7 +26,6 @@ from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions

-from vmware_nsx_tempest._i18n import _
 from vmware_nsx_tempest._i18n import _LI
 from vmware_nsx_tempest.services.lbaas import health_monitors_client
 from vmware_nsx_tempest.services.lbaas import listeners_client
@@ -38,6 +35,7 @@ from vmware_nsx_tempest.services.lbaas import pools_client
 CONF = config.CONF
 LOG = logging.getLogger(__name__)
+NO_ROUTER_TYPE = CONF.nsxv.no_router_type


 class BaseTestCase(base.BaseNetworkTest):
@@ -82,6 +80,9 @@ class BaseTestCase(base.BaseNetworkTest):
         cls.subnet_id = cls.subnet.get('id')
         # NSX-v: the load-balancer's subnet needs to attach to an exclusive router
         router_cfg = dict(router_name=router_name, router_type='exclusive')
+        if NO_ROUTER_TYPE:
+            # router_type is an NSX-v extension
+            router_cfg.pop('router_type', None)
         cls.router = cls.create_router(**router_cfg)
         cls.create_router_interface(cls.router['id'], cls.subnet['id'])
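For context, NO_ROUTER_TYPE is read from CONF.nsxv.no_router_type; a
sketch of how such an option is typically registered with oslo.config
(the group title and help text are assumptions; the real
vmware_nsx_tempest config module may differ):

    from oslo_config import cfg

    # assumed registration mirroring the CONF.nsxv.no_router_type lookup
    nsxv_group = cfg.OptGroup(name='nsxv', title='NSX-v test options')
    nsxv_opts = [
        cfg.BoolOpt('no_router_type',
                    default=False,
                    help="Skip the NSX-v router_type extension so the "
                         "tests can run against upstream neutron."),
    ]
    cfg.CONF.register_group(nsxv_group)
    cfg.CONF.register_opts(nsxv_opts, group=nsxv_group)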
@@ -95,25 +96,30 @@ class BaseTestCase(base.BaseNetworkTest):
                 continue
             for listener in lb.get('listeners', []):
                 for pool in listener.get('pools'):
-                    # delete pool's health-monitor
-                    hm = pool.get('healthmonitor')
-                    if hm:
-                        test_utils.call_and_ignore_notfound_exc(
-                            cls.health_monitors_client.delete_health_monitor,
-                            pool.get('healthmonitor').get('id'))
-                        cls._wait_for_load_balancer_status(lb_id)
                     # delete pool's members
                     members = pool.get('members', [])
                     for member in members:
                         test_utils.call_and_ignore_notfound_exc(
                             cls.members_client.delete_member,
                             pool.get('id'), member.get('id'))
                         cls._wait_for_load_balancer_status(lb_id)
                     # delete pool
                     test_utils.call_and_ignore_notfound_exc(
                         cls.pools_client.delete_pool, pool.get('id'))
                     cls._wait_for_load_balancer_status(lb_id)
+                    health_monitor = pool.get('healthmonitor')
+                    if health_monitor:
+                        test_utils.call_and_ignore_notfound_exc(
+                            cls.health_monitors_client.delete_health_monitor,
+                            health_monitor.get('id'))
+                        cls._wait_for_load_balancer_status(lb_id)
+                # delete listener
+                test_utils.call_and_ignore_notfound_exc(
+                    cls.listeners_client.delete_listener,
+                    listener.get('id'))
+                cls._wait_for_load_balancer_status(lb_id)
             # delete load-balancer
             test_utils.call_and_ignore_notfound_exc(
                 cls._delete_load_balancer, lb_id)
@@ -186,54 +192,11 @@ class BaseTestCase(base.BaseNetworkTest):
                                        provisioning_status='ACTIVE',
                                        operating_status='ONLINE',
                                        delete=False):
-        interval_time = 1
-        timeout = 600
-        end_time = time.time() + timeout
-        lb = {}
-        while time.time() < end_time:
-            try:
-                lb = cls.load_balancers_client.show_load_balancer(
-                    load_balancer_id)
-                if not lb:
-                    # loadbalancer not found
-                    if delete:
-                        break
-                    else:
-                        raise Exception(
-                            _("loadbalancer {lb_id} not"
-                              " found").format(
-                                  lb_id=load_balancer_id))
-                lb = lb.get('loadbalancer', lb)
-                if (lb.get('provisioning_status') == provisioning_status and
-                        lb.get('operating_status') == operating_status):
-                    break
-                time.sleep(interval_time)
-            except exceptions.NotFound as e:
-                # if the wait is for a delete operation, break
-                if delete:
-                    break
-                else:
-                    # re-raise the original exception
-                    raise e
-        else:
-            if delete:
-                raise exceptions.TimeoutException(
-                    _("Waited for load balancer {lb_id} to be deleted for "
-                      "{timeout} seconds but can still observe that it "
-                      "exists.").format(
-                          lb_id=load_balancer_id,
-                          timeout=timeout))
-            else:
-                raise exceptions.TimeoutException(
-                    _("Wait for load balancer ran for {timeout} seconds and "
-                      "did not observe {lb_id} reach {provisioning_status} "
-                      "provisioning status and {operating_status} "
-                      "operating status.").format(
-                          timeout=timeout,
-                          lb_id=load_balancer_id,
-                          provisioning_status=provisioning_status,
-                          operating_status=operating_status))
-        return lb
+        return cls.load_balancers_client.wait_for_load_balancer_status(
+            load_balancer_id,
+            provisioning_status=provisioning_status,
+            operating_status=operating_status,
+            is_delete_op=delete)

     @classmethod
     def _show_load_balancer_status_tree(cls, load_balancer_id):
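The wrapper keeps the old test-facing signature, so existing call sites
need no change; hypothetical usage (illustrative, not part of this diff):

    lb = cls._wait_for_load_balancer_status(lb_id)          # ACTIVE/ONLINE
    cls._wait_for_load_balancer_status(lb_id, delete=True)  # wait until gone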


@@ -11,7 +11,7 @@
 # under the License.

 import tempfile
 import time
-import urllib2
+import urllib3

 from tempest.common import waiters
 from tempest import config
@@ -151,14 +151,14 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
         self.wait_for_load_balancer_status(lb_id)
         test_utils.call_and_ignore_notfound_exc(
             lb_client.delete_load_balancer, lb_id)
-        self.load_balancers_client.wait_for_load_balancers_status(
+        self.load_balancers_client.wait_for_load_balancer_status(
             lb_id, is_delete_op=True)
         lbs = lb_client.list_load_balancers()['loadbalancers']
         self.assertEqual(0, len(lbs))

     def wait_for_load_balancer_status(self, lb_id):
         # wait for the load balancer to become ONLINE and ACTIVE
-        self.load_balancers_client.wait_for_load_balancers_status(lb_id)
+        self.load_balancers_client.wait_for_load_balancer_status(lb_id)

     def create_lbaas_networks(self):
         """Create network, subnet and router for lbaasv2 environment."""
@@ -263,9 +263,12 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
     def send_request(self, web_ip):
         try:
-            response = urllib2.urlopen("http://{0}/".format(web_ip)).read()
-            return response
-        except urllib2.HTTPError:
+            url_path = "http://{0}/".format(web_ip)
+            # the lbaas backend servers use nc and can be slow to respond
+            http = urllib3.PoolManager(retries=10)
+            resp = http.request('GET', url_path)
+            return resp.data.strip()
+        except Exception:
             return None

     def create_project_lbaas(self):
@@ -306,11 +309,19 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
             self.wait_for_load_balancer_status(lb_id)
             self.members.append(member)

+        # Currently the ovs-agent is not enforcing security groups on the
+        # vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
+        # However the linuxbridge-agent does, and it is necessary to add a
+        # security group with a rule that allows tcp port 80 to the vip port.
+        # NSX-v lbaasv2 is OK, but upstream neutron-lbaas needs this.
+        self.ports_client.update_port(
+            self.loadbalancer['vip_port_id'],
+            security_groups=[self.security_group['id']])
         # create the lbaas public interface
         self.vip_fip = self.create_floatingip_for_server(
-                self.loadbalancer, self.public_network_id,
-                port_id=self.loadbalancer['vip_port_id'],
-                client_mgr=self.manager)
+            self.loadbalancer, self.public_network_id,
+            port_id=self.loadbalancer['vip_port_id'],
+            client_mgr=self.manager)
         self.vip_ip_address = self.vip_fip['floating_ip_address']
         time.sleep(1.0)
         self.send_request(self.vip_ip_address)
@@ -321,9 +332,11 @@ class TestLBaasRoundRobinOps(dmgr.TopoDeployScenarioManager):
             self.loadbalancer['id'])
         statuses = statuses.get('statuses', statuses)
         self.http_cnt = {}
+        http = urllib3.PoolManager(retries=10)
+        url_path = "http://{0}/".format(self.vip_ip_address)
         for x in range(self.poke_counters):
-            response = self.send_request(self.vip_ip_address)
-            self.count_response(response)
+            resp = http.request('GET', url_path)
+            self.count_response(resp.data.strip())
         # should see responses from 2 servers
         self.assertEqual(2, len(self.http_cnt))
         # ROUND_ROBIN, so equal counts
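count_response is not shown in this diff; a minimal sketch consistent
with how self.http_cnt is used above (the method name appears in the
calls above, but this body is an assumption):

    def count_response(self, response):
        # tally responses per distinct backend reply; round-robin over
        # two members should produce two keys with equal counts
        if response is None:
            return
        self.http_cnt[response] = self.http_cnt.get(response, 0) + 1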