ipv6 test fix, octavia cascade delete case, octavia two listener case, check DHCPRelay rule on edge
Change-Id: I3e5a29405f7e0aecbaadf618cf955187e9dfe4dc
@@ -1605,6 +1605,9 @@ class FeatureManager(traffic_manager.IperfManager,
                                          admin_state_up=True)['loadbalancer']
         lb_id = self.loadbalancer['id']
         self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.octavia_admin_client.
+                        delete_octavia_load_balancer, lb_id)
         if barbican:
             tls_id = barbican_container["container_ref"]
         else:
@@ -1617,6 +1620,10 @@ class FeatureManager(traffic_manager.IperfManager,
                                     allowed_cidrs=allowed_cidrs,
                                     default_tls_container_ref=tls_id
                                     )['listener']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.octavia_admin_listener_client.
+                        delete_octavia_listener,
+                        self.listener['id'])
         self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
         if l7policy and action != 'REDIRECT_TO_POOL':
             l7p = self.octavia_admin_l7policies_client.\
@@ -1653,12 +1660,20 @@ class FeatureManager(traffic_manager.IperfManager,
                                 session_persistence=session_persistence)
         self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
         pool_id = self.pool['pool']['id']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.octavia_admin_pools_client.
+                        delete_octavia_pool,
+                        pool_id)
         if hm_type:
             self.healthmonitor = self.octavia_hm_client.\
                 create_octavia_hm(pool_id=pool_id, type=hm_type, delay=delay,
                                   timeout=timeout, max_retries=max_retries,
                                   name=lb_name)
             self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                            self.octavia_hm_client.
+                            delete_octavia_hm,
+                            self.healthmonitor['healthmonitor']['id'])
         self.members = []
         for server_name in self.topology_servers.keys():
             if count < member_count:
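The new addCleanup calls above lean on two tempest behaviors: cleanups run in LIFO order, and test_utils.call_and_ignore_notfound_exc swallows NotFound for resources that a cascade delete may already have removed. A minimal sketch of the ordering guarantee (plain unittest, not the plugin's FeatureManager):

```python
# Minimal sketch of the LIFO cleanup order the hunks above rely on.
import unittest


class CleanupOrderDemo(unittest.TestCase):
    def test_children_cleaned_before_parent(self):
        order = []
        # Registered first -> runs last (the parent load balancer).
        self.addCleanup(order.append, 'delete_load_balancer')
        # Registered later -> run first (listener, then pool).
        self.addCleanup(order.append, 'delete_listener')
        self.addCleanup(order.append, 'delete_pool')
        self.doCleanups()
        self.assertEqual(
            order, ['delete_pool', 'delete_listener', 'delete_load_balancer'])


if __name__ == '__main__':
    unittest.main()
```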
@@ -386,6 +386,22 @@ class VSMClient(object):
         else:
             return False

+    def verify_dhcp_relay_on_dhcp_edge(self):
+        edges = self.get_all_edges()
+        edge_list = []
+        for e in edges:
+            if (not e['edgeStatus'] == 'GREY' and
+                    not e['state'] == 'undeployed'):
+                p = re.compile(r'dhcp*')
+                if (p.match(e['name'])):
+                    edge_list.append(e['id'])
+        rules = self.get_edge_firewall_rules(edge_list[-1])
+        rule_names = [r['name'] for r in rules]
+        if 'DHCPRelay' in rule_names:
+            return True
+        else:
+            return False
+
     def get_edge_name_substring(self, name):
         """Get edge based on the name, which is OpenStack router.
         Return edge if found, else return None.
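A note on the name filter in verify_dhcp_relay_on_dhcp_edge: re.match anchors at the start of the string, and r'dhcp*' parses as 'dhc' followed by zero or more 'p'. It therefore still selects edges whose names begin with 'dhcp' (the assumed NSX-v DHCP edge prefix; the sample names below are made up):

```python
import re

# match() anchors at the start, and 'dhcp*' means 'dhc' plus zero-or-more
# 'p', so any name beginning with 'dhcp' passes the filter.
p = re.compile(r'dhcp*')
assert p.match('dhcp-edge-12') is not None   # DHCP edge: kept
assert p.match('edge-router-7') is None      # router edge: skipped
```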
@@ -656,8 +656,10 @@ class OctaviaLB_Client(base.BaseNetworkClient):
         uri = self.resource_object_path % load_balancer_id
         return self.show_resource(uri, **fields)

-    def delete_octavia_load_balancer(self, load_balancer_id):
+    def delete_octavia_load_balancer(self, load_balancer_id, cascade=False):
         uri = self.resource_object_path % load_balancer_id
+        if cascade:
+            uri = uri + '?cascade=True'
         return self.delete_resource(uri)

     def show_octavia_lb_status_tree(self, load_balancer_id, **fields):
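Octavia's v2 API accepts a cascade query parameter on load balancer DELETE, which removes the listeners, pools, members, and health monitors together with the LB itself. A runnable sketch of the URI the client builds; the resource_object_path template is an assumption for illustration, not copied from the plugin:

```python
# Sketch of the URI construction above; only the query-string handling
# mirrors the new client code, the path template is assumed.
resource_object_path = 'lbaas/loadbalancers/%s'   # assumed template
load_balancer_id = 'ca5c4368-6769-4a7b-8704-3844b11b1b66'
cascade = True

uri = resource_object_path % load_balancer_id
if cascade:
    uri = uri + '?cascade=True'
print(uri)  # lbaas/loadbalancers/ca5c4368-...?cascade=True
```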
@@ -14,6 +14,7 @@
 # under the License.

 from tempest import config
+from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest import test
@@ -91,7 +92,9 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
         super(OctaviaRoundRobin, self).tearDown()

     def deploy_octavia_topology(self, no_of_servers=2, image_id=None):
-        kwargs = {'name': "router_lbaas",
+        router_name = data_utils.rand_name('router_lbaas')
+        kwargs = {'name': router_name,
                   'router_type': 'exclusive',
                   'external_gateway_info':
                       {"network_id": CONF.network.public_network_id}}
         router_lbaas = self.cmgr_adm.routers_client.create_router(**kwargs)
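Randomizing the router name matters when several runs share one cloud: a fixed "router_lbaas" collides across runs, while rand_name appends a random suffix. Illustration only; requires tempest, and the exact output format depends on the tempest version:

```python
# data_utils.rand_name decorates the base name with a random numeric
# suffix (and, in newer tempest, a prefix), so concurrent runs no longer
# race on a fixed 'router_lbaas' name.
from tempest.lib.common.utils import data_utils

router_name = data_utils.rand_name('router_lbaas')
print(router_name)  # e.g. 'router_lbaas-1912276735' (version-dependent)
```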
@@ -786,3 +789,58 @@ class OctaviaRoundRobin(feature_manager.FeatureManager):
             l7p_id = i['id']
             self.octavia_admin_l7policies_client.delete_octavia_l7policy(
                 l7p_id)
+
+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('ca5c4368-6769-4a7b-8704-3844b11b1b66')
+    def test_delete_lb_with_cascade(self):
+        diction = self.deploy_octavia_topology()
+        subnet_id = diction['subnet']['subnet']['id']
+        lb_name = data_utils.rand_name(self.namestart)
+        self.loadbalancer = self.octavia_admin_client.\
+            create_octavia_load_balancer(name=lb_name,
+                                         vip_subnet_id=subnet_id,
+                                         admin_state_up=True)['loadbalancer']
+        lb_id = self.loadbalancer['id']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.listener = self.octavia_admin_listener_client.\
+            create_octavia_listener(loadbalancer_id=lb_id,
+                                    protocol='TCP',
+                                    protocol_port='1212',
+                                    allowed_cidrs=None,
+                                    name=lb_name)['listener']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.listener2 = self.octavia_admin_listener_client.\
+            create_octavia_listener(loadbalancer_id=lb_id,
+                                    protocol='TCP',
+                                    protocol_port='1213',
+                                    allowed_cidrs=None,
+                                    name='listener2')['listener']
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool2 = self.octavia_admin_pools_client.\
+            create_octavia_pool(listener_id=self.listener2['id'],
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name='pool2',
+                                session_persistence=None)
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.pool = self.octavia_admin_pools_client.\
+            create_octavia_pool(listener_id=self.listener['id'],
+                                lb_algorithm='ROUND_ROBIN',
+                                protocol='TCP',
+                                name=lb_name,
+                                session_persistence=None)
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        pool_id = self.pool['pool']['id']
+        self.healthmonitor = self.octavia_hm_client.\
+            create_octavia_hm(pool_id=pool_id, type='PING', delay=2,
+                              timeout=10, max_retries=5,
+                              name=lb_name)
+        self.octavia_admin_client.wait_for_load_balancer_status(lb_id)
+        self.octavia_admin_client.delete_octavia_load_balancer(lb_id,
+                                                               cascade=True)
+        self.octavia_admin_client.\
+            wait_for_load_balancer_status(lb_id, is_delete_op=True)
+        lbs = self.octavia_admin_client.\
+            list_octavia_load_balancers()['loadbalancers']
+        lb_names = [lb['name'] for lb in lbs]
+        self.assertFalse(lb_name in lb_names)
@@ -45,6 +45,7 @@ class TestNewCase(feature_manager.FeatureManager):
         super(TestNewCase, cls).setup_clients()
         cls.cmgr_adm = cls.get_client_manager('admin')
         cls.cmgr_alt = cls.get_client_manager('alt')
+        cls.cmgr_adm = cls.get_client_manager('admin')
         cls.routers_client = cls.cmgr_adm.routers_client
         cls.networks_client = cls.cmgr_adm.networks_client
         cls.subnets_client = cls.cmgr_adm.subnets_client
@@ -278,7 +279,7 @@ class TestNewCase(feature_manager.FeatureManager):
         """
         self.create_topo_single_network(
             "route-port", create_instance=False)
-        p_client = self.cmgr_adm.ports_client
+        p_client = self.ports_client
         port = self.get_router_port(p_client)
         kwargs = {'port_security_enabled': True}
         self.assertRaises(exceptions.BadRequest,
@@ -451,7 +452,7 @@ class TestNewCase(feature_manager.FeatureManager):
         topology_dict = self.create_topo_single_network(
             "dhcp_port", create_instance=False)
         network_state = topology_dict['network_state']
-        ports = self.cmgr_adm.ports_client.list_ports()
+        ports = self.ports_client.list_ports()
         for port in ports['ports']:
             if 'device_owner' in port:
                 if port['device_owner'] == "network:dhcp" \
@@ -461,8 +462,7 @@ class TestNewCase(feature_manager.FeatureManager):
         image_id = self.get_glance_image_id(['cirros', "esx"])
         self.assertRaises(exceptions.Conflict, self.create_topology_instance,
                           "state_vm_1", create_floating_ip=False,
-                          image_id=image_id, port=port,
-                          clients=self.cmgr_adm, security_groups="undef")
+                          image_id=image_id, port=port)

     @decorators.idempotent_id('1206016a-91cc-8905-b217-98844caa2212')
     @testtools.skipUnless(
|
||||
kwargs = {"port_security_enabled": "false"}
|
||||
self.networks_client.update_network(network_state['id'], **kwargs)
|
||||
self.create_topology_subnet("test-sub", network_state)
|
||||
ports = self.cmgr_adm.ports_client.list_ports()
|
||||
ports = self.ports_client.list_ports()
|
||||
for port in ports['ports']:
|
||||
if 'device_owner' in port:
|
||||
if port['device_owner'] == "network:dhcp" and \
|
||||
@@ -598,7 +598,7 @@ class TestNewCase(feature_manager.FeatureManager):
             1)
         fix_ip = [
             {'subnet_id': subnet_state.get('id'),
-             'ip_address': network_cidr[0] + '.222'}]
+             'ip_address': network_cidr[0] + '.2'}]
         args = {'fixed_ips': fix_ip, 'network_id': network_state['id']}
         self.assertRaises(exceptions.BadRequest,
                           self.cmgr_adm.ports_client.create_port,
@@ -1791,6 +1791,24 @@ class TestNewCase(feature_manager.FeatureManager):
         self.assertTrue(self.vsm.verify_default_snat_rule(name, routerIP,
                                                           cidr_value))

+    @decorators.attr(type='nsxv')
+    @decorators.idempotent_id('2226016a-91cc-8806-b217-12344caa25b3')
+    def test_dhcprelay_firewall_rule_on_dhcp_edge(self):
+        """
+        Check that the DHCPRelay firewall rule is present on the DHCP edge
+        """
+        net_name = data_utils.rand_name(name='tempest-network')
+        network = self.\
+            create_topology_network(net_name,
+                                    networks_client=self.networks_client)
+        subnet_name = network['name'] + 'sub'
+        cidr_value = '10.198.111.0/24'
+        self.create_topology_subnet(subnet_name, network,
+                                    routers_client=self.routers_client,
+                                    subnets_client=self.subnets_client,
+                                    cidr=cidr_value)
+        self.assertTrue(self.vsm.verify_dhcp_relay_on_dhcp_edge())
+
     @decorators.attr(type='nsxv')
     @decorators.idempotent_id('2226016a-91cc-8905-b217-12344cab24a1')
     def test_update_router_with_static_route_via_any_CIDR(self):
@@ -1863,7 +1881,7 @@ class TestNewCase(feature_manager.FeatureManager):
             security_group_rules_client=sec_rule_client,
             security_groups_client=sec_client)
         self.sg = self.create_topology_security_group(**kwargs)
-        image_id = self.get_glance_image_id(['c22'])
+        image_id = self.get_glance_image_id(['cirros'])
         security_groups = [{'name': self.sg['name']}]
         s1 = self.create_topology_instance("state_vm_1",
                                            [network_ipv4, network_ipv6],
@@ -1872,7 +1890,8 @@ class TestNewCase(feature_manager.FeatureManager):
                                            security_groups=security_groups,
                                            clients=self.cmgr_adm)
         ip_address = s1['floating_ips'][0]['floating_ip_address']
-        ipv6_address = s1['addresses'].values()[1][1]['addr']
+        address = list(s1['addresses'].values())
+        ipv6_address = address[1][0]['addr']
         ssh_src1 = self._get_remote_client(ip_address, username='cirros',
                                            use_password=True)
         self._assign_ipv6_address(ssh_src1, 'eth1', ipv6_address)
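The ipv6 fix above is a Python 3 compatibility change: dict.values() returns a view object, which cannot be indexed, so the old s1['addresses'].values()[1][1]['addr'] raises TypeError under Python 3. Wrapping the view in list() restores indexing. A runnable illustration; the sample addresses are made up:

```python
# Why the last hunk is needed on Python 3: a dict_values view is not
# subscriptable, so it must be materialized with list() before indexing.
# (Relies on dict insertion order, guaranteed since Python 3.7.)
addresses = {
    'ipv4-net': [{'addr': '192.168.10.5'}],    # hypothetical server data
    'ipv6-net': [{'addr': 'fd00:10:20::5'}],
}
try:
    addresses.values()[1][0]['addr']           # old Python 2 style
except TypeError:
    pass  # Python 3: "'dict_values' object is not subscriptable"

address = list(addresses.values())             # the committed fix
ipv6_address = address[1][0]['addr']
assert ipv6_address == 'fd00:10:20::5'
```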