Separate mdproxy tempest testcases for policy

Change-Id: I066c50895b6980373e1ade798af74c904054c7b4
Author: dkhairnar
Date: 2019-12-10 07:59:24 +00:00
Parent: 18238d2957
Commit: f4912d5e94
4 changed files with 447 additions and 72 deletions


@@ -209,6 +209,36 @@ NSXv3Group = [
                help="Uplink profile transport vlan"),
 ]
 
+nsxp_group = cfg.OptGroup(name='nsxp',
+                          title="NSXP Configuration Options")
+
+NSXPGroup = [
+    cfg.StrOpt('nsx_manager',
+               default='',
+               help="NSX manager IP address"),
+    cfg.StrOpt('nsx_user',
+               default='admin',
+               help="NSX manager username"),
+    cfg.StrOpt('nsx_password',
+               default='default',
+               help="NSX manager password"),
+    cfg.BoolOpt('native_dhcp_metadata',
+                default=False,
+                help="Enable or disable Native DHCP and MDProxy for nsxp"),
+    cfg.BoolOpt('ens',
+                default=False,
+                help="enable ens based changes like port-security-disabled"
+                     " no security-group"),
+    cfg.BoolOpt('bridge_cluster',
+                default=False,
+                help="Use Bridge ESXi based cluster id or Bridge Edge Endpoint"
+                     " profile"),
+    cfg.StrOpt('transport_vlan',
+               default='20',
+               help="Uplink profile transport vlan"),
+]
+
 dns_group = cfg.OptGroup(name='dns',
                          title="DNS Configuration Options")


@@ -31,7 +31,8 @@ _opts = [
     (config_nsx.dns_group, config_nsx.DNSGroup),
     (config_nsx.barbican_group, config_nsx.BarbicanGroup),
     (config_nsx.nsx_edge_group, config_nsx.NsxEdgeGroup),
-    (config_nsx.trunk_group, config_nsx.TrunkGroup)
+    (config_nsx.trunk_group, config_nsx.TrunkGroup),
+    (config_nsx.nsxp_group, config_nsx.NSXPGroup)
 ]


@@ -103,79 +103,59 @@ class TestMDProxy(feature_manager.FeatureManager):
     def verify_md_proxy_logical_ports_on_backend(self, tenant_id, network):
         md_counter = 0
-        network_name = network["name"]
         network_id = network["id"]
-        if CONF.network.backend == 'nsxp':
-            logical_switch = self.nsxp_client.get_logical_switch(network_name,
-                                                                 network_id)
-            switch_id = str(logical_switch["id"])
-            mdproxy_path = logical_switch['metadata_proxy_paths']
-            md_proxy_uuid = str(mdproxy_path[0].split("/")[-1])
-            if mdproxy_path is None:
-                raise ValueError('MDPorxy is not attached to logical switch')
-            display_name = "infra|%s|%s|md-proxy-attach-port" % (switch_id,
-                                                                 md_proxy_uuid)
-            logical_ports = self.nsx.get_logical_ports()
-            for port_index in range(len(logical_ports)):
-                if "attachment" in logical_ports[port_index]:
-                    if logical_ports[port_index]["attachment"][
-                            "attachment_type"] == "METADATA_PROXY":
-                        if logical_ports[port_index]["display_name"] == \
-                                display_name:
-                            md_counter += 1
-        else:
-            logical_ports = self.nsx.get_os_logical_ports()
-            for port_index in range(len(logical_ports)):
-                if "attachment" in logical_ports[port_index]:
-                    if logical_ports[port_index]["attachment"][
-                            "attachment_type"] == "METADATA_PROXY":
-                        if logical_ports[port_index][
-                                "tags"][0]["tag"] == network_id:
-                            msg = "MDproxy logical port does not "\
-                                  "have proper tenant "\
-                                  "id!!!"
-                            self.assertEqual(
-                                tenant_id, logical_ports[port_index]["tags"][
-                                    1]["tag"], msg)
-                            md_counter += 1
-                            msg1 = "Admin state of MDProxy logical port is DOWN!!!"
-                            msg2 = "LS name does not start with mdproxy!!!"
-                            msg3 = "MDproxy logical port does not have any auto " \
-                                   "tag!"
-                            msg4 = "MDproxy logical port does not have scope " \
-                                   "tag as os-neutron-net-id!!!"
-                            msg5 = "MDproxy logical port does not have scope " \
-                                   "tag as os-project-id!!!"
-                            msg6 = "MDproxy logical port does not have scope " \
-                                   "tag as os-project-name!!!"
-                            msg7 = "MDproxy logical port does not have scope " \
-                                   "tag as os-api-version!!!"
-                            self.assertEqual(
-                                "UP", logical_ports[port_index]["admin_state"],
-                                msg1)
-                            self.assertIn("mdproxy-",
-                                          logical_ports[port_index]
-                                          ["display_name"], msg2)
-                            self.assertNotEqual(
-                                0,
-                                len(logical_ports[port_index]["tags"]),
-                                msg3)
-                            self.assertEqual(
-                                "os-neutron-net-id",
-                                logical_ports[port_index]["tags"][0]["scope"],
-                                msg4)
-                            self.assertEqual(
-                                "os-project-id",
-                                logical_ports[port_index]["tags"][1]["scope"],
-                                msg5)
-                            self.assertEqual(
-                                "os-project-name",
-                                logical_ports[port_index]["tags"][2]["scope"],
-                                msg6)
-                            self.assertEqual(
-                                "os-api-version",
-                                logical_ports[port_index]["tags"][3]["scope"],
-                                msg7)
+        logical_ports = self.nsx.get_os_logical_ports()
+        for port_index in range(len(logical_ports)):
+            if "attachment" in logical_ports[port_index]:
+                if logical_ports[port_index]["attachment"][
+                        "attachment_type"] == "METADATA_PROXY":
+                    if logical_ports[port_index][
+                            "tags"][0]["tag"] == network_id:
+                        msg = "MDproxy logical port does not "\
+                              "have proper tenant "\
+                              "id!!!"
+                        self.assertEqual(
+                            tenant_id, logical_ports[port_index]["tags"][
+                                1]["tag"], msg)
+                        md_counter += 1
+                        msg1 = "Admin state of MDProxy logical port is DOWN!!!"
+                        msg2 = "LS name does not start with mdproxy!!!"
+                        msg3 = "MDproxy logical port does not have any auto " \
+                               "tag!"
+                        msg4 = "MDproxy logical port does not have scope " \
+                               "tag as os-neutron-net-id!!!"
+                        msg5 = "MDproxy logical port does not have scope " \
+                               "tag as os-project-id!!!"
+                        msg6 = "MDproxy logical port does not have scope " \
+                               "tag as os-project-name!!!"
+                        msg7 = "MDproxy logical port does not have scope " \
+                               "tag as os-api-version!!!"
+                        self.assertEqual(
+                            "UP", logical_ports[port_index]["admin_state"],
+                            msg1)
+                        self.assertIn("mdproxy-",
+                                      logical_ports[port_index]
+                                      ["display_name"], msg2)
+                        self.assertNotEqual(
+                            0,
+                            len(logical_ports[port_index]["tags"]),
+                            msg3)
+                        self.assertEqual(
+                            "os-neutron-net-id",
+                            logical_ports[port_index]["tags"][0]["scope"],
+                            msg4)
+                        self.assertEqual(
+                            "os-project-id",
+                            logical_ports[port_index]["tags"][1]["scope"],
+                            msg5)
+                        self.assertEqual(
+                            "os-project-name",
+                            logical_ports[port_index]["tags"][2]["scope"],
+                            msg6)
+                        self.assertEqual(
+                            "os-api-version",
+                            logical_ports[port_index]["tags"][3]["scope"],
+                            msg7)
         self.assertNotEqual(0, md_counter, "No logical port found for MD "
                             "proxy!!!")


@@ -0,0 +1,364 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
from vmware_nsx_tempest_plugin.common import constants
from vmware_nsx_tempest_plugin.lib import feature_manager
from vmware_nsx_tempest_plugin.services import nsxp_client
from vmware_nsx_tempest_plugin.services import nsxv3_client
CONF = config.CONF
LOG = logging.getLogger(__name__)

class TestMDProxyPolicy(feature_manager.FeatureManager):
    """Test MDProxy.

    Adding test cases to test MDProxy in different scenarios such as
    testing it over multiple created networks, verify MDProxy realization
    with nsxv3 backend, test MDProxy with isolated network and so on.
    """

    @classmethod
    def skip_checks(cls):
        super(TestMDProxyPolicy, cls).skip_checks()
        if not CONF.nsxp.native_dhcp_metadata:
            msg = " native_dhcp_metadata is not enabled under nsxp config" \
                  ", skipping all the MDProxy tests!!!"
            raise cls.skipException(msg)

    @classmethod
    def setup_credentials(cls):
        cls.set_network_resources()
        cls.admin_mgr = cls.get_client_manager('admin')
        super(TestMDProxyPolicy, cls).setup_credentials()

    @classmethod
    def setup_clients(cls):
        """
        Create various client connections. Such as NSX.
        """
        super(TestMDProxyPolicy, cls).setup_clients()
        cls.nsx = nsxv3_client.NSXV3Client(
            CONF.nsxv3.nsx_manager,
            CONF.nsxv3.nsx_user,
            CONF.nsxv3.nsx_password)
        cls.nsxp_client = nsxp_client.NSXPClient(CONF.nsxv3.nsx_manager,
                                                 CONF.nsxv3.nsx_user,
                                                 CONF.nsxv3.nsx_password)
def _verify_md(self, md_url, expected_value="",
sub_result=None, ssh_client=None):
cmd = "curl " + md_url
self.exec_cmd_on_server_using_fip(
cmd, ssh_client=ssh_client, sub_result=sub_result,
expected_value=expected_value)
def verify_metadata_in_detail(self, instance, ssh_client, floatingip,
fixed_ip):
# Check floating IPv4 in Metadata.
md_url_pubic_ipv4 = constants.MD_BASE_URL + \
"latest/meta-data/public-ipv4"
instance_name = instance["name"].replace("_", "-")
self._verify_md(md_url=md_url_pubic_ipv4, expected_value=floatingip,
ssh_client=ssh_client)
# Check hostname in Metadata.
md_url_hostname = constants.MD_BASE_URL + "latest/meta-data/hostname"
self._verify_md(md_url=md_url_hostname,
expected_value=instance_name + ".novalocal",
ssh_client=ssh_client)
# Check local IPv4 in Metadata.
md_url_local_ipv4 = constants.MD_BASE_URL + \
"latest/meta-data/local-ipv4"
self._verify_md(md_url=md_url_local_ipv4, expected_value=fixed_ip,
ssh_client=ssh_client)
# Check hostname in Metadata of 2009-04-04 folder.
md_url_hostname = constants.MD_BASE_URL + \
"2009-04-04/meta-data/hostname"
self._verify_md(md_url=md_url_hostname,
expected_value=instance_name + ".novalocal",
ssh_client=ssh_client)
# Check hostname in Metadata of 1.0 folder.
md_url_hostname = constants.MD_BASE_URL + "1.0/meta-data/hostname"
self._verify_md(md_url=md_url_hostname,
expected_value=instance_name + ".novalocal",
ssh_client=ssh_client)

    def verify_md_proxy_logical_ports_on_backend(self, tenant_id, network):
        md_counter = 0
        network_name = network["name"]
        network_id = network["id"]
        logical_switch = self.nsxp_client.get_logical_switch(network_name,
                                                             network_id)
        switch_id = str(logical_switch["id"])
        mdproxy_path = logical_switch['metadata_proxy_paths']
        # Validate the attachment before dereferencing the proxy path.
        if mdproxy_path is None:
            raise ValueError('MDProxy is not attached to logical switch')
        md_proxy_uuid = str(mdproxy_path[0].split("/")[-1])
        display_name = "infra|%s|%s|md-proxy-attach-port" % (switch_id,
                                                             md_proxy_uuid)
        logical_ports = self.nsx.get_logical_ports()
        for port_index in range(len(logical_ports)):
            if "attachment" in logical_ports[port_index]:
                if logical_ports[port_index]["attachment"][
                        "attachment_type"] == "METADATA_PROXY":
                    if logical_ports[port_index]["display_name"] == \
                            display_name:
                        md_counter += 1
        self.assertNotEqual(0, md_counter, "No logical port found for MD "
                            "proxy!!!")

    def deploy_mdproxy_topology(self, glance_image_id=None):
        router_mdproxy = self.create_topology_router("router_mdproxy")
        network_mdproxy = self.create_topology_network("network_mdproxy")
        self.create_topology_subnet(
            "subnet_web", network_mdproxy, router_id=router_mdproxy["id"])
        self.create_topology_instance(
            "server_mdproxy_1", [network_mdproxy], image_id=glance_image_id)

    def deploy_mdproxy_topology_2(self):
        network_mdproxy = self.create_topology_network("network_mdproxy")
        self.create_topology_subnet(
            "subnet_web", network_mdproxy)
        self.create_topology_instance(
            "server_mdproxy_1", [network_mdproxy], create_floating_ip=False)

    def deploy_mdproxy_topology_3(self):
        router_mdproxy = self.create_topology_router("router_mdproxy")
        network_mdproxy_1 = self.create_topology_network("network_mdproxy_1")
        self.create_topology_subnet(
            "subnet_web_1", network_mdproxy_1, router_id=router_mdproxy["id"])
        self.create_topology_instance(
            "server_mdproxy_1", [network_mdproxy_1])
        network_mdproxy_2 = self.create_topology_network("network_mdproxy_2")
        self.create_topology_subnet("subnet_web_2", network_mdproxy_2,
                                    router_id=router_mdproxy["id"])
        self.create_topology_instance("server_mdproxy_2", [network_mdproxy_2])

    def metadata_test_on_various_glance_image(self, image_id):
        self.deploy_mdproxy_topology(glance_image_id=image_id)
        # Verify ssh, detailed metadata and verify backend data
        ssh_client = self.verify_server_ssh(
            server=self.topology_servers["server_mdproxy_1"],
            use_password=True)
        fixed_ip = \
            self.topology_servers["server_mdproxy_1"]["floating_ips"][0][
                "fixed_ip_address"]
        fip = self.topology_servers["server_mdproxy_1"]["floating_ips"][0][
            "floating_ip_address"]
        self.verify_metadata_in_detail(
            instance=self.topology_servers["server_mdproxy_1"],
            ssh_client=ssh_client, floatingip=fip, fixed_ip=fixed_ip)
        tenant_id = self.topology_networks["network_mdproxy"]["tenant_id"]
        # Pass the whole network dict; the backend check reads its name and id.
        network = self.topology_networks["network_mdproxy"]
        self.verify_md_proxy_logical_ports_on_backend(tenant_id, network)
@decorators.idempotent_id("e9a93161-d852-415d-aa55-36d465ea45df")
@utils.services("compute", "network")
def test_mdproxy_ping(self):
self.deploy_mdproxy_topology()
# Verify ssh connection and basic mdproxy data.
ssh_client = self.verify_server_ssh(server=self.topology_servers[
"server_mdproxy_1"])
md_url_pubic_ipv4 = constants.MD_BASE_URL + \
"latest/meta-data/public-ipv4"
fip = self.topology_servers["server_mdproxy_1"][
"floating_ips"][0]["floating_ip_address"]
self._verify_md(md_url=md_url_pubic_ipv4, expected_value=fip,
ssh_client=ssh_client)
@decorators.idempotent_id("743f34a6-58b8-4298-a07f-7bee21c55051")
@utils.services("compute", "network")
def test_mdproxy_verify_backend(self):
self.deploy_mdproxy_topology()
# Verify ssh, detailed metadata and verify backend data
ssh_client = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"])
fixed_ip = self.topology_servers["server_mdproxy_1"]["floating_ips"][
0]["fixed_ip_address"]
fip = self.topology_servers["server_mdproxy_1"]["floating_ips"][0][
"floating_ip_address"]
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_1"],
ssh_client=ssh_client, floatingip=fip, fixed_ip=fixed_ip)
tenant_id = self.topology_networks["network_mdproxy"]["tenant_id"]
network = self.topology_networks["network_mdproxy"]
self.verify_md_proxy_logical_ports_on_backend(tenant_id, network)
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("fce2acc8-c850-40fe-bf02-958dd3cd4343")
@utils.services("compute", "network")
def test_mdproxy_with_server_on_two_ls(self):
router_mdproxy = self.create_topology_router("router_mdproxy")
network_mdproxy = self.create_topology_network("network_mdproxy")
self.create_topology_subnet("subnet_web", network_mdproxy,
router_id=router_mdproxy["id"])
network2_mdproxy = self.create_topology_network("network2_mdproxy")
self.create_topology_subnet("subnet2_web", network2_mdproxy,
router_id=router_mdproxy["id"])
# Instance has 2 network ports.
self.create_topology_instance(
"server_mdproxy_1", [network_mdproxy, network2_mdproxy])
floating_ip_1 = self.topology_servers["server_mdproxy_1"][
"floating_ips"][0]["floating_ip_address"]
fixed_ip_1 = self.topology_servers["server_mdproxy_1"][
"floating_ips"][0]["fixed_ip_address"]
ssh_client1 = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"],
floating_ip=floating_ip_1)
floating_ip_2 = self.topology_servers["server_mdproxy_1"][
"floating_ips"][1]["floating_ip_address"]
self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"],
floating_ip=floating_ip_2)
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_1"],
ssh_client=ssh_client1, floatingip=floating_ip_1,
fixed_ip=fixed_ip_1)
@decorators.idempotent_id("67332752-c295-42cb-a8c3-99210fb6b00b")
@utils.services("compute", "network")
def test_mdproxy_isolated_network(self):
# Deploy topology without tier1 router
self.deploy_mdproxy_topology_2()
tenant_id = self.topology_networks["network_mdproxy"]["tenant_id"]
network = self.topology_networks["network_mdproxy"]
# Verify MDProxy logical ports on Backend
self.verify_md_proxy_logical_ports_on_backend(tenant_id, network)
@decorators.idempotent_id("cc8d2ab8-1bea-4e32-bf80-c9c46a7612b7")
@decorators.attr(type=["negative"])
@utils.services("compute", "network")
def test_mdproxy_delete_when_ls_bounded(self):
self.deploy_mdproxy_topology_2()
md_proxy_uuid = self.nsx.get_md_proxies()[0]["id"]
result = self.nsx.delete_md_proxy(md_proxy_uuid)
# Delete mdproxy server when it is still attached to LS
self.assertEqual(str(result["error_code"]),
constants.MD_ERROR_CODE_WHEN_LS_BOUNDED)
@decorators.idempotent_id("501fc3ea-696b-4e94-b383-293ab94e2545")
@utils.services("compute", "network")
def test_mdproxy_with_multiple_ports_on_network(self):
self.deploy_mdproxy_topology()
# Boot 2nd vm on same network
network = self.topology_networks["network_mdproxy"]
self.create_topology_instance(
"server_mdproxy_2", [network])
# Verify Metadata from vm1
ssh_client_1 = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"])
fixed_ip_1 = self.topology_servers["server_mdproxy_1"][
"floating_ips"][0][
"fixed_ip_address"]
fip_1 = self.topology_servers["server_mdproxy_1"]["floating_ips"][0][
"floating_ip_address"]
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_1"],
ssh_client=ssh_client_1, floatingip=fip_1, fixed_ip=fixed_ip_1)
# Verify Metadata from vm2
ssh_client_2 = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_2"])
fixed_ip_2 = self.topology_servers["server_mdproxy_2"][
"floating_ips"][0][
"fixed_ip_address"]
fip_2 = self.topology_servers["server_mdproxy_2"]["floating_ips"][0][
"floating_ip_address"]
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_2"],
ssh_client=ssh_client_2, floatingip=fip_2, fixed_ip=fixed_ip_2)
# Verify Metadata on backend
tenant_id = self.topology_networks["network_mdproxy"]["tenant_id"]
network = self.topology_networks["network_mdproxy"]
self.verify_md_proxy_logical_ports_on_backend(tenant_id, network)
@decorators.idempotent_id("eae21afc-50ea-42e5-9c49-2be38cee9f06")
@utils.services("compute", "network")
def test_mdproxy_with_multiple_metadata_ports(self):
self.deploy_mdproxy_topology_3()
# Verify 1st instance on the network1
ssh_client_1 = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"])
fixed_ip_1 = self.topology_servers["server_mdproxy_1"][
"floating_ips"][0][
"fixed_ip_address"]
fip_1 = self.topology_servers["server_mdproxy_1"]["floating_ips"][0][
"floating_ip_address"]
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_1"],
ssh_client=ssh_client_1, floatingip=fip_1, fixed_ip=fixed_ip_1)
# Verify 2nd instance on the network2
ssh_client_2 = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_2"])
fixed_ip_2 = self.topology_servers["server_mdproxy_2"][
"floating_ips"][0][
"fixed_ip_address"]
fip_2 = self.topology_servers["server_mdproxy_2"]["floating_ips"][0][
"floating_ip_address"]
self.verify_metadata_in_detail(
instance=self.topology_servers["server_mdproxy_2"],
ssh_client=ssh_client_2, floatingip=fip_2, fixed_ip=fixed_ip_2)
@decorators.idempotent_id("29d44d7c-6ea1-4b30-a6c3-a2795c2486fe")
@decorators.attr(type=["negative"])
@utils.services("compute", "network")
def test_mdproxy_with_incorrect_password(self):
self.deploy_mdproxy_topology()
ssh_client = self.verify_server_ssh(
server=self.topology_servers["server_mdproxy_1"])
md_url_pubic_ipv4 = constants.MD_BASE_URL + \
"latest/meta-data/public-ipv4"
# Query metadata and query should fail
self._verify_md(md_url=md_url_pubic_ipv4, expected_value="",
ssh_client=ssh_client, sub_result="403 Forbidden")
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("74e5d545-3ccc-46c8-9cda-16ccf8730a5b")
@utils.services("compute", "network")
def test_mdproxy_with_cirros_kvm_server_image(self):
image_id = self.get_glance_image_id(["cirros", "kvm"])
self.metadata_test_on_various_glance_image(image_id)
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("35babffc-f098-5705-82b7-ab96a6f4fdd8")
@utils.services("compute", "network")
def test_mdproxy_with_debian_esx_server_image(self):
image_id = self.get_glance_image_id(["debian", "esx"])
self.metadata_test_on_various_glance_image(image_id)
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("72ba325f-083b-4247-9192-a9f54d3ecfd2")
@utils.services("compute", "network")
def test_mdproxy_with_debian_kvm_server_image(self):
image_id = self.get_glance_image_id(["debian", "kvm"])
self.metadata_test_on_various_glance_image(image_id)
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("dfed6074-c4a2-4bf7-a805-80a191ea7875")
@utils.services("compute", "network")
def test_mdproxy_with_xenial_esx_server_image(self):
image_id = self.get_glance_image_id(["xenial", "esx"])
self.metadata_test_on_various_glance_image(image_id)
@decorators.skip_because(bug="2004971")
@decorators.idempotent_id("55829b7f-1535-41f8-833f-b20ac0ee48e0")
@utils.services("compute", "network")
def test_mdproxy_with_xenial_kvm_server_image(self):
image_id = self.get_glance_image_id(["xenial", "kvm"])
self.metadata_test_on_various_glance_image(image_id)
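
With the plugin installed alongside tempest and the [nsxp] section configured, the new policy MDProxy tests can be selected on their own with a name filter. The regex below is only one possible invocation and assumes the default test discovery set-up:

    tempest run --regex '.*test_mdproxy_policy.*'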