NSX|P: support policy MDproxy

Use policy MDproxy objects.
The initialization supports both MP and policy MD proxies, and uses the one
that was configured, so there is no need for a migration or upgrade process.

In addition, remove the long-standing dependency between the AZs and
_has_native_dhcp_metadata, which dates back to the v3 plugin.

Change-Id: I7df49d6a347715cf1ec5453f921e67bd71b1f225
Adit Sarfaty 2019-09-15 14:44:36 +03:00
parent 27a93cb2e6
commit a4e35f0ad3
6 changed files with 132 additions and 70 deletions
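
In short, each NSX-P availability zone now resolves its metadata proxy in two
steps: prefer a policy MD proxy when the backend supports it, otherwise fall
back to the MP object. A minimal sketch of that selection logic, assuming
three injected callables (hypothetical stand-ins, not the plugin's API):

def pick_md_proxy(policy_md_supported, lookup_policy_md, lookup_mp_md):
    """Return (md_proxy_uuid, use_policy_md)."""
    if policy_md_supported():
        try:
            # Prefer the policy MD proxy object when the NSX feature
            # flag reports support for it.
            return lookup_policy_md(), True
        except Exception:
            # Could not resolve a policy MD proxy - fall back to MP.
            pass
    # Keep using the MP MD proxy, exactly as before this change.
    return lookup_mp_md(), False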

View File

@@ -975,10 +975,7 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
     def _default_physical_net(self, physical_net):
         return physical_net is None or physical_net == 'default'

-    def _validate_provider_create(self, context, network_data,
-                                  default_vlan_tz_uuid,
-                                  default_overlay_tz_uuid,
-                                  mdproxy_uuid,
+    def _validate_provider_create(self, context, network_data, az,
                                   nsxlib_tz, nsxlib_network,
                                   transparent_vlan=False):
         """Validate the parameters of a new provider network
@@ -991,6 +988,11 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         - vlan_id: vlan tag, 0 or None
         - switch_mode: standard or ENS
         """
+        # initialize the relevant parameters from the AZ
+        default_vlan_tz_uuid = az._default_vlan_tz_uuid
+        default_overlay_tz_uuid = az._default_overlay_tz_uuid
+        mdproxy_uuid = az._native_md_proxy_uuid
+
         is_provider_net = any(
             validators.is_attr_set(network_data.get(f))
             for f in (pnet.NETWORK_TYPE,
@@ -1121,9 +1123,10 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             switch_mode = nsxlib_tz.get_host_switch_mode(physical_net)

         # validate the mdproxy TZ matches this one.
-        if (not err_msg and physical_net and self.nsxlib and
+        if (not err_msg and physical_net and
                 self._has_native_dhcp_metadata()):
-            if not self._validate_net_mdproxy_tz(physical_net, mdproxy_uuid):
+            if not self._validate_net_mdproxy_tz(
+                    az, physical_net, mdproxy_uuid):
                 err_msg = (_('Network TZ %(tz)s does not match MD proxy '
                              '%(md)s edge cluster') %
                            {'tz': physical_net, 'md': mdproxy_uuid})
@@ -1143,19 +1146,11 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 'vlan_id': vlan_id,
                 'switch_mode': switch_mode}

-    def _validate_net_mdproxy_tz(self, tz_uuid, mdproxy_uuid):
-        """Validate that the network TZ matches the mdproxy edge cluster"""
-        # TODO(asarfaty): separate the validation when using policy mdproxy
-        mdproxy_obj = self.nsxlib.native_md_proxy.get(mdproxy_uuid)
-        ec_id = mdproxy_obj['edge_cluster_id']
-        ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id)
-        ec_tzs = []
-        for tn_uuid in ec_nodes:
-            ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones(
-                tn_uuid))
-        if tz_uuid not in ec_tzs:
-            return False
-        return True
+    def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid):
+        """Validate that the network TZ matches the mdproxy edge cluster
+
+        Should be implemented by each plugin.
+        """
+        pass

     def _network_is_nsx_net(self, context, network_id):
         bindings = nsx_db.get_network_bindings(context.session, network_id)
@@ -1958,7 +1953,6 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                 {'router': {az_def.AZ_HINTS: az_hints}})

     def get_network_availability_zones(self, net_db):
-        if self._has_native_dhcp_metadata():
         hints = az_validator.convert_az_string_to_list(
             net_db[az_def.AZ_HINTS])
         # When using the configured AZs, the az will always be the same
@@ -1968,8 +1962,6 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         else:
             az_name = self.get_default_az().name
         return [az_name]
-        else:
-            return []

     def _get_router_az_obj(self, router):
         l3_attrs_db.ExtraAttributesMixin._extend_extra_router_dict(
@@ -1987,11 +1979,6 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
             force=True)

     def _list_availability_zones(self, context, filters=None):
-        # If no native_dhcp_metadata - use neutron AZs
-        if not self._has_native_dhcp_metadata():
-            return super(NsxPluginV3Base, self)._list_availability_zones(
-                context, filters=filters)
         result = {}
         for az in self._availability_zones_data.list_availability_zones():
             # Add this availability zone as a network & router resource
@@ -2010,10 +1997,6 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
         if self._is_sub_plugin and not force:
             # validation should be done together for both plugins
             return
-        # If no native_dhcp_metadata - use neutron AZs
-        if not self._has_native_dhcp_metadata():
-            return super(NsxPluginV3Base, self).validate_availability_zones(
-                context, resource_type, availability_zones)
         # Validate against the configured AZs
         return self.validate_obj_azs(availability_zones)
@@ -2706,7 +2689,8 @@ class NsxPluginV3Base(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
                          net_name or 'network'),
                      net_id)

-    def _create_net_mdproxy_port(self, context, network, az, nsx_net_id):
+    def _create_net_mp_mdproxy_port(self, context, network, az, nsx_net_id):
+        """Add MD proxy on the MP logical-switch by creating a logical port"""
         if (not self.nsxlib or
                 not self._has_native_dhcp_metadata()):
             return

View File

@@ -21,6 +21,7 @@ from vmware_nsx.common import config
 from vmware_nsx.common import exceptions as nsx_exc
 from vmware_nsx.plugins.common_v3 import availability_zones as v3_az
 from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
+from vmware_nsxlib.v3 import nsx_constants
 from vmware_nsxlib.v3.policy import utils as p_utils

 LOG = log.getLogger(__name__)
@@ -136,13 +137,37 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
             auto_config=True, is_mandatory=False,
             search_scope=search_scope)

+        self.use_policy_md = False
+        if (nsxpolicy.feature_supported(
+                nsx_constants.FEATURE_NSX_POLICY_MDPROXY)):
+            # Try to initialize md-proxy from the policy
+            try:
+                self._native_md_proxy_uuid = self._init_default_resource(
+                    nsxpolicy, nsxpolicy.md_proxy, 'metadata_proxy',
+                    auto_config=True, is_mandatory=True,
+                    search_scope=search_scope)
+                LOG.info("NSX-P az using policy MD proxy: %s",
+                         self._native_md_proxy_uuid)
+                self.use_policy_md = True
+            except Exception:
+                LOG.info("NSX-P az could not use policy MD proxy. Using MP "
+                         "one instead")
+
+        if not self.use_policy_md:
+            # Try to initialize md-proxy from the MP
+            if nsxlib:
+                self._translate_metadata_proxy(
+                    nsxlib, search_scope=search_scope)
+                LOG.info("NSX-P az using MP MD proxy: %s",
+                         self._native_md_proxy_uuid)
+            else:
+                self._native_md_proxy_uuid = None
+
         # If passthrough api is supported, also initialize those NSX objects
         if nsxlib:
             self._translate_dhcp_profile(nsxlib, search_scope=search_scope)
-            self._translate_metadata_proxy(nsxlib, search_scope=search_scope)
         else:
             self._native_dhcp_profile_uuid = None
-            self._native_md_proxy_uuid = None

     def _get_edge_cluster_tzs(self, nsxpolicy, nsxlib, ec_uuid):
         ec_nodes = nsxpolicy.edge_cluster.get_edge_node_ids(ec_uuid)
@@ -207,6 +232,13 @@ class NsxPAvailabilityZone(v3_az.NsxV3AvailabilityZone):
                     dhcp_ec)

         if self._native_md_proxy_uuid:
+            # Validate that the edge cluster of the MD proxy (MP or policy one)
+            # matches the configured TZs
+            if self.use_policy_md:
+                md_ec_path = nsxpolicy.md_proxy.get(
+                    self._native_md_proxy_uuid).get('edge_cluster_path')
+                md_ec = p_utils.path_to_id(md_ec_path)
+            else:
-            md_ec = nsxlib.native_md_proxy.get(
-                self._native_md_proxy_uuid).get('edge_cluster_id')
+                md_ec = nsxlib.native_md_proxy.get(
+                    self._native_md_proxy_uuid).get('edge_cluster_id')
             if md_ec != tier0_ec_uuid:

View File

@@ -98,6 +98,7 @@ from vmware_nsxlib.v3 import exceptions as nsx_lib_exc
 from vmware_nsxlib.v3 import nsx_constants as nsxlib_consts
 from vmware_nsxlib.v3.policy import constants as policy_constants
 from vmware_nsxlib.v3.policy import core_defs as policy_defs
+from vmware_nsxlib.v3.policy import utils as p_utils
 from vmware_nsxlib.v3 import security
 from vmware_nsxlib.v3 import utils as nsxlib_utils
@@ -584,7 +585,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
     def _create_network_on_backend(self, context, net_data,
                                    transparent_vlan,
-                                   provider_data):
+                                   provider_data, az):
         net_data['id'] = net_data.get('id') or uuidutils.generate_uuid()

         # update the network name to indicate the neutron id too.
@@ -610,13 +611,18 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         else:
             vlan_ids = None

+        kwargs = {
+            'segment_id': net_data['id'],
+            'description': net_data.get('description'),
+            'vlan_ids': vlan_ids,
+            'transport_zone_id': provider_data['physical_net'],
+            'tags': tags}
+        if az.use_policy_md:
+            kwargs['metadata_proxy_id'] = az._native_md_proxy_uuid
+
         self.nsxpolicy.segment.create_or_overwrite(
-            net_name,
-            segment_id=net_data['id'],
-            description=net_data.get('description'),
-            vlan_ids=vlan_ids,
-            transport_zone_id=provider_data['physical_net'],
-            tags=tags)
+            net_name, **kwargs)

         if not admin_state and cfg.CONF.nsx_p.allow_passthrough:
             # This api uses the passthrough api
@@ -689,10 +695,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
             is_backend_network = False
         else:
             provider_data = self._validate_provider_create(
-                context, net_data,
-                az._default_vlan_tz_uuid,
-                az._default_overlay_tz_uuid,
-                az._native_md_proxy_uuid,
+                context, net_data, az,
                 self.nsxpolicy.transport_zone,
                 self.nsxpolicy.segment,
                 transparent_vlan=vlt)
@@ -737,7 +740,7 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         if is_backend_network:
             try:
                 self._create_network_on_backend(
-                    context, created_net, vlt, provider_data)
+                    context, created_net, vlt, provider_data, az)
             except Exception as e:
                 LOG.exception("Failed to create NSX network network: %s", e)
                 with excutils.save_and_reraise_exception():
@@ -750,12 +753,13 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
             resource_extend.apply_funcs('networks', created_net, net_model)

         # MD Proxy is currently supported by the passthrough api only
-        if is_backend_network and cfg.CONF.nsx_p.allow_passthrough:
+        if (is_backend_network and not az.use_policy_md and
+                cfg.CONF.nsx_p.allow_passthrough):
             try:
                 # The new segment was not realized yet. Waiting for a bit.
                 time.sleep(cfg.CONF.nsx_p.realization_wait_sec)
                 nsx_net_id = self._get_network_nsx_id(context, net_id)
-                self._create_net_mdproxy_port(
+                self._create_net_mp_mdproxy_port(
                     context, created_net, az, nsx_net_id)
             except Exception as e:
                 LOG.exception("Failed to create mdproxy port for network %s: "
@@ -785,9 +789,13 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
         # checks on active ports
         self._retry_delete_network(context, network_id)

-        # MD Proxy is currently supported by the passthrough api only.
-        # Use it to delete mdproxy ports
-        if not is_external_net and cfg.CONF.nsx_p.allow_passthrough:
+        # Delete the MD proxy port. This is relevant only if the plugin used
+        # the MP MD proxy when this network was created.
+        # If not - the port will not be found, and that is ok.
+        # Note(asarfaty): In the future this code can be removed.
+        if (not is_external_net and cfg.CONF.nsx_p.allow_passthrough and
+                not self.nsxpolicy.feature_supported(
+                    nsxlib_consts.FEATURE_NSX_POLICY_MDPROXY)):
             self._delete_nsx_port_by_network(network_id)

         # Delete the network segment from the backend
@@ -3087,3 +3095,28 @@ class NsxPolicyPlugin(nsx_plugin_common.NsxPluginV3Base):
             extra_rules.extend(vpn_rules)

         return extra_rules
+
+    def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid):
+        """Validate that the network TZ matches the mdproxy edge cluster"""
+        if not self.nsxlib:
+            # No passthrough api support
+            return True
+        if az.use_policy_md:
+            # Policy obj
+            md_ec_path = self.nsxpolicy.md_proxy.get(
+                mdproxy_uuid).get('edge_cluster_path')
+            md_ec = p_utils.path_to_id(md_ec_path)
+        else:
+            # MP obj
+            md_ec = self.nsxlib.native_md_proxy.get(
+                mdproxy_uuid).get('edge_cluster_id')
+        ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(md_ec)
+        ec_tzs = []
+        for tn_uuid in ec_nodes:
+            ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones(
+                tn_uuid))
+        if tz_uuid not in ec_tzs:
+            return False
+        return True
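
The validation above reduces to: resolve the MD proxy's edge cluster (via its
'edge_cluster_path' for a policy object, or 'edge_cluster_id' for an MP
object), then check that the network's transport zone is served by at least
one transport node of that cluster. A condensed sketch with plain-dict
stand-ins for the backend lookups (hypothetical data, for illustration only):

def mdproxy_tz_ok(tz_uuid, md_ec_id, ec_transport_nodes, node_tzs):
    """ec_transport_nodes: {ec_id: [tn_uuid]}; node_tzs: {tn_uuid: [tz_uuid]}."""
    ec_tzs = []
    for tn_uuid in ec_transport_nodes.get(md_ec_id, []):
        ec_tzs.extend(node_tzs.get(tn_uuid, []))
    return tz_uuid in ec_tzs

# Example: mdproxy_tz_ok('tz-1', 'ec-1', {'ec-1': ['tn-1']}, {'tn-1': ['tz-1']})
# returns True; with {'tn-1': ['tz-2']} it returns False.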

View File

@@ -815,10 +815,7 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
     def _create_network_at_the_backend(self, context, net_data, az,
                                        transparent_vlan):
         provider_data = self._validate_provider_create(
-            context, net_data,
-            az._default_vlan_tz_uuid,
-            az._default_overlay_tz_uuid,
-            az._native_md_proxy_uuid,
+            context, net_data, az,
             self.nsxlib.transport_zone,
             self.nsxlib.logical_switch,
             transparent_vlan=transparent_vlan)
@@ -978,7 +975,7 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
             resource_extend.apply_funcs('networks', created_net, net_model)

             if is_backend_network:
-                self._create_net_mdproxy_port(
+                self._create_net_mp_mdproxy_port(
                     context, created_net, az, nsx_net_id)
         except Exception:
@@ -3442,3 +3439,16 @@ class NsxV3Plugin(nsx_plugin_common.NsxPluginV3Base,
         if len(port_tags) != orig_len:
             self.nsxlib.logical_port.update(
                 nsx_lport_id, False, tags=port_tags)
+
+    def _validate_net_mdproxy_tz(self, az, tz_uuid, mdproxy_uuid):
+        """Validate that the network TZ matches the mdproxy edge cluster"""
+        mdproxy_obj = self.nsxlib.native_md_proxy.get(mdproxy_uuid)
+        ec_id = mdproxy_obj['edge_cluster_id']
+        ec_nodes = self.nsxlib.edge_cluster.get_transport_nodes(ec_id)
+        ec_tzs = []
+        for tn_uuid in ec_nodes:
+            ec_tzs.extend(self.nsxlib.transport_node.get_transport_zones(
+                tn_uuid))
+        if tz_uuid not in ec_tzs:
+            return False
+        return True

View File

@@ -919,7 +919,8 @@ class NsxNativeMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin):
     def test_metadata_proxy_with_create_network(self):
         # Test if native metadata proxy is enabled on a network when it is
-        # created.
+        # created (Using MP MDproxy).
+        self.plugin._availability_zones_data._default_az.use_policy_md = False
         with mock.patch.object(nsx_resources.LogicalPort,
                                'create') as create_logical_port:
             with self.network() as network:
@@ -938,7 +939,9 @@ class NsxNativeMetadataTestCase(test_plugin.NsxPPluginTestCaseMixin):
     def test_metadata_proxy_with_create_az_network(self):
         # Test if native metadata proxy is enabled on a network when it is
-        # created.
+        # created (Using MP MDproxy).
+        azs = self.plugin._availability_zones_data.availability_zones
+        azs[self._az_name].use_policy_md = False
         with mock.patch.object(nsx_resources.LogicalPort,
                                'create') as create_logical_port:
             with self.network(

View File

@@ -100,7 +100,7 @@ class NsxPPluginTestCaseMixin(
                                 'display_name': 'test'}]}
         mock.patch(
             "vmware_nsxlib.v3.policy.NsxPolicyLib.get_version",
-            return_value=nsx_constants.NSX_VERSION_2_5_0).start()
+            return_value=nsx_constants.NSX_VERSION_3_0_0).start()
         mock.patch(
             "vmware_nsxlib.v3.client.RESTClient.get").start()
         mock.patch(
@@ -142,7 +142,7 @@ class NsxPPluginTestCaseMixin(
             return_value=nsxlib_utils.TagLimits(20, 40, 15)).start()

         # Add some nsxlib mocks for the passthrough apis
         mock.patch("vmware_nsxlib.v3.NsxLib.get_version",
-                   return_value=nsx_constants.NSX_VERSION_2_5_0).start()
+                   return_value=nsx_constants.NSX_VERSION_3_0_0).start()
         mock.patch("vmware_nsxlib.v3.core_resources.NsxLibLogicalRouter."
                    "update").start()
         mock.patch("vmware_nsxlib.v3.core_resources.NsxLibTransportNode."