NSX|V3: Add support for native metadata proxy service

This patch allows VMs on Neutron networks to switch from
Neutron metadata proxy/agent to NSX native metadata proxy/agent.

It also includes an admin tool to enable native metadata on existing
Neutron networks.

Change-Id: I0a0a025dad765106f7a6e593266d412f921f96fa
This commit is contained in:
Shih-Hao Li 2016-06-22 09:09:41 -07:00 committed by garyk
parent eb88764461
commit 03eea64ef9
11 changed files with 248 additions and 25 deletions

View File

@ -128,6 +128,7 @@ function neutron_plugin_configure_service {
_nsxv3_ini_set ca_file $NSX_CA_FILE
_nsxv3_ini_set default_bridge_cluster $DEFAULT_BRIDGE_CLUSTER_UUID
_nsxv3_ini_set dhcp_profile_uuid $DHCP_PROFILE_UUID
_nsxv3_ini_set metadata_proxy_uuid $METADATA_PROXY_UUID
}
function neutron_plugin_setup_interface_driver {

View File

@ -12,3 +12,4 @@ EDGE_CLUSTER_UUID=<FILL_IN>
NSX_MANAGER=<FILL_IN>
NSX_CONTROLLERS=<FILL_IN>
DHCP_PROFILE_UUID=<FILL_IN>
METADATA_PROXY_UUID=<FILL_IN>

View File

@ -369,6 +369,11 @@ nsx_v3_opts = [
help=_("List of nameservers to configure for the DHCP "
"binding entries. These will be used if there are no "
"nameservers defined on the subnet.")),
cfg.StrOpt('metadata_proxy_uuid',
help=_("This is the UUID of the NSX Metadata Proxy that will "
"be used to enable native metadata service. It needs "
"to be created in NSX before starting Neutron with "
"the NSX plugin.")),
cfg.BoolOpt('log_security_groups_blocked_traffic',
default=False,
help=_("(Optional) Indicates whether distributed-firewall "

View File

@ -23,6 +23,7 @@ ADMIN_STATUSES = [ADMIN_STATE_UP, ADMIN_STATE_DOWN]
# Attachment types accepted by the NSX v3 backend when creating a logical
# switch port (passed as the attachment_type of the port body).
ATTACHMENT_VIF = "VIF"
ATTACHMENT_LR = "LOGICALROUTER"
ATTACHMENT_DHCP = "DHCP_SERVICE"
ATTACHMENT_MDPROXY = "METADATA_PROXY"
ATTACHMENT_CIF = "CIF"
CIF_RESOURCE_TYPE = "CifAttachmentContext"

View File

@ -453,6 +453,19 @@ class DhcpProfile(AbstractRESTResource):
pass
class MetaDataProxy(AbstractRESTResource):
    """REST resource wrapper for NSX metadata proxy objects.

    The plugin only needs to look up an existing proxy (it is created in
    NSX out of band, before Neutron starts), so create/update are no-ops.
    """

    @property
    def uri_segment(self):
        # URI segment under which metadata proxies live on the NSX manager.
        return 'md-proxies'

    def create(self, *args, **kwargs):
        # Intentionally unsupported: proxies are pre-created in NSX.
        pass

    def update(self, uuid, *args, **kwargs):
        # Intentionally unsupported: proxies are managed directly in NSX.
        pass
class LogicalDhcpServer(AbstractRESTResource):
@property

View File

@ -80,6 +80,7 @@ from vmware_nsx.db import extended_security_group
from vmware_nsx.db import extended_security_group_rule as extend_sg_rule
from vmware_nsx.db import maclearning as mac_db
from vmware_nsx.dhcp_meta import rpc as nsx_rpc
from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.extensions import maclearning as mac_ext
from vmware_nsx.extensions import securitygrouplogging as sg_logging
from vmware_nsx.nsxlib import v3 as nsxlib
@ -163,7 +164,7 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
self.cfg_group = 'nsx_v3' # group name for nsx_v3 section in nsx.ini
self.tier0_groups_dict = {}
self._init_dhcp()
self._init_dhcp_metadata()
self._port_client = nsx_resources.LogicalPort(self._nsx_client)
self.nsgroup_manager, self.default_section = (
@ -374,18 +375,19 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
with locking.LockManager.get_lock('nsxv3_nsgroup_manager_init'):
return security.init_nsgroup_manager_and_default_section_rules()
def _init_dhcp(self):
    def _init_dhcp_metadata(self):
        """Initialize DHCP and metadata handling, native or agent-based.

        When CONF.nsx_v3.native_dhcp_metadata is enabled, the NSX backend
        serves both DHCP and metadata, so the Neutron DHCP agent
        notification machinery must be disabled; otherwise fall back to
        the traditional agent-based setup.

        :raises nsx_exc.NsxPluginException: if dhcp_agent_notification is
            enabled together with native_dhcp_metadata.
        """
        if cfg.CONF.nsx_v3.native_dhcp_metadata:
            if cfg.CONF.dhcp_agent_notification:
                msg = _("Need to disable dhcp_agent_notification when "
                        "native_dhcp_metadata is enabled")
                raise nsx_exc.NsxPluginException(err_msg=msg)
            self._init_native_dhcp()
            self._init_native_metadata()
        else:
            self._setup_dhcp()
            self._start_rpc_notifiers()
def _init_native_dhcp(self):
if cfg.CONF.dhcp_agent_notification:
msg = _("Need to disable dhcp_agent_notification when "
"native_dhcp_metadata is enabled")
raise nsx_exc.NsxPluginException(err_msg=msg)
if not cfg.CONF.nsx_v3.dhcp_profile_uuid:
raise cfg.RequiredOptError("dhcp_profile_uuid")
try:
@ -399,6 +401,18 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
"native DHCP service is not supported"),
cfg.CONF.nsx_v3.dhcp_profile_uuid)
    def _init_native_metadata(self):
        """Validate the configured NSX native metadata proxy at startup.

        :raises cfg.RequiredOptError: if metadata_proxy_uuid is not set.
        :raises nsx_exc.ManagerError: (re-raised) if the configured proxy
            cannot be retrieved from the NSX backend.
        """
        if not cfg.CONF.nsx_v3.metadata_proxy_uuid:
            raise cfg.RequiredOptError("metadata_proxy_uuid")
        try:
            # Fail fast if the configured UUID does not resolve in NSX.
            nsx_resources.MetaDataProxy(self._nsx_client).get(
                cfg.CONF.nsx_v3.metadata_proxy_uuid)
        except nsx_exc.ManagerError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Unable to retrieve Metadata Proxy %s, "
                              "native metadata service is not supported"),
                          cfg.CONF.nsx_v3.metadata_proxy_uuid)
def _setup_rpc(self):
self.endpoints = [dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback(),
@ -585,6 +599,21 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
err_msg = _("Cannot configure QOS on networks")
raise n_exc.InvalidInput(error_message=err_msg)
    def get_subnets(self, context, filters=None, fields=None, sorts=None,
                    limit=None, marker=None, page_reverse=False):
        """List subnets, translating logical-switch filters when present.

        Metadata requests (coming from Nova through the
        advanced-service-providers extension) filter subnets by NSX
        logical switch id; map each logical switch back to its Neutron
        network id before delegating to the standard implementation.
        """
        filters = filters or {}
        lswitch_ids = filters.pop(as_providers.ADV_SERVICE_PROVIDERS, [])
        if lswitch_ids:
            # This is a request from Nova for metadata processing.
            # Find the corresponding neutron network for each logical switch.
            network_ids = filters.pop('network_id', [])
            # Use an elevated context: the mapping lookup is not scoped
            # to the requesting tenant.
            context = context.elevated()
            for lswitch_id in lswitch_ids:
                network_ids += nsx_db.get_net_ids(context.session, lswitch_id)
            filters['network_id'] = network_ids
        return super(NsxV3Plugin, self).get_subnets(
            context, filters, fields, sorts, limit, marker, page_reverse)
def create_network(self, context, network):
net_data = network['network']
external = net_data.get(ext_net_extn.EXTERNAL)
@ -637,6 +666,20 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
context.session,
neutron_net_id,
nsx_net_id)
if cfg.CONF.nsx_v3.native_dhcp_metadata:
# Enable native metadata proxy for this network.
tags = utils.build_v3_tags_payload(
net_data, resource_type='os-neutron-net-id',
project_name=context.tenant_name)
md_port = self._port_client.create(
nsx_net_id, cfg.CONF.nsx_v3.metadata_proxy_uuid,
tags=tags,
attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
LOG.info(_LI("Created MD-Proxy logical port %(port)s "
"for network %(network)s"),
{'port': md_port['id'],
'network': net_data['id']})
except Exception:
with excutils.save_and_reraise_exception():
# Undo creation on the backend
@ -995,7 +1038,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
{'server': dhcp_service['nsx_service_id'],
'network': orig_subnet['network_id']})
if cfg.CONF.nsx_v3.metadata_on_demand:
if (cfg.CONF.nsx_v3.metadata_on_demand and
not cfg.CONF.nsx_v3.native_dhcp_metadata):
# If enable_dhcp is changed on a subnet attached to a router,
# update internal metadata network accordingly.
if 'enable_dhcp' in subnet['subnet']:
@ -1547,7 +1591,8 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
if cfg.CONF.nsx_v3.native_dhcp_metadata:
self._add_dhcp_binding(context, port_data)
nsx_rpc.handle_port_metadata_access(self, context, neutron_db)
if not cfg.CONF.nsx_v3.native_dhcp_metadata:
nsx_rpc.handle_port_metadata_access(self, context, neutron_db)
return port_data
def _pre_delete_port_check(self, context, port_id, l2gw_port_check):
@ -1587,8 +1632,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# Remove Mac/IP binding from native DHCP server and neutron DB.
if cfg.CONF.nsx_v3.native_dhcp_metadata:
self._delete_dhcp_binding(context, port)
nsx_rpc.handle_port_metadata_access(self, context, port,
is_delete=True)
else:
nsx_rpc.handle_port_metadata_access(self, context, port,
is_delete=True)
super(NsxV3Plugin, self).delete_port(context, port_id)
def _update_port_preprocess_security(
@ -2072,8 +2118,9 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
return self.get_router(context, router['id'])
def delete_router(self, context, router_id):
nsx_rpc.handle_router_metadata_access(self, context, router_id,
interface=None)
if not cfg.CONF.nsx_v3.native_dhcp_metadata:
nsx_rpc.handle_router_metadata_access(self, context, router_id,
interface=None)
router = self.get_router(context, router_id)
if router.get(l3.EXTERNAL_GW_INFO):
self._update_router_gw_info(context, router_id, {})
@ -2289,11 +2336,12 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
# TODO(berlin): Announce the subnet on tier0 if enable_snat
# is False
pass
# Ensure the NSX logical router has a connection to a
# 'metadata access' network (with a proxy listening on
# its DHCP port), by creating it if needed.
nsx_rpc.handle_router_metadata_access(self, context, router_id,
interface=info)
if not cfg.CONF.nsx_v3.native_dhcp_metadata:
# Ensure the NSX logical router has a connection to a
# 'metadata access' network (with a proxy listening on
# its DHCP port), by creating it if needed.
nsx_rpc.handle_router_metadata_access(self, context, router_id,
interface=info)
except nsx_exc.ManagerError:
with excutils.save_and_reraise_exception():
self.remove_router_interface(
@ -2367,10 +2415,11 @@ class NsxV3Plugin(agentschedulers_db.AZDhcpAgentSchedulerDbMixin,
'net_id': subnet['network_id']})
info = super(NsxV3Plugin, self).remove_router_interface(
context, router_id, interface_info)
# Ensure the connection to the 'metadata access network' is removed
# (with the network) if this is the last DHCP-disabled subnet on the
# router.
nsx_rpc.handle_router_metadata_access(self, context, router_id)
if not cfg.CONF.nsx_v3.native_dhcp_metadata:
# Ensure the connection to the 'metadata access network' is removed
# (with the network) if this is the last DHCP-disabled subnet on
# the router.
nsx_rpc.handle_router_metadata_access(self, context, router_id)
return info
def create_floatingip(self, context, floatingip):

View File

@ -38,3 +38,4 @@ BACKUP_EDGES = 'backup-edges'
ORPHANED_EDGES = 'orphaned-edges'
MISSING_EDGES = 'missing-edges'
METADATA = 'metadata'
METADATA_PROXY = 'metadata-proxy'

View File

@ -0,0 +1,63 @@
# Copyright 2016 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from neutron.callbacks import registry
from oslo_config import cfg
from vmware_nsx._i18n import _LI, _LE
from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils as nsx_utils
from vmware_nsx.nsxlib.v3 import client
from vmware_nsx.nsxlib.v3 import cluster
from vmware_nsx.nsxlib.v3 import resources
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.nsxv3.resources import utils
import vmware_nsx.shell.nsxadmin as shell
LOG = logging.getLogger(__name__)
neutron_client = utils.NeutronDbClient()
@admin_utils.output_header
def nsx_update_metadata_proxy(resource, event, trigger, **kwargs):
    """Update Metadata proxy for NSXv3 CrossHairs.

    For every existing Neutron network, create an NSX logical switch
    port attached to the metadata proxy configured via
    CONF.nsx_v3.metadata_proxy_uuid, enabling the native metadata
    service on networks created before native metadata was turned on.
    """
    cluster_api = cluster.NSXClusteredAPI()
    nsx_client = client.NSX3Client(cluster_api)
    # NOTE(review): relies on a private client helper to install the
    # default API cluster — confirm this stays valid across upgrades.
    client._set_default_api_cluster(cluster_api)
    port_resource = resources.LogicalPort(nsx_client)

    for network in neutron_client.get_networks():
        # For each Neutron network, create a logical switch port with
        # MD-Proxy attachment.
        lswitch_id = neutron_client.net_id_to_lswitch_id(network['id'])
        if lswitch_id:
            tags = nsx_utils.build_v3_tags_payload(
                network, resource_type='os-neutron-net-id',
                project_name='NSX Neutron plugin upgrade')
            port_resource.create(
                lswitch_id, cfg.CONF.nsx_v3.metadata_proxy_uuid, tags=tags,
                attachment_type=nsx_constants.ATTACHMENT_MDPROXY)
            LOG.info(_LI("Enabled native metadata proxy for network %s"),
                     network['id'])
        else:
            LOG.error(_LE("Unable to find logical switch for network %s"),
                      network['id'])
registry.subscribe(nsx_update_metadata_proxy,
constants.METADATA_PROXY,
shell.Operations.NSX_UPDATE.value)

View File

@ -73,6 +73,14 @@ def make_fake_dhcp_profile():
"edge_cluster_member_indexes": [0, 1]}
def make_fake_metadata_proxy():
    """Build a fake NSX metadata-proxy record for use in unit tests."""
    proxy = {
        "id": uuidutils.generate_uuid(),
        "metadata_server_url": "http://1.2.3.4",
        "secret": "my secret",
        "edge_cluster_id": uuidutils.generate_uuid(),
        "edge_cluster_member_indexes": [0, 1],
    }
    return proxy
def get_resource(resource):
    """Return a minimal resource dict whose id is the last URI segment."""
    _, _, resource_id = resource.rpartition('/')
    return {'id': resource_id}

View File

@ -50,6 +50,7 @@ from vmware_nsx.common import nsx_constants
from vmware_nsx.common import utils
from vmware_nsx.db import db as nsx_db
from vmware_nsx.dhcp_meta import rpc as nsx_rpc
from vmware_nsx.extensions import advancedserviceproviders as as_providers
from vmware_nsx.nsxlib.v3 import client as nsx_client
from vmware_nsx.nsxlib.v3 import cluster as nsx_cluster
from vmware_nsx.nsxlib.v3 import resources as nsx_resources
@ -797,9 +798,9 @@ class NsxNativeDhcpTestCase(NsxV3PluginTestCaseMixin):
cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
self._patcher = mock.patch.object(nsx_resources.DhcpProfile, 'get')
self._patcher.start()
# Need to run _init_native_dhcp() manually because plugin was started
# Need to run _init_dhcp_metadata() manually because plugin was started
# before setUp() overrides CONF.nsx_v3.native_dhcp_metadata.
self.plugin._init_native_dhcp()
self.plugin._init_dhcp_metadata()
def tearDown(self):
self._patcher.stop()
@ -849,12 +850,14 @@ class NsxNativeDhcpTestCase(NsxV3PluginTestCaseMixin):
# configured correctly.
orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
cfg.CONF.set_override('dhcp_agent_notification', True)
self.assertRaises(nsx_exc.NsxPluginException, self.plugin._init_dhcp)
self.assertRaises(nsx_exc.NsxPluginException,
self.plugin._init_dhcp_metadata)
cfg.CONF.set_override('dhcp_agent_notification',
orig_dhcp_agent_notification)
orig_dhcp_profile_uuid = cfg.CONF.nsx_v3.dhcp_profile_uuid
cfg.CONF.set_override('dhcp_profile_uuid', '', 'nsx_v3')
self.assertRaises(cfg.RequiredOptError, self.plugin._init_dhcp)
self.assertRaises(cfg.RequiredOptError,
self.plugin._init_dhcp_metadata)
cfg.CONF.set_override('dhcp_profile_uuid', orig_dhcp_profile_uuid,
'nsx_v3')
@ -1134,3 +1137,78 @@ class NsxNativeDhcpTestCase(NsxV3PluginTestCaseMixin):
context.get_admin_context(),
port['port']['id'])
self.assertEqual(delete_dhcp_binding.call_count, 2)
class NsxNativeMetadataTestCase(NsxV3PluginTestCaseMixin):
    """Tests for the NSX v3 native metadata proxy integration."""

    def setUp(self):
        super(NsxNativeMetadataTestCase, self).setUp()
        # Save the current config so tearDown() can restore it.
        self._orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
        self._orig_native_dhcp_metadata = cfg.CONF.nsx_v3.native_dhcp_metadata
        cfg.CONF.set_override('dhcp_agent_notification', False)
        cfg.CONF.set_override('native_dhcp_metadata', True, 'nsx_v3')
        # Stub out the backend lookup of the metadata proxy object.
        self._patcher = mock.patch.object(nsx_resources.MetaDataProxy, 'get')
        self._patcher.start()
        # Need to run _init_dhcp_metadata() manually because plugin was
        # started before setUp() overrides CONF.nsx_v3.native_dhcp_metadata.
        self.plugin._init_dhcp_metadata()

    def tearDown(self):
        self._patcher.stop()
        cfg.CONF.set_override('dhcp_agent_notification',
                              self._orig_dhcp_agent_notification)
        cfg.CONF.set_override('native_dhcp_metadata',
                              self._orig_native_dhcp_metadata, 'nsx_v3')
        super(NsxNativeMetadataTestCase, self).tearDown()

    def test_metadata_proxy_configuration(self):
        # Test if dhcp_agent_notification and metadata_proxy_uuid are
        # configured correctly.
        orig_dhcp_agent_notification = cfg.CONF.dhcp_agent_notification
        cfg.CONF.set_override('dhcp_agent_notification', True)
        self.assertRaises(nsx_exc.NsxPluginException,
                          self.plugin._init_dhcp_metadata)
        cfg.CONF.set_override('dhcp_agent_notification',
                              orig_dhcp_agent_notification)
        orig_metadata_proxy_uuid = cfg.CONF.nsx_v3.metadata_proxy_uuid
        cfg.CONF.set_override('metadata_proxy_uuid', '', 'nsx_v3')
        self.assertRaises(cfg.RequiredOptError,
                          self.plugin._init_dhcp_metadata)
        cfg.CONF.set_override('metadata_proxy_uuid', orig_metadata_proxy_uuid,
                              'nsx_v3')

    def test_metadata_proxy_with_create_network(self):
        # Test if native metadata proxy is enabled on a network when it is
        # created.
        with mock.patch.object(nsx_resources.LogicalPort,
                               'create') as create_logical_port:
            with self.network() as network:
                nsx_net_id = self.plugin._get_network_nsx_id(
                    context.get_admin_context(), network['network']['id'])
                tags = utils.build_v3_tags_payload(
                    network['network'], resource_type='os-neutron-net-id',
                    project_name=None)
                # The plugin must have attached an MD-Proxy port to the
                # network's logical switch exactly once.
                create_logical_port.assert_called_once_with(
                    nsx_net_id, cfg.CONF.nsx_v3.metadata_proxy_uuid, tags=tags,
                    attachment_type=nsx_constants.ATTACHMENT_MDPROXY)

    def test_metadata_proxy_with_get_subnets(self):
        # Test if get_subnets() handles advanced-service-provider extension,
        # which is used when processing metadata requests.
        with self.network() as n1, self.network() as n2:
            with self.subnet(network=n1) as s1, self.subnet(network=n2) as s2:
                # Get all the subnets.
                subnets = self._list('subnets')['subnets']
                self.assertEqual(len(subnets), 2)
                self.assertEqual(set([s['id'] for s in subnets]),
                                 set([s1['subnet']['id'], s2['subnet']['id']]))
                lswitch_id = nsx_db.get_nsx_switch_ids(
                    context.get_admin_context().session,
                    n1['network']['id'])[0]
                # Get only the subnets associated with a particular advanced
                # service provider (i.e. logical switch).
                subnets = self._list('subnets', query_params='%s=%s' %
                                     (as_providers.ADV_SERVICE_PROVIDERS,
                                      lswitch_id))['subnets']
                self.assertEqual(len(subnets), 1)
                self.assertEqual(subnets[0]['id'], s1['subnet']['id'])

View File

@ -32,6 +32,7 @@ NSX_HTTP_TIMEOUT = 10
NSX_HTTP_READ_TIMEOUT = 180
NSX_TZ_NAME = 'default transport zone'
NSX_DHCP_PROFILE_ID = 'default dhcp profile'
NSX_METADATA_PROXY_ID = 'default metadata proxy'
V3_CLIENT_PKG = 'vmware_nsx.nsxlib.v3.client'
BRIDGE_FNS = ['create_resource', 'delete_resource',
@ -46,6 +47,8 @@ class NsxLibTestCase(unittest.TestCase):
cfg.CONF.set_override('native_dhcp_metadata', False, 'nsx_v3')
cfg.CONF.set_override('dhcp_profile_uuid',
NSX_DHCP_PROFILE_ID, 'nsx_v3')
cfg.CONF.set_override('metadata_proxy_uuid',
NSX_METADATA_PROXY_ID, 'nsx_v3')
cfg.CONF.set_override('nsx_api_user', NSX_USER, 'nsx_v3')
cfg.CONF.set_override('nsx_api_password', NSX_PASSWORD, 'nsx_v3')
cfg.CONF.set_override('nsx_api_managers', [NSX_MANAGER], 'nsx_v3')