NSX: get rid of the last Nicira/NVP bits

These were kept for backward compatibility in Icehouse and
are removed in Juno.

The king is dead, long live the king!

DocImpact
Blueprint: nicira-plugin-renaming

Change-Id: I49d19fec90ba191ed5b895fac8a884070f52119e
armando-migliaccio 2014-05-20 14:21:36 -07:00 committed by mark mcclain
parent 6c05a83071
commit 8343181645
10 changed files with 11 additions and 293 deletions

View File

@@ -1,207 +0,0 @@
# Nicira and NVP are going to be replaced with VMware and
# NSX during the Icehouse timeframe. The old configuration
# file plugins/nicira/nvp.ini is going to be removed soon.
# Please consider using plugins/vmware/nsx.ini, instead.
[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Total time limit for a cluster request
# (including retries across different controllers)
# req_timeout = 30
# Time before aborting a request on an unresponsive controller
# http_timeout = 10
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# Specify this to provide tenants with a predefined gateway for connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster represents
# a group of gateways and is required in order to use Logical Services like
# DHCP and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existing service cluster uuid.
# default_service_cluster_uuid =
# Default interface name to be used on network gateways. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified
# default_interface_name = breth0
[quotas]
# Number of network gateways allowed per tenant; -1 means unlimited
# quota_network_gateway = 5
[vcns]
# URL for VCNS manager
# manager_uri = https://management_ip
# User name for VCNS manager
# user = admin
# Password for VCNS manager
# password = default
# (Optional) Datacenter ID for Edge deployment
# datacenter_moid =
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of the logical switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval in milliseconds
# (default is 2000)
# task_status_check_interval = 2000
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation id should be valid for (default -1, meaning do not time out)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode is supported on NSX 4.2 or above.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode going forward. With this mode,
# existing networks keep being served by the existing infrastructure (thus preserving
# backward compatibility), whereas new networks will be served by the new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies the mode in which packet replication should be performed. If set to
# 'service', a service node is required in order to perform packet replication. This
# can also be set to 'source' if one wants replication to be performed locally (NOTE:
# usually only useful for testing, when one does not want to deploy a service node).
# replication_mode = service
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, which should therefore be large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task will start
# as soon as the previous one completes.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 120
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might increase
# considerably because of them. This parameter specifies a minimum
# interval between two subsequent requests.
# The value of this parameter must never exceed state_sync_interval;
# if it does, an error will be raised at startup.
# min_sync_req_delay = 10
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay
# min_chunk_size = 500
# Enable this option to allow on-demand state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend; this might have
# a considerable impact on overall performance.
# always_read_status = False
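
The interplay between state_sync_interval, min_sync_req_delay and min_chunk_size described above boils down to growing the chunk until the per-request delay is honoured. A rough model of that sizing rule follows; it is a sketch of the documented behaviour, not the plugin's actual scheduler code, with names mirroring the options above:

import math

def pick_chunk_size(total_resources, state_sync_interval=120,
                    min_sync_req_delay=10, min_chunk_size=500):
    # As documented above, min_sync_req_delay may never exceed
    # state_sync_interval.
    if min_sync_req_delay > state_sync_interval:
        raise ValueError('min_sync_req_delay exceeds state_sync_interval')
    # Largest number of requests per pass that still honours the delay.
    max_requests = state_sync_interval // min_sync_req_delay
    # Grow the chunk beyond the minimum if needed.
    return max(min_chunk_size, math.ceil(total_resources / max_requests))

# 12000 resources in 500-item chunks would mean 24 requests per 120s
# pass (one every 5s, below the 10s floor); the chunk grows to 1000.
print(pick_chunk_size(12000))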
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma-separated list of additional DNS servers. Default is an empty list
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match the configuration used by the Metadata server
# metadata_shared_secret =
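
For reference, the signature mentioned above is an HMAC of the instance UUID keyed with the shared secret, sent alongside the proxied request so the Metadata server can verify it. A minimal sketch of that computation, assuming the SHA-256 digest and the X-Instance-ID-Signature header used by the metadata proxy of this era; the helper name is illustrative:

import hashlib
import hmac

def sign_instance_id(shared_secret, instance_id):
    # Proxy and Metadata server must derive the same digest from the
    # same shared secret, otherwise the request is rejected.
    return hmac.new(shared_secret.encode('utf-8'),
                    instance_id.encode('utf-8'),
                    hashlib.sha256).hexdigest()

# Sent as the X-Instance-ID-Signature header of the proxied request.
print(sign_instance_id('s3cr3t', 'ec59d317-7f3c-4a2b-9c3e-1f0a2b3c4d5e'))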

View File

@@ -1,24 +0,0 @@
# Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutron.plugins.vmware.plugins import base
from neutron.plugins.vmware.plugins import service
# Kept for backward compatibility
sys.modules['neutron.plugins.nicira.NeutronPlugin'] = base
sys.modules['neutron.plugins.nicira.NeutronServicePlugin'] = service
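
This shim works because Python consults sys.modules before searching the filesystem: once the legacy dotted path maps to the relocated module object, old imports resolve transparently, which is also why the NvpPluginV2 = NsxPluginV2 class aliases removed further down were enough to keep old plugin names working. A self-contained illustration of the same trick, with made-up module and class names:

import importlib
import sys
import types

# Stand-in for the relocated implementation module.
new_home = types.ModuleType('vendor.plugins.base')

class Plugin(object):
    """Stand-in for the real plugin class."""

new_home.Plugin = Plugin
new_home.OldPlugin = Plugin  # old class name kept as an alias

sys.modules['vendor.plugins.base'] = new_home
# Map the legacy dotted path onto the very same module object.
sys.modules['legacy.plugins.OldModule'] = new_home

# A legacy-style import now yields the relocated module.
mod = importlib.import_module('legacy.plugins.OldModule')
assert mod.OldPlugin is new_home.Plugin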

View File

@@ -405,7 +405,7 @@ class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
raise MultipleGatewayConnections(
gateway_id=network_gateway_id)
# Remove gateway port from network
# FIXME(salvatore-orlando): Ensure state of port in NVP is
# FIXME(salvatore-orlando): Ensure state of port in NSX is
# consistent with outcome of transaction
self.delete_port(context, net_connection['port_id'],
nw_gw_port_check=False)

View File

@@ -1,39 +0,0 @@
# Copyright 2013 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# TODO(armando-migliaccio): This is deprecated in Icehouse, and
# to be removed in Juno.
from neutron.plugins.vmware.extensions import qos
class Nvp_qos(qos.Qos):
"""(Deprecated) Port Queue extension."""
@classmethod
def get_name(cls):
return "nvp-qos"
@classmethod
def get_alias(cls):
return "nvp-qos"
@classmethod
def get_description(cls):
return "NVP QoS extension (deprecated)."
@classmethod
def get_namespace(cls):
return "http://docs.openstack.org/ext/nvp-qos/api/v2.0"

View File

@@ -107,7 +107,6 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
"mac-learning",
"multi-provider",
"network-gateway",
"nvp-qos",
"port-security",
"provider",
"qos-queue",
@@ -2243,7 +2242,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# Get devices from database
devices = super(NsxPluginV2, self).get_gateway_devices(
context, filters, fields, include_nsx_id=True)
# Fetch operational status from NVP, filter by tenant tag
# Fetch operational status from NSX, filter by tenant tag
# TODO(salv-orlando): Asynchronous sync for gateway device status
tenant_id = context.tenant_id if not context.is_admin else None
nsx_statuses = nsx_utils.get_nsx_device_statuses(self.cluster,
@@ -2503,7 +2502,3 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
return
queuelib.delete_lqueue(self.cluster, queue_id)
return super(NsxPluginV2, self).delete_qos_queue(context, queue_id)
# for backward compatibility
NvpPluginV2 = NsxPluginV2

View File

@@ -1795,7 +1795,3 @@ def _process_base_create_lswitch_args(*args, **kwargs):
if kwargs.get("tags"):
tags.extend(kwargs["tags"])
return switch_name, tz_config, tags
# For backward compatibility
NvpAdvancedPlugin = NsxAdvancedPlugin

View File

@@ -13,5 +13,5 @@ http_timeout = 13
redirects = 12
retries = 11
[NVP]
[NSX]
agent_mode = agentless

View File

@@ -888,7 +888,7 @@ class TestNetworkGateway(test_nsx_plugin.NsxPluginV2TestCase,
def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None):
cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
# Mock l2gwlib calls for gateway devices since this resource is not
# mocked through the fake NVP API client
# mocked through the fake NSX API client
create_gw_dev_patcher = mock.patch.object(
l2gwlib, 'create_gateway_device')
update_gw_dev_patcher = mock.patch.object(

View File

@@ -174,7 +174,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_create_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
@@ -196,7 +196,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_update_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
@@ -220,7 +220,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_update_gw_device_without_certificate(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
@@ -243,7 +243,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_get_gw_device_status(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever')
request_mock.assert_called_once_with(
@@ -253,7 +253,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_get_gw_devices_status(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
request_mock.return_value = {
'results': [],
@@ -269,7 +269,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_get_gw_devices_status_filter_by_tenant(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
request_mock.return_value = {
'results': [],
@@ -287,7 +287,7 @@ class L2GatewayTestCase(base.NsxlibTestCase):
def test_delete_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NVP API client
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever')
request_mock.assert_called_once_with(

View File

@@ -67,7 +67,6 @@ data_files =
etc/neutron/plugins/ml2/ml2_conf_ofa.ini
etc/neutron/plugins/mlnx = etc/neutron/plugins/mlnx/mlnx_conf.ini
etc/neutron/plugins/nec = etc/neutron/plugins/nec/nec.ini
etc/neutron/plugins/nicira = etc/neutron/plugins/nicira/nvp.ini
etc/neutron/plugins/oneconvergence = etc/neutron/plugins/oneconvergence/nvsdplugin.ini
etc/neutron/plugins/openvswitch = etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
etc/neutron/plugins/plumgrid = etc/neutron/plugins/plumgrid/plumgrid.ini
@@ -85,7 +84,6 @@ setup-hooks =
[entry_points]
console_scripts =
neutron-check-nsx-config = neutron.plugins.vmware.check_nsx_config:main
neutron-check-nvp-config = neutron.plugins.vmware.check_nsx_config:main
neutron-db-manage = neutron.db.migration.cli:main
neutron-debug = neutron.debug.shell:main
neutron-dhcp-agent = neutron.agent.dhcp_agent:main
@@ -123,7 +121,6 @@ neutron.core_plugins =
ml2 = neutron.plugins.ml2.plugin:Ml2Plugin
mlnx = neutron.plugins.mlnx.mlnx_plugin:MellanoxEswitchPlugin
nec = neutron.plugins.nec.nec_plugin:NECPluginV2
nicira = neutron.plugins.nicira.NeutronPlugin:NvpPluginV2
oneconvergence = neutron.plugins.oneconvergence.plugin:OneConvergencePluginV2
openvswitch = neutron.plugins.openvswitch.ovs_neutron_plugin:OVSNeutronPluginV2
plumgrid = neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin:NeutronPluginPLUMgridV2
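
The names under neutron.core_plugins above are setuptools entry points: the core_plugin value in neutron.conf is matched against them and the referenced class is imported. A sketch of that lookup via pkg_resources, for illustration only (Neutron's actual loader differs and also accepts full dotted class paths; this assumes neutron is installed so the entry points are registered). After this commit the 'nicira' alias no longer resolves, while 'ml2' and friends still do:

import pkg_resources

def load_core_plugin(alias):
    # Look the alias up among the installed entry points.
    for ep in pkg_resources.iter_entry_points('neutron.core_plugins',
                                              name=alias):
        return ep.load()  # imports the module, returns the class
    raise RuntimeError('unknown core plugin: %s' % alias)

# load_core_plugin('ml2')    -> neutron.plugins.ml2.plugin.Ml2Plugin
# load_core_plugin('nicira') -> RuntimeError after this commit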