[DEFAULT]
# User name for NSX controller
# nsx_user = admin
# Password for NSX controller
# nsx_password = admin
# Time before aborting a request on an unresponsive controller (Seconds)
# http_timeout = 75
# Maximum number of times a particular request should be retried
# retries = 2
# Maximum number of times a redirect response should be followed
# redirects = 2
# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80
# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =
# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified if tenants should be offered a predefined gateway for
# connecting their networks.
# default_l2_gw_service_uuid =
# (Optional) UUID for the default service cluster. A service cluster
# represents a group of gateways and is required in order to use logical
# services such as DHCP and metadata in the logical space. NOTE: if agent_mode
# is set to 'agentless', this config parameter *MUST BE* set to a valid
# pre-existing service cluster uuid.
# default_service_cluster_uuid =
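# For example (placeholder UUID, shown for illustration only):
# default_service_cluster_uuid = 2cb2d0ea-8a19-4bcd-bd46-6788f0b7e6b4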
# Default interface name to be used on the network gateway. This value will
# be used for any device associated with a network gateway for which an
# interface name was not specified.
# nsx_default_interface_name = breth0
# Reconnect to the NSX controller if the connection has been idle for longer
# than this amount of time (seconds).
# conn_idle_timeout = 900
[quotas]
# Number of network gateways allowed per tenant; -1 means unlimited
# quota_network_gateway = 5
[nsxv]
# URL for NSXv manager
# manager_uri = https://management_ip
# User name for NSXv manager
# user = admin
# Password for NSXv manager
# password = default
# Specify a CA bundle file to use in verifying the NSXv server certificate.
# ca_file =
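# For example (hypothetical path, shown for illustration only):
# ca_file = /etc/neutron/certs/nsxv-ca.pem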
# If true, the NSXv server certificate is not verified. If false,
# then the default CA truststore is used for verification. This option
# is ignored if "ca_file" is set.
# insecure = true
# (Required) Datacenter ID for Edge deployment
# datacenter_moid =
# (Required) Cluster IDs for clusters containing OpenStack hosts
# cluster_moid =
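# For example (hypothetical vCenter managed object IDs):
# datacenter_moid = datacenter-2
# cluster_moid = domain-c7,domain-c8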
# (Optional) Deployment Container ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =
# (Optional) Resource pool ID for NSX Edge deployment
# resource_pool_id =
# (Optional) Datastore ID for NSX Edge deployment
# datastore_id =
# (Required) UUID of the logical switch for physical network connectivity
# external_network =
# (Optional) Asynchronous task status check interval
# default is 2000 (milliseconds)
# task_status_check_interval = 2000
# (Optional) Network scope ID for VXLAN virtual wires
# vdn_scope_id =
# (Optional) DVS ID for VLANS
# dvs_id =
# (ListOpt) Define backup edge pool's management range with the four-tuple:
# <edge_type>:[edge_size]:<minimum_pooled_edges>:<maximum_pooled_edges>.
# edge_type: 'service' (service edge) or 'vdr' (distributed edge).
# edge_size: 'compact', 'large' (default), 'xlarge' or 'quadlarge'.
#
# By default, the edge pool manager maintains service edges in both compact
# and large sizes, and distributed edges in large size, as follows:
# backup_edge_pool = service:large:4:10,service:compact:4:10,vdr:large:4:10
# (Optional) Maximum number of sub-interfaces supported per vNIC on an edge
# default is 20
# maximum_tunnels_per_vnic = 20
# Maximum number of API retries
# retries = 10
# (Optional) Network ID for management network connectivity
# mgt_net_moid =
# (Optional) Management network IP address for metadata proxy
# mgt_net_proxy_ips =
# (Optional) Management network netmask for metadata proxy
# mgt_net_proxy_netmask =
# (Optional) Management network default gateway for metadata proxy
# mgt_net_default_gateway =
# (Optional) IP addresses used by Nova metadata service
# nova_metadata_ips =
# (Optional) TCP Port used by Nova metadata server
# nova_metadata_port = 8775
# (Optional) Shared secret to sign metadata requests
# metadata_shared_secret =
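# For example, a consistent set of values for a hypothetical /24 management
# network (all addresses below are illustrative assumptions):
# mgt_net_moid = network-222
# mgt_net_proxy_ips = 192.168.128.2,192.168.128.3
# mgt_net_proxy_netmask = 255.255.255.0
# mgt_net_default_gateway = 192.168.128.1
# nova_metadata_ips = 192.168.128.10
# nova_metadata_port = 8775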
# (Optional) Indicates whether the NSXv SpoofGuard component is used to
# implement the port-security feature.
# spoofguard_enabled = True
# (Optional) Deploys NSX Edges in HA mode
# edge_ha = True
# (ListOpt) Ordered list of router_types to allocate as tenant routers.
# It limits the router types that NSXv can support for tenants:
# distributed: router is backed by a distributed edge at the backend.
# shared: multiple routers share the same service edge at the backend.
# exclusive: router exclusively occupies one service edge at the backend.
# NSXv selects the first available router type from the tenant_router_types
# list if a router type is not specified.
# If the tenant specifies the router type with "--distributed",
# "--router_type exclusive" or "--router_type shared", NSXv verifies that
# the router type is in tenant_router_types.
# Admins may use all three router types.
#
# tenant_router_types = shared, distributed, exclusive
# Example: tenant_router_types = distributed, shared
# (Optional) Enable an administrator to configure the Edge appliance login.
# Username to configure for Edge appliance login
# edge_appliance_user =
# (Optional) Password to configure for Edge appliance login
# edge_appliance_password =
# (Optional) URL for distributed locking coordination resource for lock manager
# This value is passed as a parameter to tooz coordinator.
# By default, value is None and oslo_concurrency is used for single-node
# lock management.
# locking_coordinator_url =
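# For example, a memcached-backed tooz coordinator (hypothetical endpoint):
# locking_coordinator_url = memcached://192.168.10.5:11211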
# (Optional) DHCP lease time
# dhcp_lease_time = 86400
[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000
# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256
# Number of connections to each controller node.
# default is 10
# concurrent_connections = 10
# Number of seconds a generation ID should be valid for (default -1, meaning no timeout)
# nsx_gen_timeout = -1
# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via Neutron router.
# - 'dhcp_host_route': this enables host route injection via the DHCP agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise, access_network should be used.
# metadata_mode = access_network
# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt
# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode works only on NSX 4.1.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode. With this mode, existing
# networks keep being served by the existing infrastructure (thus preserving
# backward compatibility), whereas new networks will be served by the new
# infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent
# Specifies the mode in which packet replication should be performed. If set
# to 'service', a service node is required in order to perform packet
# replication. This can also be set to 'source' if one wants replication to be
# performed locally (NOTE: usually only useful for testing if one does not
# want to deploy a service node).
# In order to leverage distributed routers, replication_mode should be set to
# "service".
# replication_mode = service
# Specify the class path for the Layer 2 gateway backend driver (i.e. NSXv3/NSX-V).
# This field will be used when an L2 Gateway service plugin is configured.
# nsx_l2gw_driver = vmware_nsx.neutron.services.l2gateway.nsx_v3_driver.NsxV3Driver
[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should therefore be large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous one completes.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10
# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0
# Minimum delay, in seconds, between two status synchronization requests for
# NSX. Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the controller
# might take time to respond, and they might noticeably increase its load.
# This parameter allows specifying a minimum interval between two subsequent
# requests.
# The value for this parameter must never exceed state_sync_interval;
# if it does, an error will be raised at startup.
# min_sync_req_delay = 1
# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is
# such that using the minimum chunk size would cause the interval between two
# requests to be less than min_sync_req_delay.
# min_chunk_size = 500
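# For example, an illustrative tuning for a lightly loaded controller (note
# that min_sync_req_delay stays below state_sync_interval; values are
# assumptions, not recommendations):
# state_sync_interval = 120
# max_random_sync_delay = 10
# min_sync_req_delay = 5
# min_chunk_size = 500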
# Enable this option to allow on-demand state synchronization on show
# operations. When enabled, show operations will always fetch the operational
# status of the resource from the NSX backend, which might have a considerable
# impact on overall performance.
# always_read_status = False
[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful for rebuilding the local store in case of
# server recovery.
# sync_on_missing_data = False
[nsx_dhcp]
# (Optional) Comma-separated list of additional DNS servers. Default is an empty list
# extra_domain_name_servers =
# Domain to use for building the hostnames
# domain_name = openstacklocal
# Default DHCP lease time
# default_lease_time = 43200
[nsx_metadata]
# IP address used by Metadata server
# metadata_server_address = 127.0.0.1
# TCP Port used by Metadata server
# metadata_server_port = 8775
# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match the configuration used by the Metadata server.
# metadata_shared_secret =
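# For example (hypothetical secret; the same value must also be configured
# on the Nova metadata service, e.g. via metadata_proxy_shared_secret):
# metadata_shared_secret = gEhraPRquAHWDf1w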
[nsx_v3]
# IP address of NSX manager
# nsx_manager = 1.2.3.4
# User name of NSX Manager
# nsx_user = admin
# Password of NSX Manager
# nsx_password = default
# UUID of the default NSX overlay transport zone that will be used for creating
# tunneled isolated Neutron networks
# default_overlay_tz_uuid = afc40f8a-4967-477e-a17a-9d560d1786c7
# UUID of default NSX VLAN transport zone that will be used for bridging
# between Neutron Networks
# default_vlan_tz_uuid = afc40f8a-4967-477e-a17a-9d560d1786c7
# Default Edge Cluster Identifier
# default_edge_cluster_uuid = afc40f8a-4967-477e-a17a-9d560d1786c7
# Maximum number of times to retry API requests
# retries = 10
# Specify a CA bundle file to use in verifying the NSX Manager
# server certificate. This option is ignored if "insecure" is set to True.
# ca_file =
# If true, the NSX Manager server certificate is not verified. If false,
# then the default CA truststore is used for verification.
# insecure = True
# UUID of the default tier0 router that will be used for connecting to
# tier1 logical routers and configuring external network
# default_tier0_router_uuid = 412983fd-9016-45e5-93f2-48ba2a931225
# UUID of the default NSX bridge cluster that will be used to perform
# L2 gateway bridging between VXLAN and VLAN networks. This is an optional
# field. If the default bridge cluster UUID is not specified, the admin will
# have to manually create an L2 gateway corresponding to an NSX bridge cluster
# using the L2 gateway APIs.
# This field must be specified on one of the active neutron servers only.
# default_bridge_cluster_uuid =
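# For example (placeholder UUID, shown for illustration only):
# default_bridge_cluster_uuid = 5b4bb6f1-02ef-4a1a-8b1a-64d0a5d9e26d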