Remove vmware-nsx's static example configuration file
This patch is a follow-up to the auto-generated config file patch [1]; it removes the static example nsx.ini file from the repo, as it is now redundant.

[1]: https://review.openstack.org/#/c/303673/

Depends-On: Iff4ea37b52616295b262ead53947acb5b0cd9cd7
Change-Id: I61ee6fe873cfeac9dfe6d9eb7b0f90dd7c251d51
Partial-bug: #1568215
This commit is contained in:
parent
2d8dc8291b
commit
5f1a62a0f1
431
etc/nsx.ini
@@ -1,431 +0,0 @@
[DEFAULT]
# User name for NSX controller
# nsx_user = admin

# Password for NSX controller
# nsx_password = admin

# Time before aborting a request on an unresponsive controller (Seconds)
# http_timeout = 75

# Maximum number of times a particular request should be retried
# retries = 2

# Maximum number of times a redirect response should be followed
# redirects = 2

# Comma-separated list of NSX controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nsx_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh:80

# UUID of the pre-existing default NSX Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53

# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =

# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified for providing tenants with a predefined gateway for
# connecting their networks.
# default_l2_gw_service_uuid =

# (Optional) UUID for the default service cluster. A service cluster is introduced to
# represent a group of gateways and it is needed in order to use Logical Services like
# dhcp and metadata in the logical space. NOTE: If agent_mode is set to 'agentless' this
# config parameter *MUST BE* set to a valid pre-existing service cluster uuid.
# default_service_cluster_uuid =

# Name of the default interface to be used on network-gateway devices. This
# value will be used for any device associated with a network gateway for
# which an interface name was not specified.
# nsx_default_interface_name = breth0

# Reconnect to NSX if the connection has not been used within this amount of
# time (seconds).
# conn_idle_timeout = 900

# Specify the class path for the Layer 2 gateway backend driver (i.e. NSXv3/NSX-V).
# This field will be used when an L2 Gateway service plugin is configured.
# nsx_l2gw_driver =

# (Optional) URL of the distributed locking coordination resource for the
# lock manager. This value is passed as a parameter to the tooz coordinator.
# By default, the value is None and oslo_concurrency is used for single-node
# lock management.
# locking_coordinator_url =
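# Illustrative example only: the backend and endpoint below are placeholders
# for whichever tooz coordination backend the deployment actually uses.
# locking_coordinator_url = zookeeper://127.0.0.1:2181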

[quotas]
# Number of network gateways allowed per tenant, -1 means unlimited
# quota_network_gateway = 5

[nsxv]
# URL for NSXv manager
# manager_uri = https://management_ip

# User name for NSXv manager
# user = admin

# Password for NSXv manager
# password = default

# Specify a CA bundle file to use in verifying the NSXv server certificate.
# ca_file =

# If True, the NSXv server certificate is not verified. If False,
# then the default CA truststore is used for verification. This option
# is ignored if "ca_file" is set.
# insecure = True

# (Required) Datacenter MoRef ID for Edge deployment
# datacenter_moid =

# (Required) Cluster MoRef IDs for OpenStack compute clusters, comma separated
# cluster_moid =
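# Illustrative example only (the MoRef IDs below are placeholders, not values
# from a real vCenter inventory):
# cluster_moid = domain-c7,domain-c9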

# (Optional) Deployment Container MoRef ID for NSX Edge deployment
# If not specified, either a default global container will be used, or
# the resource pool and datastore specified below will be used
# deployment_container_id =

# (Optional) Resource pool MoRef ID for NSX Edge deployment
# resource_pool_id =

# (Optional) Datastore MoRef ID for NSX Edge deployment
# datastore_id =

# (Required) Portgroup MoRef ID for Edge physical network connectivity
# external_network =

# (Optional) Asynchronous task status check interval
# default is 2000 (milliseconds)
# task_status_check_interval = 2000

# (Optional) Transport Zone MoRef ID for VXLAN logical networks
# vdn_scope_id =

# (Optional) DVS MoRef ID for DVS connected to Management / Edge cluster
# dvs_id =

# (ListOpt) Define the backup edge pool's management range with the four-tuple:
# <edge_type>:[edge_size]:<minimum_pooled_edges>:<maximum_pooled_edges>.
# edge_type: 'service' (service edge) or 'vdr' (distributed edge).
# edge_size: 'compact', 'large' (the default), 'xlarge' or 'quadlarge'.
#
# By default, the edge pool manager manages service edges of compact and large
# size and distributed edges of large size, as follows:
# backup_edge_pool = service:large:4:10,service:compact:4:10,vdr:large:4:10
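# Illustrative example only (the pool sizes below are arbitrary, not a
# recommendation): keep two to six xlarge service edges and two to six
# large distributed edges pooled.
# backup_edge_pool = service:xlarge:2:6,vdr:large:2:6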

# (Optional) Maximum number of sub-interfaces supported per vNIC on an edge.
# The default is 20.
# maximum_tunnels_per_vnic = 20

# Maximum number of API retries
# retries = 10

# (Optional) Portgroup MoRef ID for metadata proxy management network
# mgt_net_moid =

# (Optional) Management network IP addresses for metadata proxy, comma separated
# mgt_net_proxy_ips =
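# Illustrative example only (placeholder addresses on a hypothetical
# management network):
# mgt_net_proxy_ips = 192.168.200.10,192.168.200.11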

# (Optional) Management network netmask for metadata proxy
# mgt_net_proxy_netmask =

# (Optional) Management network default gateway for metadata proxy
# mgt_net_default_gateway =

# (Optional) IP addresses used by Nova metadata service
# nova_metadata_ips =

# (Optional) TCP Port used by Nova metadata server
# nova_metadata_port = 8775

# (Optional) Shared secret to sign metadata requests
# metadata_shared_secret =

# (Optional) If True, the end-to-end connection for the metadata service is
# not verified. If False, the default CA truststore is used for verification.
# metadata_insecure =

# (Optional) Comma-separated list of TCP ports to be allowed access to the
# metadata proxy, in addition to the default 80,443,8775 TCP ports
# metadata_service_allowed_ports =
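# Illustrative example only (the extra ports below are arbitrary placeholders):
# metadata_service_allowed_ports = 8080,9697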

# (Optional) Client certificate to use when the metadata connection is to be
# verified. If not provided, a self-signed certificate will be used.
# metadata_nova_client_cert =

# (Optional) Private key to use for the client certificate
# metadata_nova_client_priv_key =

# (Optional) Indicates whether the NSXv spoofguard component is used to
# implement the port-security feature.
# spoofguard_enabled = True

# (Optional) Deploys NSX Edges in HA mode
# edge_ha = False

# (Optional) Edge appliance size to be used for creating an exclusive router.
# Valid values: ['compact', 'large', 'xlarge', 'quadlarge']
# This value will be used if the --router-size parameter is not specified
# when running 'neutron router-create'.
# exclusive_router_appliance_size = compact

# (ListOpt) Ordered list of router_types to allocate as tenant routers.
# It limits the router types that NSXv can support for tenants:
# distributed: the router is backed by a distributed edge at the backend.
# shared: multiple routers share the same service edge at the backend.
# exclusive: the router exclusively occupies one service edge at the backend.
# NSXv selects the first available router type from the tenant_router_types
# list if no router type is specified.
# If the tenant specifies the router type with "--distributed",
# "--router_type exclusive" or "--router_type shared", NSXv verifies that
# the router type is in tenant_router_types.
# Admins can use all three router types.
#
# tenant_router_types = shared, distributed, exclusive
# Example: tenant_router_types = distributed, shared

# (Optional) Enable an administrator to configure the edge user and password.
# Username to configure for Edge appliance login
# edge_appliance_user =
# (Optional) Password to configure for Edge appliance login
# edge_appliance_password =

# (Optional) DHCP lease time
# dhcp_lease_time = 86400

# (Optional) Indicates whether the distributed-firewall rule for
# security-group blocked traffic is logged.
# log_security_groups_blocked_traffic = False

# (Optional) Indicates whether distributed-firewall security-group rules are
# logged.
# log_security_groups_allowed_traffic = False

# (Optional) In some cases the Neutron router is not present to provide the
# metadata IP, but the DHCP server can be used to provide this info. Setting
# this value will force the DHCP edge server to append specific host routes
# to the DHCP request. If this option is set, then the metadata service will
# be activated for all DHCP-enabled networks.
# Note: this option is only supported with NSX Manager version 6.2.3 or
# higher.
# dhcp_force_metadata = True

[nsx]
# Maximum number of ports for each bridged logical switch
# The recommended value for this parameter varies with NSX version
# Please use:
# NSX 2.x -> 64
# NSX 3.0, 3.1 -> 5000
# NSX 3.2 -> 10000
# max_lp_per_bridged_ls = 5000

# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256

# Number of connections to each controller node.
# The default is 10.
# concurrent_connections = 10

# Number of seconds a generation id should be valid for (default -1, meaning do not time out)
# nsx_gen_timeout = -1

# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via a Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise access_network should be used.
# metadata_mode = access_network

# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt

# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default),
# the NSX plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports the API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Icehouse),
# the plugin will use NSX logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The 'agentless' mode works only on NSX 4.1.
# Furthermore, a 'combined' mode is also provided and is used to support existing
# deployments that want to adopt the agentless mode. With this mode, existing networks
# keep being served by the existing infrastructure (thus preserving backward
# compatibility), whereas new networks will be served by the new infrastructure.
# Migration tools are provided to 'move' one network from one model to another; with
# agent_mode set to 'combined', the option 'network_auto_schedule' in neutron.conf is
# ignored, as new networks will no longer be scheduled to existing dhcp agents.
# agent_mode = agent

# Specifies the mode in which packet replication should be performed. If set
# to 'service', a service node is required in order to perform packet
# replication. This can also be set to 'source' if one wants replication to
# be performed locally (NOTE: usually only useful for testing if one does not
# want to deploy a service node).
# In order to leverage distributed routers, replication_mode should be set to
# "service".
# replication_mode = service

[nsx_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should therefore be large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous one is completed.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 10

# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0

# Minimum delay, in seconds, between two status synchronization requests for NSX.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might be quite
# increased by them. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If it does, an error will be raised at startup.
# min_sync_req_delay = 1

# Minimum number of resources to be retrieved from NSX in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay.
# min_chunk_size = 500

# Enable this option to allow punctual state synchronization on show
# operations. In this way, show operations will always fetch the operational
# status of the resource from the NSX backend; this might have
# a considerable impact on overall performance.
# always_read_status = False

[nsx_lsn]
# Pull LSN information from NSX in case it is missing from the local
# data store. This is useful to rebuild the local store in case of
# server recovery.
# sync_on_missing_data = False

[nsx_dhcp]
# (Optional) Comma-separated list of additional DNS servers. Default is an empty list.
# extra_domain_name_servers =
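# Illustrative example only (placeholder resolver addresses):
# extra_domain_name_servers = 10.0.0.53,10.0.1.53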

# Domain to use for building the hostnames
# domain_name = openstacklocal

# Default DHCP lease time
# default_lease_time = 43200

[nsx_metadata]
# IP address used by the Metadata server
# metadata_server_address = 127.0.0.1

# TCP Port used by the Metadata server
# metadata_server_port = 8775

# When proxying metadata requests, Neutron signs the Instance-ID header with a
# shared secret to prevent spoofing. You may select any string for a secret,
# but it MUST match the configuration used by the Metadata server.
# metadata_shared_secret =

[nsx_v3]
# IP addresses of one or more NSX managers separated by commas.
# Each address should be of the form:
# [<scheme>://]<ip_address>[:<port>]
# If the scheme is not provided, https is used. If the port is not provided,
# port 80 is used for http and port 443 for https.
# nsx_api_managers = 1.2.3.4
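# Illustrative example only (placeholder manager addresses), showing the
# optional scheme and port components:
# nsx_api_managers = https://10.10.10.1:443, 10.10.10.2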

# User name of NSX Manager
# nsx_api_user = admin

# Password of NSX Manager
# nsx_api_password = default

# UUID of the default NSX overlay transport zone that will be used for creating
# tunneled isolated Neutron networks. If no physical network is specified when
# creating a logical network, this transport zone will be used by default
# default_overlay_tz_uuid = afc40f8a-4967-477e-a17a-9d560d1786c7

# (Optional) Only required when creating VLAN or flat provider networks. UUID
# of the default NSX VLAN transport zone that will be used for bridging between
# Neutron networks, if no physical network has been specified
# default_vlan_tz_uuid = afc40f8a-4967-477e-a17a-9d560d1786c7

# Maximum number of times to retry API requests upon stale revision errors.
# retries = 10

# Specify a CA bundle file to use in verifying the NSX Manager
# server certificate. This option is ignored if "insecure" is set to True.
# If "insecure" is set to False and ca_file is unset, the system root CAs
# will be used to verify the server certificate.
# ca_file =

# If true, the NSX Manager server certificate is not verified. If false,
# the CA bundle specified via "ca_file" will be used or, if unset, the
# default system root CAs will be used.
# insecure = True

# The time in seconds before aborting an HTTP connection to an NSX manager.
# http_timeout = 10

# The time in seconds before aborting an HTTP read response from an NSX manager.
# http_read_timeout = 180

# Maximum number of times to retry an HTTP connection.
# http_retries = 3

# Maximum number of concurrent connections to each NSX manager.
# concurrent_connections = 10

# The amount of time in seconds to wait before ensuring connectivity to
# the NSX manager if no manager connection has been used.
# conn_idle_timeout = 10

# UUID of the default tier0 router that will be used for connecting to
# tier1 logical routers and configuring external networks
# default_tier0_router_uuid = 412983fd-9016-45e5-93f2-48ba2a931225

# (Optional) UUID of the default NSX bridge cluster that will be used to
# perform L2 gateway bridging between VXLAN and VLAN networks. If the default
# bridge cluster UUID is not specified, the admin will have to manually
# create an L2 gateway corresponding to an NSX Bridge Cluster using the L2
# gateway APIs. This field must be specified on one of the active neutron
# servers only.
# default_bridge_cluster_uuid =

# (Optional) The number of nested groups used by the plugin. Each Neutron
# security-group is added to one nested group, and each nested group can
# contain at most 500 security-groups; therefore, the maximum number of
# security groups that can be created is 500 * number_of_nested_groups.
# The default is 8 nested groups, which allows a maximum of 4000
# security-groups; to allow creation of more security-groups, increase this
# value.
# number_of_nested_groups =
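# Illustrative example only: with the (hypothetical) setting below the plugin
# could track up to 500 * 10 = 5000 security-groups.
# number_of_nested_groups = 10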

# Acceptable values for 'metadata_mode' are:
# - 'access_network': this enables a dedicated connection to the metadata
# proxy for metadata server access via a Neutron router.
# - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces; otherwise access_network should be used.
# metadata_mode = access_network

# If True, an internal metadata network will be created for a router only when
# the router is attached to a DHCP-disabled subnet.
# metadata_on_demand = False

# (Optional) Indicates whether the distributed-firewall rule for
# security-group blocked traffic is logged.
# log_security_groups_blocked_traffic = False

# (Optional) Indicates whether distributed-firewall security-group rules are
# logged.
# log_security_groups_allowed_traffic = False