2bfa1d45b4
Blueprint nicira-plugin-get-improvements With this patch GET operations on the Nicira plugin will not be forwarded anymore to the NVP backend. Resource operational status will be periodically retrieved from the NVP backend using a DynamicLoopingCall. The process has been designed with the aim of avoiding: 1) frequent queries to NVP for retrieving resource status 2) execution of large queries to NVP for retrieving the status of a consistent number of resources. The process can be tuned using a set of configuration variables. GET operations will now return a status which might differ from the actual status of the resource. For retrieving status on demand, the field 'status' should be explicitly specified in the GET request (only 'show' support has been implemented in this patch). This patch also makes some changes to the fake nvp api client in order to ensure each instance has a private set of dictionaries for fake nvp entities. Change-Id: Ia745b80d2826de32ba8d6883c0d6e0893047e123
137 lines
5.8 KiB
INI
# ##############################################################
# WARNINGS: The following deprecations have been made in the
# Havana release. Support for the options below will be removed
# in Ixxx.
#
# Section: [DEFAULT], Option: 'metadata_dhcp_host_route'
# Remarks: Use 'enable_isolated_metadata' in dhcp_agent.ini.
#
#
# Section: [cluster:name], Option: 'nvp_controller_connection'
# Remarks: The configuration will allow the specification of
#          a single cluster, therefore [cluster:name] is no
#          longer used. Use 'nvp_*' options, 'req_timeout',
#          'retries', etc. as indicated in the DEFAULT section.
#          Support for multiple clusters will be added through
#          an API extension.
# ##############################################################

[DEFAULT]
# User name for NVP controller
# nvp_user = admin

# Password for NVP controller
# nvp_password = admin

# Total time limit for a cluster request
# (including retries across different controllers)
# req_timeout = 30

# Time before aborting a request on an unresponsive controller
# http_timeout = 10

# Maximum number of times a particular request should be retried
# retries = 2

# Maximum number of times a redirect response should be followed
# redirects = 2

# Comma-separated list of NVP controller endpoints (<ip>:<port>). When port
# is omitted, 443 is assumed. This option MUST be specified, e.g.:
# nvp_controllers = xx.yy.zz.ww:443, aa.bb.cc.dd, ee.ff.gg.hh.ee:80

# UUID of the pre-existing default NVP Transport zone to be used for creating
# tunneled isolated "Neutron" networks. This option MUST be specified, e.g.:
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53

# (Optional) UUID of the cluster in NVP. It can be retrieved from NVP management
# console "admin" section.
# nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7

# (Optional) UUID for the default l3 gateway service to use with this cluster.
# To be specified if planning to use logical routers with external gateways.
# default_l3_gw_service_uuid =

# (Optional) UUID for the default l2 gateway service to use with this cluster.
# To be specified for providing a predefined gateway tenant for connecting their networks.
# default_l2_gw_service_uuid =

# Name of the default interface name to be used on network-gateway. This value
# will be used for any device associated with a network gateway for which an
# interface name was not specified.
# default_interface_name = breth0

[quotas]
# Number of network gateways allowed per tenant; -1 means unlimited
# quota_network_gateway = 5


[nvp]
# Maximum number of ports for each bridged logical switch
# max_lp_per_bridged_ls = 64

# Maximum number of ports for each overlay (stt, gre) logical switch
# max_lp_per_overlay_ls = 256

# Number of connections to each controller node.
# default is 5
# concurrent_connections = 3

# Number of seconds a generation id should be valid for (default -1 meaning do not time out)
# nvp_gen_timeout = -1

# Acceptable values for 'metadata_mode' are:
#   - 'access_network': this enables a dedicated connection to the metadata
#     proxy for metadata server access via Neutron router.
#   - 'dhcp_host_route': this enables host route injection via the dhcp agent.
# This option is only useful if running on a host that does not support
# namespaces, otherwise access_network should be used.
# metadata_mode = access_network

# The default network transport type to use (stt, gre, bridge, ipsec_gre, or ipsec_stt)
# default_transport_type = stt

# Specifies in which mode the plugin needs to operate in order to provide DHCP and
# metadata proxy services to tenant instances. If 'agent' is chosen (default)
# the NVP plugin relies on external RPC agents (i.e. dhcp and metadata agents) to
# provide such services. In this mode, the plugin supports API extensions 'agent'
# and 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in Havana),
# the plugin will use NVP logical services for DHCP and metadata proxy. This
# simplifies the deployment model for Neutron, in that the plugin no longer requires
# the RPC agents to operate. When 'agentless' is chosen, the config option metadata_mode
# becomes ineffective. The mode 'agentless' is not supported for NVP 3.2 or below.
# agent_mode = agent

[nvp_sync]
# Interval in seconds between runs of the status synchronization task.
# The plugin will aim at resynchronizing operational status for all
# resources in this interval, and it should be therefore large enough
# to ensure the task is feasible. Otherwise the plugin will be
# constantly synchronizing resource status, i.e. a new task is started
# as soon as the previous is completed.
# If this value is set to 0, the state synchronization thread for this
# Neutron instance will be disabled.
# state_sync_interval = 120

# Random additional delay between two runs of the state synchronization task.
# An additional wait time between 0 and max_random_sync_delay seconds
# will be added on top of state_sync_interval.
# max_random_sync_delay = 0

# Minimum delay, in seconds, between two status synchronization requests for NVP.
# Depending on chunk size, controller load, and other factors, state
# synchronization requests might be pretty heavy. This means the
# controller might take time to respond, and its load might be quite
# increased by them. This parameter allows specifying a minimum
# interval between two subsequent requests.
# The value for this parameter must never exceed state_sync_interval.
# If it does, an error will be raised at startup.
# min_sync_req_delay = 10

# Minimum number of resources to be retrieved from NVP in a single status
# synchronization request.
# The actual size of the chunk will increase if the number of resources is such
# that using the minimum chunk size will cause the interval between two
# requests to be less than min_sync_req_delay.
# min_chunk_size = 500