Update the Nicira NVP plugin to support the v2 Quantum API
blueprint: quantum-nvp-plugin-v2
Change-Id: I848ad7b7b99a24e19ea28e65b7d88261c21eac3a
parent c6f591b7ae
commit c0f3e2a43f
@@ -1,36 +1,59 @@
# Example configuration:
# [NVP]
# DEFAULT_TZ_UUID = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53
# NVP_CONTROLLER_CONNECTIONS = NVP_CONN_1 NVP_CONN_2 NVP_CONN_3
# NVP_CONN_1=10.0.1.2:443:admin:password:30:10:2:2
# NVP_CONN_2=10.0.1.3:443:admin:password:30:10:2:2
# NVP_CONN_3=10.0.1.4:443:admin:password:30:10:2:2
[DEFAULT]
# No default config for now.

[DATABASE]
# This line MUST be changed to actually run the plugin.
# Example:
# sql_connection = mysql://root:quantum@127.0.0.1:3306/nvp_quantum
# Replace 127.0.0.1 above with the IP address of the database used by the
# main quantum server. (Leave it as is if the database runs on this host.)
sql_connection = sqlite://
# Database reconnection retry times - in event connectivity is lost
# set to -1 implies an infinite retry count
# sql_max_retries = 10
# Database reconnection interval in seconds - in event connectivity is lost
reconnect_interval = 2

[NVP]
# This is the uuid of the default NVP Transport zone that will be used for
# creating isolated "Quantum" networks. The transport zone needs to be
# created in NVP before starting Quantum with the plugin.
DEFAULT_TZ_UUID = <insert default tz uuid>
# This parameter is a space separated list of NVP_CONTROLLER_CONNECTIONS.
NVP_CONTROLLER_CONNECTIONS = <space separated names of controller connections>
# This parameter describes a connection to a single NVP controller.
# The number of logical ports to create per bridged logical switch
# max_lp_per_bridged_ls = 64
# Time from when a connection pool is switched to another controller
# during failure.
# failover_time = 5
# Number of connects to each controller node.
# concurrent_connections = 3

#[CLUSTER:example]
# This is uuid of the default NVP Transport zone that will be used for
# creating tunneled isolated "Quantum" networks. It needs to be created in
# NVP before starting Quantum with the nvp plugin.
# default_tz_uuid = 1e8e52cf-fa7f-46b0-a14a-f99835a9cb53

# Nova "zone" that maps to this NVP cluster. This should map to the
# node_availability_zone in your nova.conf for each nova cluster. Each nova
# cluster should have a unique node_availability_zone set.
# nova_zone_id = zone1 # (Optional)

# UUID of the cluster in NVP. This can be retrieved from NVP management
# console "admin" section.
# nvp_cluster_uuid = 615be8e4-82e9-4fd2-b4b3-fd141e51a5a7 # (Optional)

# This parameter describes a connection to a single NVP controller. Format:
# <ip>:<port>:<user>:<pw>:<req_timeout>:<http_timeout>:<retries>:<redirects>
# <ip> is the ip address of the controller
# <port> is the port of the controller (default NVP port is 443)
# <user> is the user name for this controller
# <pass> is the user password.
# <request_timeout>: The total time limit on all operations for a controller
# <pw> is the user password.
# <req_timeout>: The total time limit on all operations for a controller
# request (including retries, redirects from unresponsive controllers).
# Default is 30.
# <http_timeout>: How long to wait before aborting an unresponsive controller
# (and allow for retries to another controller).
# (and allow for retries to another controller in the cluster).
# Default is 10.
# <retries>: the maximum number of times to retry a particular request
# Default is 2.
# <redirects>: the maximum number of times to follow a redirect response from a server.
# Default is 2.
# There must be at least one NVP_CONTROLLER_CONNECTION per system.
#
# Here is an example:
# NVP_CONTROLLER_CONNECTION_1=10.0.0.1:443:admin:password:30:10:2:2
<connection name>=<ip>:<port>:<user>:<pass>:<api_call_timeout>:<http_timeout>:<retries>:<redirects>
# There must be at least one nvp_controller_connection per system or per cluster.
# nvp_controller_connection=10.0.1.2:443:admin:admin:30:10:2:2
# nvp_controller_connection=10.0.1.3:443:admin:admin:30:10:2:2
# nvp_controller_connection=10.0.1.4:443:admin:admin:30:10:2:2
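[Editor's aside, not part of the commit] The controller-connection value documented above packs eight colon-separated fields into one string. Purely as an orientation aid, a value of that shape could be unpacked roughly as sketched below; the function name and dictionary keys are illustrative, not names used by the plugin.

# Illustrative sketch only: unpack one nvp_controller_connection value of the
# form <ip>:<port>:<user>:<pw>:<req_timeout>:<http_timeout>:<retries>:<redirects>.
# Assumes the password itself contains no ':'.
def parse_controller_connection(conn_str):
    (ip, port, user, password,
     req_timeout, http_timeout, retries, redirects) = conn_str.split(':')
    return {
        'ip': ip,
        'port': int(port),
        'user': user,
        'password': password,
        'request_timeout': int(req_timeout),
        'http_timeout': int(http_timeout),
        'retries': int(retries),
        'redirects': int(redirects),
    }

# Example: parse_controller_connection("10.0.1.2:443:admin:admin:30:10:2:2")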
@@ -1,4 +1,5 @@
# Copyright 2012 Nicira Networks, Inc.
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -12,23 +13,20 @@
# License for the specific language governing permissions and limitations
# under the License.
#
#@author: Somik Behera, Nicira Networks, Inc.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# @author: Somik Behera, Nicira Networks, Inc.

import httplib  # basic HTTP library for HTTPS connections
import logging


from quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet \
    import NvpApiClientEventlet
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet \
    import NvpGenericRequestEventlet

from quantum.plugins.nicira.nicira_nvp_plugin.api_client import (
    client_eventlet, request_eventlet)

LOG = logging.getLogger("NVPApiHelper")
LOG.setLevel(logging.INFO)


class NVPApiHelper(NvpApiClientEventlet):
class NVPApiHelper(client_eventlet.NvpApiClientEventlet):
    '''
    Helper class to do basic login, cookie management, and provide base
    method to send HTTP requests.
@@ -51,13 +49,14 @@ class NVPApiHelper(NvpApiClientEventlet):
            from unresponsive controllers, etc) should finish within this
            timeout.
        :param http_timeout: how long to wait before aborting an
            unresponsive controller
            unresponsive controller (and allow for retries to another
            controller in the cluster)
        :param retries: the number of concurrent connections.
        :param redirects: the number of concurrent connections.
        :param failover_time: minimum time between controller failover and new
            connections allowed.
        '''
        NvpApiClientEventlet.__init__(
        client_eventlet.NvpApiClientEventlet.__init__(
            self, api_providers, user, password, concurrent_connections,
            failover_time=failover_time)

@@ -85,12 +84,12 @@ class NVPApiHelper(NvpApiClientEventlet):
        if password:
            self._password = password

        return NvpApiClientEventlet.login(self)
        return client_eventlet.NvpApiClientEventlet.login(self)

    def request(self, method, url, body="", content_type="application/json"):
        '''Issues request to controller.'''

        g = NvpGenericRequestEventlet(
        g = request_eventlet.NvpGenericRequestEventlet(
            self, method, url, body, content_type, auto_login=True,
            request_timeout=self._request_timeout,
            http_timeout=self._http_timeout,
@@ -127,9 +126,8 @@ class NVPApiHelper(NvpApiClientEventlet):
        # Continue processing for non-error condition.
        if (status != httplib.OK and status != httplib.CREATED
                and status != httplib.NO_CONTENT):
            LOG.error(
                "%s to %s, unexpected response code: %d (content = '%s')" %
                (method, url, response.status, response.body))
            LOG.error("%s to %s, unexpected response code: %d (content = '%s')"
                      % (method, url, response.status, response.body))
            return None

        return response.body
@@ -149,8 +147,9 @@ class NVPApiHelper(NvpApiClientEventlet):
    def zero(self):
        raise NvpApiException()

    error_codes = {
        404: fourZeroFour,
    # TODO(del): ensure error_codes are handled/raised appropriately
    # in api_client.
    error_codes = {404: fourZeroFour,
                   409: fourZeroNine,
                   503: fiveZeroThree,
                   403: fourZeroThree,
@@ -158,7 +157,7 @@ class NVPApiHelper(NvpApiClientEventlet):
                   307: zero,
                   400: zero,
                   500: zero,
                   }
                   503: zero}


class NvpApiException(Exception):
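[Editor's aside, not part of the commit] For orientation, NVPApiHelper.request() above returns the raw response body on a 200/201/204 status and None otherwise. A minimal usage sketch follows; the function name and the 'helper' variable are illustrative, and the URL is simply one of the NVP API paths referenced later in this change.

# Illustrative sketch only: issue a GET through an already-constructed,
# logged-in NVPApiHelper instance ('helper' is assumed to exist).
def list_transport_zones(helper):
    # Returns the JSON body as a string on 200/201/204, or None on error
    # (see the status-code handling in request() above).
    return helper.request("GET", "/ws.v1/transport-zone")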
File diff suppressed because it is too large
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -11,3 +12,5 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
@ -0,0 +1,16 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2009-2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -12,6 +13,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Author: David Lapsley <dlapsley@nicira.com>, Nicira Networks, Inc.
|
||||
|
||||
from abc import ABCMeta
|
||||
@ -28,8 +31,6 @@ class NvpApiClient(object):
|
||||
|
||||
__metaclass__ = ABCMeta
|
||||
|
||||
# Default connection timeout for a controller. After CONN_IDLE_TIMEOUT
|
||||
# seconds the client attempt to reconnect.
|
||||
CONN_IDLE_TIMEOUT = 60 * 15
|
||||
|
||||
@abstractmethod
|
||||
|
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2009-2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -11,32 +12,33 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
|
||||
import client
|
||||
import eventlet
|
||||
import httplib
|
||||
import logging
|
||||
import request_eventlet
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
|
||||
import quantum.plugins.nicira.nicira_nvp_plugin.api_client.client as client
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.common import (
|
||||
_conn_str,
|
||||
)
|
||||
import quantum.plugins.nicira.nicira_nvp_plugin.api_client.request_eventlet
|
||||
from common import _conn_str
|
||||
|
||||
eventlet.monkey_patch()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
LOG = logging.getLogger('nvp_api_client')
|
||||
|
||||
lg = logging.getLogger('nvp_api_client')
|
||||
|
||||
# Default parameters.
|
||||
DEFAULT_FAILOVER_TIME = 5
|
||||
DEFAULT_CONCURRENT_CONNECTIONS = 3
|
||||
DEFAULT_CONNECT_TIMEOUT = 5
|
||||
GENERATION_ID_TIMEOUT = -1 # if set to -1 then disabled
|
||||
|
||||
|
||||
class NvpApiClientEventlet(object):
|
||||
"""Eventlet-based implementation of NvpApiClient ABC."""
|
||||
'''Eventlet-based implementation of NvpApiClient ABC.'''
|
||||
|
||||
CONN_IDLE_TIMEOUT = 60 * 15
|
||||
|
||||
@ -44,17 +46,24 @@ class NvpApiClientEventlet(object):
|
||||
concurrent_connections=DEFAULT_CONCURRENT_CONNECTIONS,
|
||||
use_https=True,
|
||||
connect_timeout=DEFAULT_CONNECT_TIMEOUT,
|
||||
failover_time=DEFAULT_FAILOVER_TIME):
|
||||
"""Constructor
|
||||
failover_time=DEFAULT_FAILOVER_TIME,
|
||||
nvp_gen_timeout=GENERATION_ID_TIMEOUT):
|
||||
'''Constructor
|
||||
|
||||
Args:
|
||||
api_providers: a list of tuples of the form: (host, port, is_ssl).
|
||||
user: login username.
|
||||
password: login password.
|
||||
concurrent_connections: total number of concurrent connections.
|
||||
use_https: whether or not to use https for requests.
|
||||
connect_timeout: connection timeout in seconds.
|
||||
"""
|
||||
:param api_providers: a list of tuples of the form: (host, port,
|
||||
is_ssl).
|
||||
:param user: login username.
|
||||
:param password: login password.
|
||||
:param concurrent_connections: total number of concurrent connections.
|
||||
:param use_https: whether or not to use https for requests.
|
||||
:param connect_timeout: connection timeout in seconds.
|
||||
:param failover_time: time from when a connection pool is switched to
|
||||
the next connection released via acquire_connection().
|
||||
:param nvp_gen_timeout controls how long the generation id is kept
|
||||
if set to -1 the generation id is never timed out
|
||||
'''
|
||||
if not api_providers:
|
||||
api_providers = []
|
||||
self._api_providers = set([tuple(p) for p in api_providers])
|
||||
self._user = user
|
||||
self._password = password
|
||||
@ -62,22 +71,27 @@ class NvpApiClientEventlet(object):
|
||||
self._use_https = use_https
|
||||
self._connect_timeout = connect_timeout
|
||||
self._failover_time = failover_time
|
||||
self._nvp_config_gen = None
|
||||
self._nvp_config_gen_ts = None
|
||||
self._nvp_gen_timeout = nvp_gen_timeout
|
||||
|
||||
# Connection pool is a queue. Head of the queue is the
|
||||
# connection pool with the highest priority.
|
||||
self._conn_pool = eventlet.queue.Queue()
|
||||
for host, port, is_ssl in self._api_providers:
|
||||
provider_conn_pool = eventlet.queue.Queue()
|
||||
# Connection pool is a list of queues.
|
||||
self._conn_pool = list()
|
||||
conn_pool_idx = 0
|
||||
for host, port, is_ssl in api_providers:
|
||||
provider_conn_pool = eventlet.queue.Queue(
|
||||
maxsize=concurrent_connections)
|
||||
for i in range(concurrent_connections):
|
||||
# All connections in a provider_conn_poool have the
|
||||
# same priority (they connect to the same server).
|
||||
conn = self._create_connection(host, port, is_ssl)
|
||||
conn.conn_pool = provider_conn_pool
|
||||
conn.idx = conn_pool_idx
|
||||
provider_conn_pool.put(conn)
|
||||
|
||||
self._conn_pool.put(provider_conn_pool)
|
||||
self._conn_pool.append(provider_conn_pool)
|
||||
conn_pool_idx += 1
|
||||
|
||||
self._active_conn_pool = self._conn_pool.get()
|
||||
self._active_conn_pool_idx = 0
|
||||
|
||||
self._cookie = None
|
||||
self._need_login = True
|
||||
@ -106,81 +120,123 @@ class NvpApiClientEventlet(object):
|
||||
def password(self):
|
||||
return self._password
|
||||
|
||||
@property
|
||||
def nvp_config_gen(self):
|
||||
# If nvp_gen_timeout is not -1 then:
|
||||
# Maintain a timestamp along with the generation ID. Hold onto the
|
||||
# ID long enough to be useful and block on sequential requests but
|
||||
# not long enough to persist when Onix db is cleared, which resets
|
||||
# the generation ID, causing the DAL to block indefinitely with some
|
||||
# number that's higher than the cluster's value.
|
||||
if self._nvp_gen_timeout != -1:
|
||||
ts = self._nvp_config_gen_ts
|
||||
if ts is not None:
|
||||
if (time.time() - ts) > self._nvp_gen_timeout:
|
||||
return None
|
||||
return self._nvp_config_gen
|
||||
|
||||
@nvp_config_gen.setter
|
||||
def nvp_config_gen(self, value):
|
||||
if self._nvp_config_gen != value:
|
||||
if self._nvp_gen_timeout != -1:
|
||||
self._nvp_config_gen_ts = time.time()
|
||||
self._nvp_config_gen = value
|
||||
|
||||
@property
|
||||
def auth_cookie(self):
|
||||
return self._cookie
|
||||
|
||||
def acquire_connection(self):
|
||||
"""Check out an available HTTPConnection instance.
|
||||
def acquire_connection(self, rid=-1):
|
||||
'''Check out an available HTTPConnection instance.
|
||||
|
||||
Blocks until a connection is available.
|
||||
|
||||
Returns: An available HTTPConnection instance or None if no
|
||||
:param rid: request id passed in from request eventlet.
|
||||
:returns: An available HTTPConnection instance or None if no
|
||||
api_providers are configured.
|
||||
"""
|
||||
'''
|
||||
if not self._api_providers:
|
||||
lg.warn("[%d] no API providers currently available." % rid)
|
||||
return None
|
||||
|
||||
# The sleep time is to give controllers time to become consistent after
|
||||
# there has been a change in the controller used as the api_provider.
|
||||
now = time.time()
|
||||
if now < getattr(self, '_issue_conn_barrier', now):
|
||||
LOG.info("acquire_connection() waiting for timer to expire.")
|
||||
lg.warn("[%d] Waiting for failover timer to expire." % rid)
|
||||
time.sleep(self._issue_conn_barrier - now)
|
||||
|
||||
if self._active_conn_pool.empty():
|
||||
LOG.debug("Waiting to acquire an API client connection")
|
||||
# Print out a warning if all connections are in use.
|
||||
if self._conn_pool[self._active_conn_pool_idx].empty():
|
||||
lg.debug("[%d] Waiting to acquire client connection." % rid)
|
||||
|
||||
# get() call is blocking.
|
||||
conn = self._active_conn_pool.get()
|
||||
# Try to acquire a connection (block in get() until connection
|
||||
# available or timeout occurs).
|
||||
active_conn_pool_idx = self._active_conn_pool_idx
|
||||
conn = self._conn_pool[active_conn_pool_idx].get()
|
||||
|
||||
if active_conn_pool_idx != self._active_conn_pool_idx:
|
||||
# active_conn_pool became inactive while we were waiting.
|
||||
# Put connection back on old pool and try again.
|
||||
lg.warn("[%d] Active pool expired while waiting for connection: %s"
|
||||
% (rid, _conn_str(conn)))
|
||||
self._conn_pool[active_conn_pool_idx].put(conn)
|
||||
return self.acquire_connection(rid=rid)
|
||||
|
||||
# Check if the connection has been idle too long.
|
||||
now = time.time()
|
||||
if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT:
|
||||
LOG.info("Connection %s idle for %0.2f seconds; reconnecting." %
|
||||
(_conn_str(conn), now - conn.last_used))
|
||||
lg.info("[%d] Connection %s idle for %0.2f seconds; reconnecting."
|
||||
% (rid, _conn_str(conn), now - conn.last_used))
|
||||
conn = self._create_connection(*self._conn_params(conn))
|
||||
|
||||
# Stash conn pool so conn knows where to go when it releases.
|
||||
conn.conn_pool = self._active_conn_pool
|
||||
conn.idx = self._active_conn_pool_idx
|
||||
|
||||
conn.last_used = now
|
||||
LOG.debug("API client connection %s acquired" % _conn_str(conn))
|
||||
qsize = self._conn_pool[self._active_conn_pool_idx].qsize()
|
||||
lg.debug("[%d] Acquired connection %s. %d connection(s) available."
|
||||
% (rid, _conn_str(conn), qsize))
|
||||
return conn
|
||||
|
||||
def release_connection(self, http_conn, bad_state=False):
|
||||
"""Mark HTTPConnection instance as available for check-out.
|
||||
def release_connection(self, http_conn, bad_state=False, rid=-1):
|
||||
'''Mark HTTPConnection instance as available for check-out.
|
||||
|
||||
Args:
|
||||
http_conn: An HTTPConnection instance obtained from this
|
||||
:param http_conn: An HTTPConnection instance obtained from this
|
||||
instance.
|
||||
bad_state: True if http_conn is known to be in a bad state
|
||||
:param bad_state: True if http_conn is known to be in a bad state
|
||||
(e.g. connection fault.)
|
||||
"""
|
||||
:param rid: request id passed in from request eventlet.
|
||||
'''
|
||||
if self._conn_params(http_conn) not in self._api_providers:
|
||||
LOG.debug(("Released connection '%s' is no longer an API provider "
|
||||
"for the cluster") % _conn_str(http_conn))
|
||||
lg.warn("[%d] Released connection '%s' is not an API provider "
|
||||
"for the cluster" % (rid, _conn_str(http_conn)))
|
||||
return
|
||||
|
||||
# Retrieve "home" connection pool.
|
||||
conn_pool = http_conn.conn_pool
|
||||
conn_pool_idx = http_conn.idx
|
||||
conn_pool = self._conn_pool[conn_pool_idx]
|
||||
if bad_state:
|
||||
# reconnect
|
||||
LOG.info("API connection fault, reconnecting to %s" %
|
||||
_conn_str(http_conn))
|
||||
# Reconnect to provider.
|
||||
lg.warn("[%d] Connection returned in bad state, reconnecting to %s"
|
||||
% (rid, _conn_str(http_conn)))
|
||||
http_conn = self._create_connection(*self._conn_params(http_conn))
|
||||
http_conn.conn_pool = conn_pool
|
||||
conn_pool.put(http_conn)
|
||||
http_conn.idx = conn_pool_idx
|
||||
|
||||
if self._active_conn_pool == http_conn.conn_pool:
|
||||
# Get next connection from the connection pool and make it
|
||||
# active.
|
||||
LOG.info("API connection fault changing active_conn_pool.")
|
||||
self._conn_pool.put(self._active_conn_pool)
|
||||
self._active_conn_pool = self._conn_pool.get()
|
||||
if self._active_conn_pool_idx == http_conn.idx:
|
||||
# This pool is no longer in a good state. Switch to next pool.
|
||||
self._active_conn_pool_idx += 1
|
||||
self._active_conn_pool_idx %= len(self._conn_pool)
|
||||
lg.warn("[%d] Switched active_conn_pool from %d to %d."
|
||||
% (rid, http_conn.idx, self._active_conn_pool_idx))
|
||||
|
||||
# No connections to the new provider allowed until after this
|
||||
# timer has expired (allow time for synchronization).
|
||||
self._issue_conn_barrier = time.time() + self._failover_time
|
||||
else:
|
||||
conn_pool.put(http_conn)
|
||||
|
||||
LOG.debug("API client connection %s released" % _conn_str(http_conn))
|
||||
conn_pool.put(http_conn)
|
||||
lg.debug("[%d] Released connection %s. %d connection(s) available."
|
||||
% (rid, _conn_str(http_conn), conn_pool.qsize()))
|
||||
|
||||
@property
|
||||
def need_login(self):
|
||||
@ -191,20 +247,19 @@ class NvpApiClientEventlet(object):
|
||||
self._need_login = val
|
||||
|
||||
def wait_for_login(self):
|
||||
'''Block until a login has occurred for the current API provider.'''
|
||||
if self._need_login:
|
||||
if self._doing_login_sem.acquire(blocking=False):
|
||||
self.login()
|
||||
self._doing_login_sem.release()
|
||||
else:
|
||||
LOG.debug("Waiting for auth to complete")
|
||||
lg.debug("Waiting for auth to complete")
|
||||
self._doing_login_sem.acquire()
|
||||
self._doing_login_sem.release()
|
||||
return self._cookie
|
||||
|
||||
def login(self):
|
||||
"""Issue login request and update authentication cookie."""
|
||||
request_eventlet = (quantum.plugins.nicira.nicira_nvp_plugin.
|
||||
api_client.request_eventlet)
|
||||
'''Issue login request and update authentication cookie.'''
|
||||
g = request_eventlet.NvpLoginRequestEventlet(
|
||||
self, self._user, self._password)
|
||||
g.start()
|
||||
@ -212,16 +267,17 @@ class NvpApiClientEventlet(object):
|
||||
|
||||
if ret:
|
||||
if isinstance(ret, Exception):
|
||||
LOG.error('NvpApiClient: login error "%s"' % ret)
|
||||
lg.error('NvpApiClient: login error "%s"' % ret)
|
||||
raise ret
|
||||
|
||||
self._cookie = None
|
||||
cookie = ret.getheader("Set-Cookie")
|
||||
if cookie:
|
||||
LOG.debug("Saving new authentication cookie '%s'" % cookie)
|
||||
lg.debug("Saving new authentication cookie '%s'" % cookie)
|
||||
self._cookie = cookie
|
||||
self._need_login = False
|
||||
|
||||
# TODO: or ret is an error.
|
||||
if not ret:
|
||||
return None
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2009-2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -11,9 +12,11 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
|
||||
import httplib
|
||||
|
||||
import mock
|
||||
|
||||
|
||||
|
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2009-2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -11,6 +12,9 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
|
||||
from abc import ABCMeta
|
||||
from abc import abstractmethod
|
||||
|
@ -1,4 +1,5 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
# Copyright 2009-2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -11,36 +12,37 @@
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
|
||||
import copy
|
||||
import eventlet
|
||||
import httplib
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import urllib
|
||||
import urlparse
|
||||
import request
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
import client_eventlet
|
||||
from common import _conn_str
|
||||
from eventlet import timeout
|
||||
|
||||
from quantum.openstack.common import jsonutils
|
||||
import quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.api_client.common import (
|
||||
_conn_str,
|
||||
)
|
||||
import quantum.plugins.nicira.nicira_nvp_plugin.api_client.request as request
|
||||
|
||||
eventlet.monkey_patch()
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
LOG = logging.getLogger("nvp_api_request")
|
||||
|
||||
|
||||
USER_AGENT = "NVP gevent client/1.0"
|
||||
lg = logging.getLogger("nvp_api_request")
|
||||
USER_AGENT = "NVP eventlet client/1.0"
|
||||
|
||||
# Default parameters.
|
||||
DEFAULT_REQUEST_TIMEOUT = 30
|
||||
DEFAULT_HTTP_TIMEOUT = 10
|
||||
DEFAULT_RETRIES = 2
|
||||
DEFAULT_REDIRECTS = 2
|
||||
API_REQUEST_POOL_SIZE = 10000
|
||||
DEFAULT_API_REQUEST_POOL_SIZE = 1000
|
||||
DEFAULT_MAXIMUM_REQUEST_ID = 4294967295
|
||||
|
||||
|
||||
class NvpApiRequestEventlet:
|
||||
@ -50,6 +52,7 @@ class NvpApiRequestEventlet:
|
||||
(e.g. those used by the Quantum NVP Plugin).
|
||||
'''
|
||||
|
||||
# List of allowed status codes.
|
||||
ALLOWED_STATUS_CODES = [
|
||||
httplib.OK,
|
||||
httplib.CREATED,
|
||||
@ -62,11 +65,23 @@ class NvpApiRequestEventlet:
|
||||
httplib.NOT_FOUND,
|
||||
httplib.CONFLICT,
|
||||
httplib.INTERNAL_SERVER_ERROR,
|
||||
httplib.SERVICE_UNAVAILABLE,
|
||||
httplib.SERVICE_UNAVAILABLE
|
||||
]
|
||||
|
||||
# Maximum number of green threads present in the system at one time.
|
||||
API_REQUEST_POOL_SIZE = DEFAULT_API_REQUEST_POOL_SIZE
|
||||
|
||||
# Pool of green threads. One green thread is allocated per incoming
|
||||
# request. Incoming requests will block when the pool is empty.
|
||||
API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)
|
||||
|
||||
# A unique id is assigned to each incoming request. When the current
|
||||
# request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
|
||||
MAXIMUM_REQUEST_ID = DEFAULT_MAXIMUM_REQUEST_ID
|
||||
|
||||
# The request id for the next incoming request.
|
||||
CURRENT_REQUEST_ID = 0
|
||||
|
||||
def __init__(self, nvp_api_client, url, method="GET", body=None,
|
||||
headers=None,
|
||||
request_timeout=DEFAULT_REQUEST_TIMEOUT,
|
||||
@ -74,7 +89,7 @@ class NvpApiRequestEventlet:
|
||||
auto_login=True,
|
||||
redirects=DEFAULT_REDIRECTS,
|
||||
http_timeout=DEFAULT_HTTP_TIMEOUT):
|
||||
|
||||
'''Constructor.'''
|
||||
self._api_client = nvp_api_client
|
||||
self._url = url
|
||||
self._method = method
|
||||
@ -93,27 +108,45 @@ class NvpApiRequestEventlet:
|
||||
|
||||
self._green_thread = None
|
||||
|
||||
# Retrieve and store this instance's unique request id.
|
||||
self._request_id = NvpApiRequestEventlet.CURRENT_REQUEST_ID
|
||||
|
||||
# Update the class variable that tracks request id.
|
||||
# Request IDs wrap around at MAXIMUM_REQUEST_ID
|
||||
next_request_id = self._request_id + 1
|
||||
next_request_id %= NvpApiRequestEventlet.MAXIMUM_REQUEST_ID
|
||||
NvpApiRequestEventlet.CURRENT_REQUEST_ID = next_request_id
|
||||
|
||||
@classmethod
|
||||
def _spawn(cls, func, *args, **kwargs):
|
||||
'''Allocate a green thread from the class pool.'''
|
||||
return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)
|
||||
|
||||
def spawn(self, func, *args, **kwargs):
|
||||
'''Spawn a new green thread with the supplied function and args.'''
|
||||
return self.__class__._spawn(func, *args, **kwargs)
|
||||
|
||||
def _rid(self):
|
||||
'''Return current request id.'''
|
||||
return self._request_id
|
||||
|
||||
@classmethod
|
||||
def joinall(cls):
|
||||
'''Wait for all outstanding requests to complete.'''
|
||||
return cls.API_REQUEST_POOL.waitall()
|
||||
|
||||
def join(self):
|
||||
'''Wait for instance green thread to complete.'''
|
||||
if self._green_thread is not None:
|
||||
return self._green_thread.wait()
|
||||
LOG.error('Joining on invalid green thread')
|
||||
return Exception('Joining an invalid green thread')
|
||||
|
||||
def start(self):
|
||||
'''Start request processing.'''
|
||||
self._green_thread = self.spawn(self._run)
|
||||
|
||||
def copy(self):
|
||||
'''Return a copy of this request instance.'''
|
||||
return NvpApiRequestEventlet(
|
||||
self._api_client, self._url, self._method, self._body,
|
||||
self._headers, self._request_timeout, self._retries,
|
||||
@ -121,32 +154,42 @@ class NvpApiRequestEventlet:
|
||||
|
||||
@property
|
||||
def request_error(self):
|
||||
'''Return any errors associated with this instance.'''
|
||||
return self._request_error
|
||||
|
||||
def _run(self):
|
||||
'''Method executed within green thread.'''
|
||||
if self._request_timeout:
|
||||
# No timeout exception escapes the with block.
|
||||
with timeout.Timeout(self._request_timeout, False):
|
||||
return self._handle_request()
|
||||
|
||||
LOG.info('Request timeout handling request.')
|
||||
lg.info('[%d] Request timeout.' % self._rid())
|
||||
self._request_error = Exception('Request timeout')
|
||||
return None
|
||||
else:
|
||||
return self._handle_request()
|
||||
|
||||
def _request_str(self, conn, url):
|
||||
'''Return string representation of connection.'''
|
||||
return "%s %s/%s" % (self._method, _conn_str(conn), url)
|
||||
|
||||
def _issue_request(self):
|
||||
conn = self._api_client.acquire_connection()
|
||||
'''Issue a request to a provider.'''
|
||||
conn = self._api_client.acquire_connection(rid=self._rid())
|
||||
if conn is None:
|
||||
error = Exception("No API connections available")
|
||||
self._request_error = error
|
||||
return error
|
||||
|
||||
# Preserve the acquired connection as conn may be over-written by
|
||||
# redirects below.
|
||||
acquired_conn = conn
|
||||
|
||||
url = self._url
|
||||
LOG.info("Issuing request '%s'" % self._request_str(conn, url))
|
||||
lg.debug("[%d] Issuing - request '%s'" %
|
||||
(self._rid(),
|
||||
self._request_str(conn, url)))
|
||||
issued_time = time.time()
|
||||
is_conn_error = False
|
||||
try:
|
||||
@ -161,66 +204,92 @@ class NvpApiRequestEventlet:
|
||||
elif conn.sock.gettimeout() != self._http_timeout:
|
||||
conn.sock.settimeout(self._http_timeout)
|
||||
|
||||
headers = copy.copy(self._headers)
|
||||
gen = self._api_client.nvp_config_gen
|
||||
if gen:
|
||||
headers["X-Nvp-Wait-For-Config-Generation"] = gen
|
||||
lg.debug("Setting %s request header: %s" %
|
||||
('X-Nvp-Wait-For-Config-Generation', gen))
|
||||
try:
|
||||
conn.request(self._method, url, self._body, self._headers)
|
||||
except Exception, e:
|
||||
LOG.info('_issue_request: conn.request() exception: %s' %
|
||||
e)
|
||||
conn.request(self._method, url, self._body, headers)
|
||||
except Exception as e:
|
||||
lg.warn('[%d] Exception issuing request: %s' %
|
||||
(self._rid(), e))
|
||||
raise e
|
||||
|
||||
response = conn.getresponse()
|
||||
response.body = response.read()
|
||||
response.headers = response.getheaders()
|
||||
LOG.info("Request '%s' complete: %s (%0.2f seconds)"
|
||||
% (self._request_str(conn, url), response.status,
|
||||
time.time() - issued_time))
|
||||
lg.debug("[%d] Completed request '%s': %s (%0.2f seconds)"
|
||||
% (self._rid(), self._request_str(conn, url),
|
||||
response.status, time.time() - issued_time))
|
||||
|
||||
new_gen = response.getheader('X-Nvp-Config-Generation', None)
|
||||
if new_gen:
|
||||
lg.debug("Reading %s response header: %s" %
|
||||
('X-Nvp-config-Generation', new_gen))
|
||||
if (self._api_client.nvp_config_gen is None or
|
||||
self._api_client.nvp_config_gen < int(new_gen)):
|
||||
self._api_client.nvp_config_gen = int(new_gen)
|
||||
|
||||
if response.status not in [httplib.MOVED_PERMANENTLY,
|
||||
httplib.TEMPORARY_REDIRECT]:
|
||||
break
|
||||
elif redirects >= self._redirects:
|
||||
LOG.warn("Maximum redirects exceeded, aborting request")
|
||||
lg.info("[%d] Maximum redirects exceeded, aborting request"
|
||||
% self._rid())
|
||||
break
|
||||
redirects += 1
|
||||
|
||||
# In the following call, conn is replaced by the connection
|
||||
# specified in the redirect response from the server.
|
||||
conn, url = self._redirect_params(conn, response.headers)
|
||||
if url is None:
|
||||
response.status = httplib.INTERNAL_SERVER_ERROR
|
||||
break
|
||||
LOG.info("Redirecting request to: %s" %
|
||||
self._request_str(conn, url))
|
||||
lg.info("[%d] Redirecting request to: %s" %
|
||||
(self._rid(), self._request_str(conn, url)))
|
||||
|
||||
# If we receive any of these responses, then our server did not
|
||||
# process our request and may be in an errored state. Raise an
|
||||
# exception, which will cause the the conn to be released with
|
||||
# is_conn_error == True which puts the conn on the back of the
|
||||
# client's priority queue.
|
||||
# FIX for #9415. If we receive any of these responses, then
|
||||
# our server did not process our request and may be in an
|
||||
# errored state. Raise an exception, which will cause the
|
||||
# the conn to be released with is_conn_error == True
|
||||
# which puts the conn on the back of the client's priority
|
||||
# queue.
|
||||
if response.status >= 500:
|
||||
LOG.warn("API Request '%s %s' received: %s" %
|
||||
(self._method, self._url, response.status))
|
||||
lg.warn("[%d] Request '%s %s' received: %s"
|
||||
% (self._rid(), self._method, self._url,
|
||||
response.status))
|
||||
raise Exception('Server error return: %s' %
|
||||
response.status)
|
||||
return response
|
||||
except Exception, e:
|
||||
except Exception as e:
|
||||
if isinstance(e, httplib.BadStatusLine):
|
||||
msg = "Invalid server response"
|
||||
else:
|
||||
msg = unicode(e)
|
||||
LOG.warn("Request '%s' failed: %s (%0.2f seconds)"
|
||||
% (self._request_str(conn, url), msg,
|
||||
lg.warn("[%d] Failed request '%s': %s (%0.2f seconds)"
|
||||
% (self._rid(), self._request_str(conn, url), msg,
|
||||
time.time() - issued_time))
|
||||
self._request_error = e
|
||||
is_conn_error = True
|
||||
return e
|
||||
finally:
|
||||
self._api_client.release_connection(conn, is_conn_error)
|
||||
# Make sure we release the original connection provided by the
|
||||
# acquire_connection() call above.
|
||||
self._api_client.release_connection(acquired_conn, is_conn_error,
|
||||
rid=self._rid())
|
||||
|
||||
def _redirect_params(self, conn, headers):
|
||||
'''Process redirect params from a server response.'''
|
||||
url = None
|
||||
for name, value in headers:
|
||||
if name.lower() == "location":
|
||||
url = value
|
||||
break
|
||||
if not url:
|
||||
LOG.warn("Received redirect status without location header field")
|
||||
lg.warn("[%d] Received redirect status without location header"
|
||||
" field" % self._rid())
|
||||
return (conn, None)
|
||||
# Accept location with the following format:
|
||||
# 1. /path, redirect to same node
|
||||
@ -236,18 +305,17 @@ class NvpApiRequestEventlet:
|
||||
url = result.path
|
||||
return (conn, url) # case 1
|
||||
else:
|
||||
LOG.warn("Received invalid redirect location: %s" % url)
|
||||
lg.warn("[%d] Received invalid redirect location: %s" %
|
||||
(self._rid(), url))
|
||||
return (conn, None) # case 3
|
||||
elif result.scheme not in ["http", "https"] or not result.hostname:
|
||||
LOG.warn("Received malformed redirect location: %s" % url)
|
||||
lg.warn("[%d] Received malformed redirect location: %s" %
|
||||
(self._rid(), url))
|
||||
return (conn, None) # case 3
|
||||
# case 2, redirect location includes a scheme
|
||||
# so setup a new connection and authenticate
|
||||
use_https = result.scheme == "https"
|
||||
api_providers = [(result.hostname, result.port, use_https)]
|
||||
client_eventlet = (
|
||||
quantum.plugins.nicira.nicira_nvp_plugin.api_client.client_eventlet
|
||||
)
|
||||
api_client = client_eventlet.NvpApiClientEventlet(
|
||||
api_providers, self._api_client.user, self._api_client.password,
|
||||
use_https=use_https)
|
||||
@ -256,7 +324,7 @@ class NvpApiRequestEventlet:
|
||||
self._headers["Cookie"] = api_client.auth_cookie
|
||||
else:
|
||||
self._headers["Cookie"] = ""
|
||||
conn = api_client.acquire_connection()
|
||||
conn = api_client.acquire_connection(rid=self._rid())
|
||||
if result.query:
|
||||
url = "%s?%s" % (result.path, result.query)
|
||||
else:
|
||||
@ -264,6 +332,7 @@ class NvpApiRequestEventlet:
|
||||
return (conn, url)
|
||||
|
||||
def _handle_request(self):
|
||||
'''First level request handling.'''
|
||||
attempt = 0
|
||||
response = None
|
||||
while response is None and attempt <= self._retries:
|
||||
@ -272,34 +341,35 @@ class NvpApiRequestEventlet:
|
||||
if self._auto_login and self._api_client.need_login:
|
||||
self._api_client.wait_for_login()
|
||||
|
||||
if self._api_client.auth_cookie and "Cookie" not in self._headers:
|
||||
if self._api_client.auth_cookie:
|
||||
self._headers["Cookie"] = self._api_client.auth_cookie
|
||||
|
||||
req = self.spawn(self._issue_request).wait()
|
||||
# automatically raises any exceptions returned.
|
||||
LOG.debug('req: %s' % type(req))
|
||||
|
||||
if isinstance(req, httplib.HTTPResponse):
|
||||
if ((req.status == httplib.UNAUTHORIZED
|
||||
or req.status == httplib.FORBIDDEN)):
|
||||
if (req.status == httplib.UNAUTHORIZED
|
||||
or req.status == httplib.FORBIDDEN):
|
||||
self._api_client.need_login = True
|
||||
if attempt <= self._retries:
|
||||
continue
|
||||
# else fall through to return the error code
|
||||
|
||||
LOG.debug("API Request '%s %s' complete: %s" %
|
||||
(self._method, self._url, req.status))
|
||||
lg.debug("[%d] Completed request '%s %s': %s"
|
||||
% (self._rid(), self._method, self._url, req.status))
|
||||
self._request_error = None
|
||||
response = req
|
||||
else:
|
||||
LOG.info('_handle_request: caught an error - %s' % req)
|
||||
lg.info('[%d] Error while handling request: %s' % (self._rid(),
|
||||
req))
|
||||
self._request_error = req
|
||||
response = None
|
||||
|
||||
LOG.debug('_handle_request: response - %s' % response)
|
||||
return response
|
||||
|
||||
|
||||
class NvpLoginRequestEventlet(NvpApiRequestEventlet):
|
||||
'''Process a login request.'''
|
||||
|
||||
def __init__(self, nvp_client, user, password):
|
||||
headers = {"Content-Type": "application/x-www-form-urlencoded"}
|
||||
body = urllib.urlencode({"username": user, "password": password})
|
||||
@ -314,6 +384,8 @@ class NvpLoginRequestEventlet(NvpApiRequestEventlet):
|
||||
|
||||
|
||||
class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
|
||||
'''Get a list of API providers.'''
|
||||
|
||||
def __init__(self, nvp_client):
|
||||
url = "/ws.v1/control-cluster/node?fields=roles"
|
||||
NvpApiRequestEventlet.__init__(
|
||||
@ -332,7 +404,7 @@ class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
|
||||
try:
|
||||
if self.successful():
|
||||
ret = []
|
||||
body = jsonutils.loads(self.value.body)
|
||||
body = json.loads(self.value.body)
|
||||
for node in body.get('results', []):
|
||||
for role in node.get('roles', []):
|
||||
if role.get('role') == 'api_provider':
|
||||
@ -340,13 +412,15 @@ class NvpGetApiProvidersRequestEventlet(NvpApiRequestEventlet):
|
||||
if addr:
|
||||
ret.append(_provider_from_listen_addr(addr))
|
||||
return ret
|
||||
except Exception, e:
|
||||
LOG.warn("Failed to parse API provider: %s" % e)
|
||||
except Exception as e:
|
||||
lg.warn("[%d] Failed to parse API provider: %s" % (self._rid(), e))
|
||||
# intentionally fall through
|
||||
return None
|
||||
|
||||
|
||||
class NvpGenericRequestEventlet(NvpApiRequestEventlet):
|
||||
'''Handle a generic request.'''
|
||||
|
||||
def __init__(self, nvp_client, method, url, body, content_type,
|
||||
auto_login=False,
|
||||
request_timeout=DEFAULT_REQUEST_TIMEOUT,
|
||||
|
@ -1,132 +0,0 @@
|
||||
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
from optparse import OptionParser
|
||||
import os
|
||||
import sys
|
||||
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import (
|
||||
NvpPlugin as QuantumManager,
|
||||
)
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.INFO)
|
||||
LOG = logging.getLogger('nvp-plugin-cli')
|
||||
|
||||
|
||||
def print_help():
|
||||
"""Help for CLI"""
|
||||
print "\nNVP Plugin Commands:"
|
||||
for key in COMMANDS.keys():
|
||||
print (" %s %s" %
|
||||
(key, " ".join(["<%s>" % y for y in COMMANDS[key]["args"]])))
|
||||
|
||||
|
||||
def build_args(cmd, cmdargs, arglist):
|
||||
"""Building the list of args for a particular CLI"""
|
||||
args = []
|
||||
orig_arglist = arglist[:]
|
||||
try:
|
||||
for cmdarg in cmdargs:
|
||||
args.append(arglist[0])
|
||||
del arglist[0]
|
||||
except:
|
||||
LOG.error("Not enough arguments for \"%s\" (expected: %d, got: %d)" % (
|
||||
cmd, len(cmdargs), len(orig_arglist)))
|
||||
print ("Usage:\n %s %s" %
|
||||
(cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]])))
|
||||
sys.exit()
|
||||
if len(arglist) > 0:
|
||||
LOG.error("Too many arguments for \"%s\" (expected: %d, got: %d)" % (
|
||||
cmd, len(cmdargs), len(orig_arglist)))
|
||||
print ("Usage:\n %s %s" %
|
||||
(cmd, " ".join(["<%s>" % y for y in COMMANDS[cmd]["args"]])))
|
||||
sys.exit()
|
||||
return args
|
||||
|
||||
|
||||
def check_config(manager):
|
||||
"""A series of checks to make sure the plugin is correctly configured."""
|
||||
checks = [{"function": nvplib.check_default_transport_zone,
|
||||
"desc": "Transport zone check:"}]
|
||||
any_failed = False
|
||||
for c in checks:
|
||||
result, msg = "PASS", ""
|
||||
try:
|
||||
c["function"]()
|
||||
except Exception, e:
|
||||
any_failed = True
|
||||
result = "FAIL"
|
||||
msg = "(%s)" % str(e)
|
||||
print "%s %s%s" % (c["desc"], result, msg)
|
||||
sys.exit({False: 0, True: 1}[any_failed])
|
||||
|
||||
|
||||
COMMANDS = {
|
||||
"check_config": {
|
||||
"need_login": True,
|
||||
"func": check_config,
|
||||
"args": []
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def main():
|
||||
usagestr = "Usage: %prog [OPTIONS] <command> [args]"
|
||||
PARSER = OptionParser(usage=usagestr)
|
||||
PARSER.add_option("-v", "--verbose", dest="verbose",
|
||||
action="store_true", default=False,
|
||||
help="turn on verbose logging")
|
||||
PARSER.add_option("-c", "--configfile", dest="configfile", type="string",
|
||||
default="/etc/quantum/plugins/nvp/nvp.ini",
|
||||
help="nvp plugin config file path (nvp.ini)")
|
||||
options, args = PARSER.parse_args()
|
||||
|
||||
loglevel = logging.INFO
|
||||
if options.verbose:
|
||||
loglevel = logging.DEBUG
|
||||
|
||||
LOG.setLevel(loglevel)
|
||||
|
||||
if len(args) < 1:
|
||||
PARSER.print_help()
|
||||
print_help()
|
||||
sys.exit(1)
|
||||
|
||||
CMD = args[0]
|
||||
if CMD not in COMMANDS.keys():
|
||||
LOG.error("Unknown command: %s" % CMD)
|
||||
print_help()
|
||||
sys.exit(1)
|
||||
|
||||
args = build_args(CMD, COMMANDS[CMD]["args"], args[1:])
|
||||
|
||||
LOG.debug("Executing command \"%s\" with args: %s" % (CMD, args))
|
||||
|
||||
manager = None
|
||||
if COMMANDS[CMD]["need_login"] is True:
|
||||
if not os.path.exists(options.configfile):
|
||||
LOG.error("NVP plugin configuration file \"%s\" doesn't exist!" %
|
||||
options.configfile)
|
||||
sys.exit(1)
|
||||
manager = QuantumManager(options.configfile, loglevel, cli=True)
|
||||
|
||||
COMMANDS[CMD]["func"](manager, *args)
|
||||
|
||||
sys.exit(0)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
quantum/plugins/nicira/nicira_nvp_plugin/common/__init__.py (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
quantum/plugins/nicira/nicira_nvp_plugin/common/config.py (new file, 132 lines)
@@ -0,0 +1,132 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2012 Nicira, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from quantum.openstack.common import cfg
|
||||
|
||||
|
||||
database_opts = [
|
||||
cfg.StrOpt('sql_connection', default='sqlite://'),
|
||||
cfg.IntOpt('sql_max_retries', default=-1),
|
||||
cfg.IntOpt('reconnect_interval', default=2),
|
||||
]
|
||||
|
||||
nvp_opts = [
|
||||
cfg.IntOpt('max_lp_per_bridged_ls', default=64),
|
||||
cfg.IntOpt('concurrent_connections', default=5),
|
||||
cfg.IntOpt('failover_time', default=240)
|
||||
]
|
||||
|
||||
cluster_opts = [
|
||||
cfg.StrOpt('default_tz_uuid'),
|
||||
cfg.StrOpt('nvp_cluster_uuid'),
|
||||
cfg.StrOpt('nova_zone_id'),
|
||||
cfg.MultiStrOpt('nvp_controller_connection')
|
||||
]
|
||||
|
||||
cfg.CONF.register_opts(database_opts, "DATABASE")
|
||||
cfg.CONF.register_opts(nvp_opts, "NVP")
|
||||
|
||||
|
||||
class ClusterConfigOptions(cfg.CommonConfigOpts):
|
||||
|
||||
def __init__(self, config_options):
|
||||
super(ClusterConfigOptions, self).__init__()
|
||||
self._group_mappings = {}
|
||||
self._config_opts = config_options._config_opts
|
||||
self._cparser = config_options._cparser
|
||||
self._oparser = config_options._oparser
|
||||
self.register_cli_opts(self._config_opts)
|
||||
|
||||
def _do_get(self, name, group=None):
|
||||
"""Look up an option value.
|
||||
|
||||
:param name: the opt name (or 'dest', more precisely)
|
||||
:param group: an OptGroup
|
||||
:returns: the option value, or a GroupAttr object
|
||||
:raises: NoSuchOptError, NoSuchGroupError, ConfigFileValueError,
|
||||
TemplateSubstitutionError
|
||||
"""
|
||||
if group is None and name in self._groups:
|
||||
return self.GroupAttr(self, self._get_group(name))
|
||||
info = self._get_opt_info(name, group)
|
||||
default, opt, override = [info[k] for k in sorted(info.keys())]
|
||||
if override is not None:
|
||||
return override
|
||||
|
||||
values = []
|
||||
if self._cparser is not None:
|
||||
section = group.name if group is not None else 'DEFAULT'
|
||||
# Check if the name of the group maps to something else in
|
||||
# the conf file.Otherwise leave the section name unchanged
|
||||
|
||||
section = self._group_mappings.get(section, section)
|
||||
try:
|
||||
value = opt._get_from_config_parser(self._cparser, section)
|
||||
except KeyError:
|
||||
pass
|
||||
except ValueError as ve:
|
||||
raise cfg.ConfigFileValueError(str(ve))
|
||||
else:
|
||||
if not opt.multi:
|
||||
# No need to continue since the last value wins
|
||||
return value[-1]
|
||||
values.extend(value)
|
||||
|
||||
name = name if group is None else group.name + '_' + name
|
||||
value = self._cli_values.get(name)
|
||||
if value is not None:
|
||||
if not opt.multi:
|
||||
return value
|
||||
|
||||
return value + values
|
||||
|
||||
if values:
|
||||
return values
|
||||
|
||||
if default is not None:
|
||||
return default
|
||||
|
||||
return opt.default
|
||||
|
||||
def register_opts(self, opts, group_internal_name=None, group=None):
|
||||
"""Register multiple option schemas at once."""
|
||||
if group_internal_name:
|
||||
self._group_mappings[group] = group_internal_name
|
||||
for opt in opts:
|
||||
self.register_opt(opt, group, clear_cache=False)
|
||||
|
||||
|
||||
def _retrieve_extra_groups(conf, key=None, delimiter=':'):
|
||||
"""retrieve configuration groups not listed above."""
|
||||
results = []
|
||||
for parsed_file in cfg.CONF._cparser.parsed:
|
||||
for parsed_item in parsed_file.keys():
|
||||
if not parsed_item in cfg.CONF:
|
||||
items = key and parsed_item.split(delimiter)
|
||||
if not key or key == items[0]:
|
||||
results.append(parsed_item)
|
||||
return results
|
||||
|
||||
|
||||
def register_cluster_groups(conf):
|
||||
"""retrieve configuration groups for nvp clusters."""
|
||||
cluster_names = []
|
||||
cluster_tags = _retrieve_extra_groups(conf, "CLUSTER")
|
||||
for tag in cluster_tags:
|
||||
cluster_name = tag.split(':')[1]
|
||||
conf.register_opts(cluster_opts, tag, cluster_name)
|
||||
cluster_names.append(cluster_name)
|
||||
return cluster_names
|
@ -1,4 +1,5 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -12,26 +13,8 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Brad Hall, Nicira Networks, Inc.
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
|
||||
import logging
|
||||
import unittest
|
||||
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
LOG = logging.getLogger("test_check")
|
||||
|
||||
|
||||
class NvpTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.quantum = NvpPlugin()
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
# These nvplib functions will throw an exception if the check fails
|
||||
def test_check_default_transport_zone(self):
|
||||
nvplib.check_default_transport_zone(self.quantum.controller)
|
||||
# This will get updated at build time. Version 0 indicates developer build.
|
||||
PLUGIN_VERSION = "0"
|
@ -1,4 +1,5 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
@ -12,222 +13,275 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# @author: Brad Hall, Nicira Networks, Inc.
|
||||
# @author: Dave Lapsley, Nicira Networks, Inc.
|
||||
# @author: Aaron Rosen, Nicira Networks, Inc.
|
||||
|
||||
|
||||
# TODO(bgh): We should break this into separate files. It will just keep
|
||||
# growing as we add more features :)
|
||||
|
||||
from copy import copy
|
||||
import functools
|
||||
import json
|
||||
import hashlib
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
import uuid
|
||||
|
||||
from eventlet import semaphore
|
||||
import NvpApiClient
|
||||
|
||||
#FIXME(danwent): I'd like this file to get to the point where it has
|
||||
# no quantum-specific logic in it
|
||||
from quantum.common import exceptions as exception
|
||||
from quantum.openstack.common import jsonutils
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin import NvpApiClient
|
||||
|
||||
LOCAL_LOGGING = False
|
||||
if LOCAL_LOGGING:
|
||||
from logging.handlers import SysLogHandler
|
||||
FORMAT = ("|%(levelname)s|%(filename)s|%(funcName)s|%(lineno)s"
|
||||
"|%(message)s")
|
||||
LOG = logging.getLogger(__name__)
|
||||
formatter = logging.Formatter(FORMAT)
|
||||
syslog = SysLogHandler(address="/dev/log")
|
||||
syslog.setFormatter(formatter)
|
||||
LOG.addHandler(syslog)
|
||||
LOG.setLevel(logging.DEBUG)
|
||||
else:
|
||||
LOG = logging.getLogger("nvplib")
|
||||
LOG.setLevel(logging.INFO)
|
||||
|
||||
# TODO(bgh): it would be more efficient to use a bitmap
|
||||
taken_context_ids = []
|
||||
|
||||
_net_type_cache = {} # cache of {net_id: network_type}
|
||||
# XXX Only cache default for now
|
||||
_lqueue_cache = {}
|
||||
|
||||
|
||||
LOG = logging.getLogger("nvplib")
|
||||
LOG.setLevel(logging.INFO)
|
||||
def get_cluster_version(cluster):
|
||||
"""Return major/minor version #"""
|
||||
# Get control-cluster nodes
|
||||
uri = "/ws.v1/control-cluster/node?_page_length=1&fields=uuid"
|
||||
try:
|
||||
res = do_single_request("GET", uri, cluster=cluster)
|
||||
res = json.loads(res)
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
if res["result_count"] == 0:
|
||||
return None
|
||||
node_uuid = res["results"][0]["uuid"]
|
||||
# Get control-cluster node status. It's unsupported to have controllers
|
||||
# running different version so we just need the first node version.
|
||||
uri = "/ws.v1/control-cluster/node/%s/status" % node_uuid
|
||||
try:
|
||||
res = do_single_request("GET", uri, cluster=cluster)
|
||||
res = json.loads(res)
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
version_parts = res["version"].split(".")
|
||||
version = "%s.%s" % tuple(version_parts[:2])
|
||||
LOG.info("NVP controller cluster version: %s" % version)
|
||||
return version
|
||||
|
||||
|
||||
def get_all_query_pages(path, c):
|
||||
need_more_results = True
|
||||
result_list = []
|
||||
page_cursor = None
|
||||
query_marker = "&" if (path.find("?") != -1) else "?"
|
||||
while need_more_results:
|
||||
page_cursor_str = (
|
||||
"_page_cursor=%s" % page_cursor if page_cursor else "")
|
||||
res = do_single_request("GET", "%s%s%s" %
|
||||
(path, query_marker, page_cursor_str),
|
||||
cluster=c)
|
||||
body = json.loads(res)
|
||||
page_cursor = body.get('page_cursor')
|
||||
if not page_cursor:
|
||||
need_more_results = False
|
||||
result_list.extend(body['results'])
|
||||
return result_list
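As a usage note, the pager above follows NVP's page_cursor until the backend stops returning one. A minimal sketch, assuming an NVPCluster-like "cluster" object; the URI mirrors the tag query used later in this module:

# Hedged sketch: collect every logical switch owned by a tenant, across
# however many pages NVP returns.
def list_tenant_lswitches(cluster, tenant_id):
    path = ("/ws.v1/lswitch?fields=uuid,display_name"
            "&tag=%s&tag_scope=os_tid" % tenant_id)
    return get_all_query_pages(path, cluster)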
|
||||
|
||||
|
||||
def do_single_request(*args, **kwargs):
|
||||
"""Issue a request to a specified controller if specified via kwargs
|
||||
(controller=<controller>)."""
|
||||
controller = kwargs["controller"]
|
||||
LOG.debug("Issuing request to controller: %s" % controller.name)
|
||||
return controller.api_client.request(*args)
|
||||
"""Issue a request to a specified cluster if specified via kwargs
|
||||
(cluster=<cluster>)."""
|
||||
cluster = kwargs["cluster"]
|
||||
return cluster.api_client.request(*args)
|
||||
|
||||
|
||||
def check_default_transport_zone(c):
|
||||
"""Make sure the default transport zone specified in the config exists"""
|
||||
msg = []
|
||||
# This will throw an exception on failure and that's ok since it will
|
||||
# just propagate to the cli.
|
||||
resp = do_single_request(
|
||||
"GET",
|
||||
"/ws.v1/transport-zone?uuid=%s" % c.default_tz_uuid,
|
||||
controller=c)
|
||||
result = jsonutils.loads(resp)
|
||||
if int(result["result_count"]) == 0:
|
||||
msg.append("Unable to find zone \"%s\" for controller \"%s\"" %
|
||||
(c.default_tz_uuid, c.name))
|
||||
if len(msg) > 0:
|
||||
raise Exception(' '.join(msg))
|
||||
def do_multi_request(*args, **kwargs):
|
||||
"""Issue a request to all clusters"""
|
||||
results = []
|
||||
clusters = kwargs["clusters"]
|
||||
for x in clusters:
|
||||
LOG.debug("Issuing request to cluster: %s" % x.name)
|
||||
rv = x.api_client.request(*args)
|
||||
results.append(rv)
|
||||
return results
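For contrast, the two request helpers above differ only in fan-out. A rough sketch, reusing the node-status URI from get_cluster_version(); the cluster objects are placeholders, not part of this change:

# Hedged sketch: one GET against a single cluster vs. the same GET
# fanned out to every configured cluster.
uri = "/ws.v1/control-cluster/node?_page_length=1&fields=uuid"
one_body = do_single_request("GET", uri, cluster=clusters[0])
all_bodies = do_multi_request("GET", uri, clusters=clusters)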
|
||||
|
||||
|
||||
def check_tenant(controller, net_id, tenant_id):
|
||||
"""Return true if the tenant "owns" this network"""
|
||||
net = get_network(controller, net_id)
|
||||
for t in net["tags"]:
|
||||
if t["scope"] == "os_tid" and t["tag"] == tenant_id:
|
||||
return True
|
||||
return False
|
||||
|
||||
# -------------------------------------------------------------------
|
||||
# Network functions
|
||||
# -------------------------------------------------------------------
|
||||
def find_port_and_cluster(clusters, port_id):
|
||||
"""Return (url, cluster_id) of port or (None, None) if port does not exist.
|
||||
"""
|
||||
for c in clusters:
|
||||
query = "/ws.v1/lswitch/*/lport?uuid=%s&fields=*" % port_id
|
||||
LOG.debug("Looking for lswitch with port id \"%s\" on: %s"
|
||||
% (port_id, c))
|
||||
try:
|
||||
res = do_single_request('GET', query, cluster=c)
|
||||
except Exception as e:
|
||||
LOG.error("get_port_cluster_and_url, exception: %s" % str(e))
|
||||
continue
|
||||
res = json.loads(res)
|
||||
if len(res["results"]) == 1:
|
||||
return (res["results"][0], c)
|
||||
return (None, None)
|
||||
|
||||
|
||||
def get_network(controller, net_id):
|
||||
def find_lswitch_by_portid(clusters, port_id):
|
||||
port, cluster = find_port_and_cluster(clusters, port_id)
|
||||
if port and cluster:
|
||||
href = port["_href"].split('/')
|
||||
return (href[3], cluster)
|
||||
return (None, None)
|
||||
|
||||
|
||||
def get_network(cluster, net_id):
|
||||
path = "/ws.v1/lswitch/%s" % net_id
|
||||
try:
|
||||
resp_obj = do_single_request("GET", path, controller=controller)
|
||||
network = jsonutils.loads(resp_obj)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
resp_obj = do_single_request("GET", path, cluster=cluster)
|
||||
network = json.loads(resp_obj)
|
||||
LOG.warning("### nw:%s", network)
|
||||
except NvpApiClient.ResourceNotFound:
|
||||
raise exception.NetworkNotFound(net_id=net_id)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
LOG.debug("Got network \"%s\": %s" % (net_id, network))
|
||||
return network
|
||||
|
||||
|
||||
def create_lswitch(controller, lswitch_obj):
|
||||
LOG.debug("Creating lswitch: %s" % lswitch_obj)
|
||||
def create_lswitch(cluster, lswitch_obj):
|
||||
LOG.info("Creating lswitch: %s" % lswitch_obj)
|
||||
# Warn if no tenant is specified
|
||||
found = "os_tid" in [x["scope"] for x in lswitch_obj["tags"]]
|
||||
if not found:
|
||||
LOG.warn("No tenant-id tag specified in logical switch: %s" %
|
||||
lswitch_obj)
|
||||
LOG.warn("No tenant-id tag specified in logical switch: %s" % (
|
||||
lswitch_obj))
|
||||
uri = "/ws.v1/lswitch"
|
||||
try:
|
||||
resp_obj = do_single_request("POST", uri,
|
||||
jsonutils.dumps(lswitch_obj),
|
||||
controller=controller)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
json.dumps(lswitch_obj),
|
||||
cluster=cluster)
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
|
||||
r = jsonutils.loads(resp_obj)
|
||||
r = json.loads(resp_obj)
|
||||
d = {}
|
||||
d["net-id"] = r["uuid"]
|
||||
d["net-name"] = r["display_name"]
|
||||
d["net-id"] = r['uuid']
|
||||
d["net-name"] = r['display_name']
|
||||
LOG.debug("Created logical switch: %s" % d["net-id"])
|
||||
return d
|
||||
|
||||
|
||||
def update_network(controller, network, **kwargs):
|
||||
uri = "/ws.v1/lswitch/" + network
|
||||
def update_network(cluster, switch, **params):
|
||||
uri = "/ws.v1/lswitch/" + switch
|
||||
lswitch_obj = {}
|
||||
if "name" in kwargs:
|
||||
lswitch_obj["display_name"] = kwargs["name"]
|
||||
if params["network"]["name"]:
|
||||
lswitch_obj["display_name"] = params["network"]["name"]
|
||||
try:
|
||||
resp_obj = do_single_request("PUT",
|
||||
uri,
|
||||
jsonutils.dumps(lswitch_obj),
|
||||
controller=controller)
|
||||
resp_obj = do_single_request("PUT", uri, json.dumps(lswitch_obj),
|
||||
cluster=cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=network)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
obj = jsonutils.loads(resp_obj)
|
||||
obj = json.loads(resp_obj)
|
||||
return obj
|
||||
|
||||
|
||||
def get_all_networks(controller, tenant_id, networks):
|
||||
"""Append the quantum network uuids we can find in the given controller to
|
||||
def get_all_networks(cluster, tenant_id, networks):
|
||||
"""Append the quantum network uuids we can find in the given cluster to
|
||||
"networks"
|
||||
"""
|
||||
uri = "/ws.v1/lswitch?fields=*&tag=%s&tag_scope=os_tid" % tenant_id
|
||||
try:
|
||||
resp_obj = do_single_request("GET", uri, controller=controller)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
resp_obj = do_single_request("GET", uri, cluster=cluster)
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
if not resp_obj:
|
||||
return []
|
||||
lswitches = jsonutils.loads(resp_obj)["results"]
|
||||
for lswitch in lswitches:
|
||||
net_id = lswitch["uuid"]
|
||||
if net_id not in [x["net-id"] for x in networks]:
|
||||
networks.append({"net-id": net_id,
|
||||
"net-name": lswitch["display_name"]})
|
||||
return networks
|
||||
lswitches = json.loads(resp_obj)["results"]
|
||||
networks_result = copy(networks)
|
||||
return networks_result
|
||||
|
||||
|
||||
def query_networks(controller, tenant_id, fields="*", tags=None):
|
||||
def query_networks(cluster, tenant_id, fields="*", tags=None):
|
||||
uri = "/ws.v1/lswitch?fields=%s" % fields
|
||||
if tags:
|
||||
for t in tags:
|
||||
uri += "&tag=%s&tag_scope=%s" % (t[0], t[1])
|
||||
try:
|
||||
resp_obj = do_single_request("GET", uri, controller=controller)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
resp_obj = do_single_request("GET", uri, cluster=cluster)
|
||||
except NvpApiClient.NvpApiException:
|
||||
raise exception.QuantumException()
|
||||
if not resp_obj:
|
||||
return []
|
||||
lswitches = jsonutils.loads(resp_obj)["results"]
|
||||
nets = [{'net-id': lswitch["uuid"],
|
||||
'net-name': lswitch["display_name"]}
|
||||
lswitches = json.loads(resp_obj)["results"]
|
||||
nets = [{'net-id': lswitch["uuid"], 'net-name': lswitch["display_name"]}
|
||||
for lswitch in lswitches]
|
||||
return nets
|
||||
|
||||
|
||||
def delete_network(controller, network):
|
||||
delete_networks(controller, [network])
|
||||
def delete_network(cluster, net_id, lswitch_id):
|
||||
delete_networks(cluster, net_id, [lswitch_id])
|
||||
|
||||
|
||||
def delete_networks(controller, networks):
|
||||
for network in networks:
|
||||
path = "/ws.v1/lswitch/%s" % network
|
||||
def delete_networks(cluster, net_id, lswitch_ids):
|
||||
if net_id in _net_type_cache:
|
||||
del _net_type_cache[net_id]
|
||||
for ls_id in lswitch_ids:
|
||||
path = "/ws.v1/lswitch/%s" % ls_id
|
||||
|
||||
try:
|
||||
do_single_request("DELETE", path, controller=controller)
|
||||
do_single_request("DELETE", path, cluster=cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=network)
|
||||
raise exception.NetworkNotFound(net_id=ls_id)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
|
||||
def create_network(tenant_id, net_name, **kwargs):
|
||||
controller = kwargs["controller"]
|
||||
clusters = kwargs["clusters"]
|
||||
# Default to the primary cluster
|
||||
cluster = clusters[0]
|
||||
|
||||
transport_zone = kwargs.get("transport_zone",
|
||||
controller.default_tz_uuid)
|
||||
transport_type = kwargs.get("transport_type", "gre")
|
||||
lswitch_obj = {
|
||||
"display_name": net_name,
|
||||
"transport_zones": [{
|
||||
"zone_uuid": transport_zone,
|
||||
"transport_type": transport_type,
|
||||
}],
|
||||
"tags": [{"tag": tenant_id, "scope": "os_tid"}],
|
||||
}
|
||||
cluster.default_tz_uuid)
|
||||
transport_type = kwargs.get("transport_type", "stt")
|
||||
lswitch_obj = {"display_name": net_name,
|
||||
"transport_zones": [
|
||||
{"zone_uuid": transport_zone,
|
||||
"transport_type": transport_type}
|
||||
],
|
||||
"tags": [{"tag": tenant_id, "scope": "os_tid"}]}
|
||||
|
||||
net = create_lswitch(controller, lswitch_obj)
|
||||
net = create_lswitch(cluster, lswitch_obj)
|
||||
net['net-op-status'] = "UP"
|
||||
return net
|
||||
|
||||
#---------------------------------------------------------------------
|
||||
# Port functions
|
||||
#---------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_port_stats(controller, network_id, port_id):
|
||||
try:
|
||||
do_single_request("GET", "/ws.v1/lswitch/%s" % (network_id),
|
||||
controller=controller)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=network_id)
|
||||
try:
|
||||
path = "/ws.v1/lswitch/%s/lport/%s/statistic" % (network_id, port_id)
|
||||
resp = do_single_request("GET", path, controller=controller)
|
||||
stats = jsonutils.loads(resp)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port_id, net_id=network_id)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
LOG.debug("Returning stats for port \"%s\" on \"%s\": %s" % (port_id,
|
||||
network_id,
|
||||
stats))
|
||||
return stats
|
||||
|
||||
|
||||
def check_port_state(state):
|
||||
if state not in ["ACTIVE", "DOWN"]:
|
||||
LOG.error("Invalid port state (ACTIVE and DOWN are valid states): %s" %
|
||||
state)
|
||||
raise exception.StateInvalid(port_state=state)
|
||||
|
||||
|
||||
def query_ports(controller, network, relations=None, fields="*", filters=None):
|
||||
def query_ports(cluster, network, relations=None, fields="*", filters=None):
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport?"
|
||||
if relations:
|
||||
uri += "relations=%s" % relations
|
||||
@ -235,44 +289,75 @@ def query_ports(controller, network, relations=None, fields="*", filters=None):
|
||||
if filters and "attachment" in filters:
|
||||
uri += "&attachment_vif_uuid=%s" % filters["attachment"]
|
||||
try:
|
||||
resp_obj = do_single_request("GET", uri, controller=controller)
|
||||
resp_obj = do_single_request("GET", uri, cluster=cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=network)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
return jsonutils.loads(resp_obj)["results"]
|
||||
return json.loads(resp_obj)["results"]
|
||||
|
||||
|
||||
def delete_port(controller, network, port):
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport/" + port
|
||||
def delete_port(cluster, port):
|
||||
try:
|
||||
do_single_request("DELETE", uri, controller=controller)
|
||||
do_single_request("DELETE", port['_href'], cluster=cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port, net_id=network)
|
||||
raise exception.PortNotFound(port_id=port['uuid'])
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
|
||||
def delete_all_ports(controller, ls_uuid):
|
||||
res = do_single_request("GET", "/ws.v1/lswitch/%s/lport?fields=uuid" %
|
||||
ls_uuid, controller=controller)
|
||||
res = jsonutils.loads(res)
|
||||
for r in res["results"]:
|
||||
do_single_request(
|
||||
"DELETE",
|
||||
"/ws.v1/lswitch/%s/lport/%s" % (ls_uuid, r["uuid"]),
|
||||
controller=controller)
|
||||
def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
|
||||
"""Return (url, cluster_id) of port or raises ResourceNotFound
|
||||
"""
|
||||
query = ("/ws.v1/lswitch/%s/lport?fields=admin_status_enabled,"
|
||||
"fabric_status_up,uuid&tag=%s&tag_scope=q_port_id"
|
||||
"&relations=LogicalPortStatus" % (lswitch, quantum_tag))
|
||||
|
||||
LOG.debug("Looking for port with q_tag \"%s\" on: %s"
|
||||
% (quantum_tag, lswitch))
|
||||
for c in clusters:
|
||||
try:
|
||||
res_obj = do_single_request('GET', query, cluster=c)
|
||||
except Exception as e:
|
||||
continue
|
||||
res = json.loads(res_obj)
|
||||
if len(res["results"]) == 1:
|
||||
return (res["results"][0], c)
|
||||
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)
|
||||
|
||||
|
||||
def get_port(controller, network, port, relations=None):
|
||||
def get_port_by_display_name(clusters, lswitch, display_name):
|
||||
"""Return (url, cluster_id) of port or raises ResourceNotFound
|
||||
"""
|
||||
query = ("/ws.v1/lswitch/%s/lport?display_name=%s&fields=*" %
|
||||
(lswitch, display_name))
|
||||
LOG.debug("Looking for port with display_name \"%s\" on: %s"
|
||||
% (display_name, lswitch))
|
||||
for c in clusters:
|
||||
try:
|
||||
res_obj = do_single_request('GET', query, cluster=c)
|
||||
except Exception as e:
|
||||
continue
|
||||
res = json.loads(res_obj)
|
||||
if len(res["results"]) == 1:
|
||||
return (res["results"][0], c)
|
||||
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=display_name, net_id=lswitch)
|
||||
|
||||
|
||||
def get_port(cluster, network, port, relations=None):
|
||||
LOG.info("get_port() %s %s" % (network, port))
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "?"
|
||||
if relations:
|
||||
uri += "relations=%s" % relations
|
||||
try:
|
||||
resp_obj = do_single_request("GET", uri, controller=controller)
|
||||
port = jsonutils.loads(resp_obj)
|
||||
resp_obj = do_single_request("GET", uri, cluster=cluster)
|
||||
port = json.loads(resp_obj)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port, net_id=network)
|
||||
@ -281,130 +366,75 @@ def get_port(controller, network, port, relations=None):
|
||||
return port
|
||||
|
||||
|
||||
def plug_interface(controller, network, port, type, attachment=None):
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "/attachment"
|
||||
|
||||
lport_obj = {}
|
||||
if attachment:
|
||||
lport_obj["vif_uuid"] = attachment
|
||||
|
||||
lport_obj["type"] = type
|
||||
try:
|
||||
resp_obj = do_single_request("PUT",
|
||||
uri,
|
||||
jsonutils.dumps(lport_obj),
|
||||
controller=controller)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port, net_id=network)
|
||||
except NvpApiClient.Conflict as e:
|
||||
LOG.error("Conflict while making attachment to port, "
|
||||
"Error: %s" % str(e))
|
||||
raise exception.AlreadyAttached(att_id=attachment,
|
||||
port_id=port,
|
||||
net_id=network,
|
||||
att_port_id="UNKNOWN")
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
result = jsonutils.dumps(resp_obj)
|
||||
return result
|
||||
|
||||
|
||||
def unplug_interface(controller, network, port):
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport/" + port + "/attachment"
|
||||
lport_obj = {"type": "NoAttachment"}
|
||||
try:
|
||||
resp_obj = do_single_request("PUT",
|
||||
uri,
|
||||
jsonutils.dumps(lport_obj),
|
||||
controller=controller)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port, net_id=network)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
return jsonutils.loads(resp_obj)
|
||||
|
||||
|
||||
def update_port(network, port_id, **params):
|
||||
controller = params["controller"]
|
||||
cluster = params["cluster"]
|
||||
lport_obj = {}
|
||||
|
||||
if "state" in params:
|
||||
state = params["state"]
|
||||
check_port_state(state)
|
||||
admin_status = True
|
||||
if state == "DOWN":
|
||||
admin_status = False
|
||||
lport_obj["admin_status_enabled"] = admin_status
|
||||
admin_state_up = params['port'].get('admin_state_up')
|
||||
name = params["port"].get("name")
|
||||
if admin_state_up:
|
||||
lport_obj["admin_status_enabled"] = admin_state_up
|
||||
if name:
|
||||
lport_obj["display_name"] = name
|
||||
|
||||
uri = "/ws.v1/lswitch/" + network + "/lport/" + port_id
|
||||
try:
|
||||
resp_obj = do_single_request("PUT",
|
||||
uri,
|
||||
jsonutils.dumps(lport_obj),
|
||||
controller=controller)
|
||||
resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
|
||||
cluster=cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port_id, net_id=network)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
obj = jsonutils.loads(resp_obj)
|
||||
obj["port-op-status"] = get_port_status(controller, network, obj["uuid"])
|
||||
obj = json.loads(resp_obj)
|
||||
obj["port-op-status"] = get_port_status(cluster, network, obj["uuid"])
|
||||
return obj
|
||||
|
||||
|
||||
def create_port(tenant, network, port_init_state, **params):
|
||||
# Check initial state -- this throws an exception if the port state is
|
||||
# invalid
|
||||
check_port_state(port_init_state)
|
||||
|
||||
controller = params["controller"]
|
||||
|
||||
ls_uuid = network
|
||||
|
||||
admin_status = True
|
||||
if port_init_state == "DOWN":
|
||||
admin_status = False
|
||||
lport_obj = {"admin_status_enabled": admin_status}
|
||||
def create_port(tenant, **params):
|
||||
print "create_port_nvplib"
|
||||
print params
|
||||
clusters = params["clusters"]
|
||||
dest_cluster = clusters[0] # primary cluster
|
||||
|
||||
ls_uuid = params["port"]["network_id"]
|
||||
# device_id can be longer than 40 so we rehash it
|
||||
device_id = hashlib.sha1(params["port"]["device_id"]).hexdigest()
|
||||
lport_obj = dict(
|
||||
admin_status_enabled=params["port"]["admin_state_up"],
|
||||
display_name=params["port"]["name"],
|
||||
tags=[dict(scope='os_tid', tag=tenant),
|
||||
dict(scope='q_port_id', tag=params["port"]["id"]),
|
||||
dict(scope='vm_id', tag=device_id)]
|
||||
)
|
||||
path = "/ws.v1/lswitch/" + ls_uuid + "/lport"
|
||||
|
||||
try:
|
||||
resp_obj = do_single_request("POST",
|
||||
path,
|
||||
jsonutils.dumps(lport_obj),
|
||||
controller=controller)
|
||||
resp_obj = do_single_request("POST", path, json.dumps(lport_obj),
|
||||
cluster=dest_cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=network)
|
||||
raise exception.NetworkNotFound(net_id=params["port"]["network_id"])
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
result = jsonutils.loads(resp_obj)
|
||||
result['port-op-status'] = get_port_status(controller, ls_uuid,
|
||||
result = json.loads(resp_obj)
|
||||
result['port-op-status'] = get_port_status(dest_cluster, ls_uuid,
|
||||
result['uuid'])
|
||||
return result
|
||||
|
||||
params["port"].update({"admin_state_up": result["admin_status_enabled"],
|
||||
"status": result["port-op-status"]})
|
||||
return (params["port"], result['uuid'])
|
||||
|
||||
|
||||
def get_port_status(controller, lswitch_id, port_id):
|
||||
def get_port_status(cluster, lswitch_id, port_id):
|
||||
"""Retrieve the operational status of the port"""
|
||||
# Make sure the network exists first
|
||||
try:
|
||||
do_single_request("GET", "/ws.v1/lswitch/%s" % (lswitch_id),
|
||||
controller=controller)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Network not found, Error: %s" % str(e))
|
||||
raise exception.NetworkNotFound(net_id=lswitch_id)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
try:
|
||||
r = do_single_request(
|
||||
"GET",
|
||||
"/ws.v1/lswitch/%s/lport/%s/status" % (lswitch_id, port_id),
|
||||
controller=controller)
|
||||
r = jsonutils.loads(r)
|
||||
r = do_single_request("GET",
|
||||
"/ws.v1/lswitch/%s/lport/%s/status" %
|
||||
(lswitch_id, port_id), cluster=cluster)
|
||||
r = json.loads(r)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port_id, net_id=lswitch_id)
|
||||
@ -414,3 +444,32 @@ def get_port_status(controller, lswitch_id, port_id):
|
||||
return "UP"
|
||||
else:
|
||||
return "DOWN"
|
||||
|
||||
|
||||
def plug_interface(clusters, lswitch_id, port, type, attachment=None):
|
||||
dest_cluster = clusters[0] # primary cluster
|
||||
uri = "/ws.v1/lswitch/" + lswitch_id + "/lport/" + port + "/attachment"
|
||||
|
||||
lport_obj = {}
|
||||
if attachment:
|
||||
lport_obj["vif_uuid"] = attachment
|
||||
|
||||
lport_obj["type"] = type
|
||||
try:
|
||||
resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
|
||||
cluster=dest_cluster)
|
||||
except NvpApiClient.ResourceNotFound as e:
|
||||
LOG.error("Port or Network not found, Error: %s" % str(e))
|
||||
raise exception.PortNotFound(port_id=port, net_id=lswitch_id)
|
||||
except NvpApiClient.Conflict as e:
|
||||
LOG.error("Conflict while making attachment to port, "
|
||||
"Error: %s" % str(e))
|
||||
raise exception.AlreadyAttached(att_id=attachment,
|
||||
port_id=port,
|
||||
net_id=lswitch_id,
|
||||
att_port_id="UNKNOWN")
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
raise exception.QuantumException()
|
||||
|
||||
result = json.dumps(resp_obj)
|
||||
return result
|
||||
|
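Taken together, the helpers in this nvplib module are driven roughly in this order by the plugin. The sketch below only chains calls defined above; the tenant, cluster list, attachment type and UUIDs are placeholder values, not taken from this change:

# Hedged sketch of the create path: logical switch, then port, then
# attachment.  "clusters" is a placeholder list of NVPCluster-like objects.
net = create_network("tenant-1", "private-net", clusters=clusters)
port, lport_uuid = create_port("tenant-1",
                               clusters=clusters,
                               port={"network_id": net["net-id"],
                                     "id": "quantum-port-uuid",
                                     "name": "port0",
                                     "admin_state_up": True,
                                     "device_id": "instance-uuid"})
plug_interface(clusters, net["net-id"], lport_uuid,
               "VifAttachment",      # assumed NVP attachment type string
               attachment="vif-uuid")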
113
quantum/plugins/nicira/nicira_nvp_plugin/run_tests.py
Executable file
@ -0,0 +1,113 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 OpenStack, LLC
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Unittest runner for Nicira NVP plugin
|
||||
|
||||
This file should be run from the top dir in the quantum directory
|
||||
|
||||
To run all tests::
|
||||
PLUGIN_DIR=quantum/plugins/nicira ./run_tests.sh
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
|
||||
import mock
|
||||
from nose import config
|
||||
from nose import core
|
||||
|
||||
CONFIG_FILE_OPT = "--config-file"
|
||||
NICIRA_PATH = "quantum/plugins/nicira/nicira_nvp_plugin"
|
||||
|
||||
sys.path.append(os.getcwd())
|
||||
sys.path.append(os.path.dirname(__file__))
|
||||
sys.path.append(os.path.abspath(NICIRA_PATH))
|
||||
|
||||
from quantum.common.test_lib import run_tests, test_config
|
||||
from quantum.openstack.common import cfg
|
||||
import quantum.tests.unit
|
||||
from quantum import version
|
||||
|
||||
from tests import fake_nvpapiclient
|
||||
|
||||
if __name__ == '__main__':
|
||||
exit_status = False
|
||||
do_mock = False
|
||||
# remove the value
|
||||
test_config['config_files'] = []
|
||||
|
||||
# if a single test case was specified,
|
||||
# we should only invoke the tests once
|
||||
invoke_once = len(sys.argv) > 1
|
||||
# this will allow us to pass --config-file to run_tests.sh for
|
||||
# running the unit tests against a real backend
|
||||
# if --config-file has been specified, remove it from sys.argv
|
||||
# otherwise nose will complain
|
||||
while CONFIG_FILE_OPT in sys.argv:
|
||||
test_config['config_files'].append(
|
||||
sys.argv.pop(sys.argv.index(CONFIG_FILE_OPT) + 1))
|
||||
# and the option itself
|
||||
sys.argv.remove(CONFIG_FILE_OPT)
|
||||
|
||||
# if no config file available, inject one for fake backend tests
|
||||
if not test_config.get('config_files'):
|
||||
do_mock = True
|
||||
test_config['config_files'] = [os.path.abspath('%s/tests/nvp.ini.test'
|
||||
% NICIRA_PATH)]
|
||||
|
||||
test_config['plugin_name_v2'] = "QuantumPlugin.NvpPluginV2"
|
||||
cwd = os.getcwd()
|
||||
c = config.Config(stream=sys.stdout,
|
||||
env=os.environ,
|
||||
verbosity=3,
|
||||
includeExe=True,
|
||||
traverseNamespace=True,
|
||||
plugins=core.DefaultPluginManager())
|
||||
c.configureWhere(quantum.tests.unit.__path__)
|
||||
|
||||
# patch nvpapi client if not running against "real" back end
|
||||
if do_mock:
|
||||
fc = fake_nvpapiclient.FakeClient(os.path.abspath('%s/tests'
|
||||
% NICIRA_PATH))
|
||||
mock_nvpapi = mock.patch('NvpApiClient.NVPApiHelper', autospec=True)
|
||||
instance = mock_nvpapi.start()
|
||||
instance.return_value.login.return_value = "the_cookie"
|
||||
|
||||
def _fake_request(*args, **kwargs):
|
||||
return fc.fake_request(*args, **kwargs)
|
||||
|
||||
instance.return_value.request.side_effect = _fake_request
|
||||
|
||||
exit_status = run_tests(c)
|
||||
if invoke_once:
|
||||
sys.exit(0)
|
||||
|
||||
os.chdir(cwd)
|
||||
|
||||
working_dir = os.path.abspath(NICIRA_PATH)
|
||||
c = config.Config(stream=sys.stdout,
|
||||
env=os.environ,
|
||||
verbosity=3,
|
||||
workingDir=working_dir)
|
||||
exit_status = exit_status or run_tests(c)
|
||||
|
||||
# restore original nvpapi client (probably pleonastic here)
|
||||
if do_mock:
|
||||
mock_nvpapi.stop()
|
||||
sys.exit(exit_status)
|
@ -0,0 +1,17 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
#@author: Brad Hall, Nicira Networks, Inc.
|
||||
#@author: Dave Lapsley, Nicira Networks, Inc.
|
||||
#@author: Aaron Rosen, Nicira Networks, Inc.
|
@ -0,0 +1,18 @@
|
||||
{"display_name": "%(uuid)s",
|
||||
"_relations":
|
||||
{"LogicalPortStatus":
|
||||
{"type": "LogicalSwitchPortStatus",
|
||||
"fabric_status_up": false,
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s/status",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortStatus"}
|
||||
},
|
||||
"tags":
|
||||
[{"scope": "q_port_id", "tag": "%(quantum_port_id)s"},
|
||||
{"scope": "vm_id", "tag": "%(quantum_device_id)s"},
|
||||
{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"uuid": "%(uuid)s",
|
||||
"admin_status_enabled": true,
|
||||
"type": "LogicalSwitchPortConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s"
|
||||
}
|
@ -0,0 +1,22 @@
|
||||
{"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
|
||||
"lswitch":
|
||||
{"display_name": "%(ls_name)s",
|
||||
"uuid": "%(ls_uuid)s",
|
||||
"tags": [
|
||||
{"scope": "os_tid",
|
||||
"tag": "%(ls_tenant_id)s"}
|
||||
],
|
||||
"type": "LogicalSwitchConfig",
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
|
||||
"port_isolation_enabled": false,
|
||||
"transport_zones": [
|
||||
{"zone_uuid": "%(ls_zone_uuid)s",
|
||||
"transport_type": "stt"}
|
||||
],
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s"},
|
||||
"link_status_up": false,
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortStatus",
|
||||
"admin_status_up": true,
|
||||
"fabric_status_up": false,
|
||||
"type": "LogicalSwitchPortStatus"
|
||||
}
|
@ -0,0 +1,10 @@
{"display_name": "%(display_name)s",
 "_href": "/ws.v1/lswitch/%(uuid)s",
 "_schema": "/ws.v1/schema/LogicalSwitchConfig",
 "_relations": {"LogicalSwitchStatus":
                {"fabric_status": true,
                 "type": "LogicalSwitchStatus",
                 "_href": "/ws.v1/lswitch/%(uuid)s/status",
                 "_schema": "/ws.v1/schema/LogicalSwitchStatus"}},
 "type": "LogicalSwitchConfig",
 "uuid": "%(uuid)s"}
@ -0,0 +1,234 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
import urlparse
|
||||
|
||||
LOG = logging.getLogger("fake_nvpapiclient")
|
||||
LOG.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
class FakeClient:
|
||||
|
||||
FAKE_GET_RESPONSES = {
|
||||
"lswitch": "fake_get_lswitch.json",
|
||||
"lport": "fake_get_lport.json",
|
||||
"lportstatus": "fake_get_lport_status.json"
|
||||
}
|
||||
|
||||
FAKE_POST_RESPONSES = {
|
||||
"lswitch": "fake_post_lswitch.json",
|
||||
"lport": "fake_post_lport.json"
|
||||
}
|
||||
|
||||
FAKE_PUT_RESPONSES = {
|
||||
"lswitch": "fake_post_lswitch.json",
|
||||
"lport": "fake_post_lport.json"
|
||||
}
|
||||
|
||||
_fake_lswitch_dict = {}
|
||||
_fake_lport_dict = {}
|
||||
_fake_lportstatus_dict = {}
|
||||
|
||||
def __init__(self, fake_files_path):
|
||||
self.fake_files_path = fake_files_path
|
||||
|
||||
def _get_tag(self, resource, scope):
|
||||
tags = [tag['tag'] for tag in resource['tags']
|
||||
if tag['scope'] == scope]
|
||||
return len(tags) > 0 and tags[0]
|
||||
|
||||
def _get_filters(self, querystring):
|
||||
if not querystring:
|
||||
return (None, None)
|
||||
params = urlparse.parse_qs(querystring)
|
||||
tag_filter = None
|
||||
attr_filter = None
|
||||
if 'tag' in params and 'tag_scope' in params:
|
||||
tag_filter = {'scope': params['tag_scope'][0],
|
||||
'tag': params['tag'][0]}
|
||||
elif 'uuid' in params:
|
||||
attr_filter = {'uuid': params['uuid'][0]}
|
||||
return (tag_filter, attr_filter)
|
||||
|
||||
def _add_lswitch(self, body):
|
||||
fake_lswitch = json.loads(body)
|
||||
fake_lswitch['uuid'] = str(uuid.uuid4())
|
||||
self._fake_lswitch_dict[fake_lswitch['uuid']] = fake_lswitch
|
||||
# put the tenant_id and the zone_uuid in the main dict
|
||||
# for simplifying templating
|
||||
zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
|
||||
fake_lswitch['zone_uuid'] = zone_uuid
|
||||
fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
|
||||
return fake_lswitch
|
||||
|
||||
def _add_lport(self, body, ls_uuid):
|
||||
fake_lport = json.loads(body)
|
||||
fake_lport['uuid'] = str(uuid.uuid4())
|
||||
# put the tenant_id and the ls_uuid in the main dict
|
||||
# for simplifying templating
|
||||
fake_lport['ls_uuid'] = ls_uuid
|
||||
fake_lport['tenant_id'] = self._get_tag(fake_lport, 'os_tid')
|
||||
fake_lport['quantum_port_id'] = self._get_tag(fake_lport,
|
||||
'q_port_id')
|
||||
fake_lport['quantum_device_id'] = self._get_tag(fake_lport, 'vm_id')
|
||||
self._fake_lport_dict[fake_lport['uuid']] = fake_lport
|
||||
|
||||
fake_lswitch = self._fake_lswitch_dict[ls_uuid]
|
||||
fake_lport_status = fake_lport.copy()
|
||||
fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
|
||||
fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
|
||||
fake_lport_status['ls_name'] = fake_lswitch['display_name']
|
||||
fake_lport_status['ls_zone_uuid'] = fake_lswitch['zone_uuid']
|
||||
self._fake_lportstatus_dict[fake_lport['uuid']] = fake_lport_status
|
||||
return fake_lport
|
||||
|
||||
def _get_resource_type(self, path):
|
||||
uri_split = path.split('/')
|
||||
resource_type = ('status' in uri_split and
|
||||
'lport' in uri_split and 'lportstatus'
|
||||
or 'lport' in uri_split and 'lport'
|
||||
or 'lswitch' in uri_split and 'lswitch')
|
||||
switch_uuid = ('lswitch' in uri_split and
|
||||
len(uri_split) > 3 and uri_split[3])
|
||||
port_uuid = ('lport' in uri_split and
|
||||
len(uri_split) > 5 and uri_split[5])
|
||||
return (resource_type, switch_uuid, port_uuid)
|
||||
|
||||
def _list(self, resource_type, response_file,
|
||||
switch_uuid=None, query=None):
|
||||
(tag_filter, attr_filter) = self._get_filters(query)
|
||||
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
|
||||
if switch_uuid == "*":
|
||||
switch_uuid = None
|
||||
|
||||
def _attr_match(res_uuid):
|
||||
if not attr_filter:
|
||||
return True
|
||||
item = res_dict[res_uuid]
|
||||
for (attr, value) in attr_filter.iteritems():
|
||||
if item.get(attr) != value:
|
||||
return False
|
||||
return True
|
||||
|
||||
def _tag_match(res_uuid):
|
||||
if not tag_filter:
|
||||
return True
|
||||
return any([x['scope'] == tag_filter['scope'] and
|
||||
x['tag'] == tag_filter['tag']
|
||||
for x in res_dict[res_uuid]['tags']])
|
||||
|
||||
def _lswitch_match(res_uuid):
|
||||
if (not switch_uuid or
|
||||
res_dict[res_uuid].get('ls_uuid') == switch_uuid):
|
||||
return True
|
||||
return False
|
||||
|
||||
items = [json.loads(response_template % res_dict[res_uuid])
|
||||
for res_uuid in res_dict
|
||||
if (_lswitch_match(res_uuid) and
|
||||
_tag_match(res_uuid) and
|
||||
_attr_match(res_uuid))]
|
||||
|
||||
return json.dumps({'results': items,
|
||||
'result_count': len(items)})
|
||||
|
||||
def _show(self, resource_type, response_file,
|
||||
switch_uuid, port_uuid=None):
|
||||
target_uuid = port_uuid or switch_uuid
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
res_dict = getattr(self, '_fake_%s_dict' % resource_type)
|
||||
items = [json.loads(response_template % res_dict[res_uuid])
|
||||
for res_uuid in res_dict if res_uuid == target_uuid]
|
||||
if items:
|
||||
return json.dumps(items[0])
|
||||
raise Exception("show: resource %s:%s not found" %
|
||||
(resource_type, target_uuid))
|
||||
|
||||
def handle_get(self, url):
|
||||
#TODO(salvatore-orlando): handle field selection
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
|
||||
response_file = self.FAKE_GET_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
if res_type == 'lport':
|
||||
if p_uuid:
|
||||
return self._show(res_type, response_file, s_uuid, p_uuid)
|
||||
else:
|
||||
return self._list(res_type, response_file, s_uuid,
|
||||
query=parsedurl.query)
|
||||
elif res_type == 'lportstatus':
|
||||
return self._show(res_type, response_file, s_uuid, p_uuid)
|
||||
elif res_type == 'lswitch':
|
||||
if s_uuid:
|
||||
return self._show(res_type, response_file, s_uuid)
|
||||
else:
|
||||
return self._list(res_type, response_file,
|
||||
query=parsedurl.query)
|
||||
else:
|
||||
raise Exception("unknown resource:%s" % res_type)
|
||||
|
||||
def handle_post(self, url, body):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, s_uuid, _p) = self._get_resource_type(parsedurl.path)
|
||||
response_file = self.FAKE_POST_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
add_resource = getattr(self, '_add_%s' % res_type)
|
||||
args = [body]
|
||||
if s_uuid:
|
||||
args.append(s_uuid)
|
||||
response = response_template % add_resource(*args)
|
||||
return response
|
||||
|
||||
def handle_put(self, url, body):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
|
||||
target_uuid = p_uuid or s_uuid
|
||||
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
with open("%s/%s" % (self.fake_files_path, response_file)) as f:
|
||||
response_template = f.read()
|
||||
res_dict = getattr(self, '_fake_%s_dict' % res_type)
|
||||
resource = res_dict[target_uuid]
|
||||
resource.update(json.loads(body))
|
||||
response = response_template % resource
|
||||
return response
|
||||
|
||||
def handle_delete(self, url):
|
||||
parsedurl = urlparse.urlparse(url)
|
||||
(res_type, s_uuid, p_uuid) = self._get_resource_type(parsedurl.path)
|
||||
target_uuid = p_uuid or s_uuid
|
||||
response_file = self.FAKE_PUT_RESPONSES.get(res_type)
|
||||
if not response_file:
|
||||
raise Exception("resource not found")
|
||||
res_dict = getattr(self, '_fake_%s_dict' % res_type)
|
||||
del res_dict[target_uuid]
|
||||
return ""
|
||||
|
||||
def fake_request(self, *args, **kwargs):
|
||||
method = args[0]
|
||||
handler = getattr(self, "handle_%s" % method.lower())
|
||||
return handler(*args[1:])
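The run_tests.py runner shown earlier wires this fake client in through mock when no real backend is configured; condensed, the pattern looks roughly like this (the fixtures path is the tests directory added by this change):

# Hedged sketch: route NVPApiHelper.request() into FakeClient, mirroring
# the mock setup in run_tests.py.
import mock

import fake_nvpapiclient

fc = fake_nvpapiclient.FakeClient(
    "quantum/plugins/nicira/nicira_nvp_plugin/tests")
mock_nvpapi = mock.patch('NvpApiClient.NVPApiHelper', autospec=True)
instance = mock_nvpapi.start()
instance.return_value.login.return_value = "the_cookie"
instance.return_value.request.side_effect = fc.fake_request
# ... run the plugin tests here ...
mock_nvpapi.stop()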
|
@ -0,0 +1,17 @@
|
||||
{
|
||||
"display_name": "%(uuid)s",
|
||||
"_href": "/ws.v1/lswitch/%(ls_uuid)s/lport/%(uuid)s",
|
||||
"security_profiles": [],
|
||||
"tags":
|
||||
[{"scope": "q_port_id", "tag": "%(quantum_port_id)s"},
|
||||
{"scope": "vm_id", "tag": "%(quantum_device_id)s"},
|
||||
{"scope": "os_tid", "tag": "%(tenant_id)s"}],
|
||||
"portno": 1,
|
||||
"queue_uuid": null,
|
||||
"_schema": "/ws.v1/schema/LogicalSwitchPortConfig",
|
||||
"mirror_targets": [],
|
||||
"allowed_address_pairs": [],
|
||||
"admin_status_enabled": true,
|
||||
"type": "LogicalSwitchPortConfig",
|
||||
"uuid": "%(uuid)s"
|
||||
}
|
@ -0,0 +1,12 @@
{
 "display_name": "%(display_name)s",
 "uuid": "%(uuid)s",
 "tags": [{"scope": "os_tid", "tag": "%(tenant_id)s"}],
 "type": "LogicalSwitchConfig",
 "_schema": "/ws.v1/schema/LogicalSwitchConfig",
 "port_isolation_enabled": false,
 "transport_zones": [
     {"zone_uuid": "%(zone_uuid)s",
      "transport_type": "stt"}],
 "_href": "/ws.v1/lswitch/%(uuid)s"
}
|
10
quantum/plugins/nicira/nicira_nvp_plugin/tests/nvp.ini.test
Normal file
@ -0,0 +1,10 @@
[DEFAULT]

[DATABASE]
sql_connection = sqlite://

[CLUSTER:fake]
default_tz_uuid = fake_tz_uuid
nova_zone_id = whatever
nvp_cluster_uuid = fake_cluster_uuid
nvp_controller_connection=fake:443:admin:admin:30:10:2:2
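The plugin's own config parsing is not part of this hunk, so purely as an illustration of the file format above, a minimal stdlib ConfigParser sketch that pulls out the fake cluster section:

# Hedged sketch: read nvp.ini.test with plain ConfigParser; the plugin
# has its own parsing logic, this only shows the section layout.
import ConfigParser

cp = ConfigParser.ConfigParser()
cp.read(["quantum/plugins/nicira/nicira_nvp_plugin/tests/nvp.ini.test"])
for section in cp.sections():
    if section.lower().startswith("cluster:"):
        cluster_name = section.split(':', 1)[1]            # -> "fake"
        conn = cp.get(section, "nvp_controller_connection")
        host, port = conn.split(':')[:2]                   # -> "fake", "443"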
@ -1,241 +0,0 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ConfigParser
|
||||
import StringIO
|
||||
import unittest
|
||||
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import (
|
||||
NVPCluster,
|
||||
parse_config,
|
||||
)
|
||||
|
||||
|
||||
class ConfigParserTest(unittest.TestCase):
|
||||
def setUp(self):
|
||||
pass
|
||||
|
||||
def tearDown(self):
|
||||
pass
|
||||
|
||||
def test_nvp_config_000(self):
|
||||
nvpc = NVPCluster('cluster1')
|
||||
for f in [
|
||||
(
|
||||
'default_tz_id1', 'ip1', 'port1', 'user1', 'passwd1', 42, 43,
|
||||
44, 45),
|
||||
(
|
||||
'default_tz_id1', 'ip2', 'port2', 'user2', 'passwd2', 42, 43,
|
||||
44, 45),
|
||||
(
|
||||
'default_tz_id1', 'ip3', 'port3', 'user3', 'passwd3', 42, 43,
|
||||
44, 45),
|
||||
]:
|
||||
nvpc.add_controller(*f)
|
||||
|
||||
self.assertTrue(nvpc.name == 'cluster1')
|
||||
self.assertTrue(len(nvpc.controllers) == 3)
|
||||
|
||||
def test_old_config_parser_old_style(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_IP = <controller ip>
|
||||
PORT = <port>
|
||||
USER = <user>
|
||||
PASSWORD = <pass>
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
|
||||
self.assertTrue(cluster1.name == 'cluster1')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['port'] == '<port>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['user'] == '<user>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['password'] == '<pass>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['request_timeout'] == 30)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['http_timeout'] == 10)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['retries'] == 2)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['redirects'] == 2)
|
||||
|
||||
def test_old_config_parser_new_style(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_CONNECTIONS = CONNECTION1
|
||||
CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
|
||||
self.assertTrue(cluster1.name == 'cluster1')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['port'] == '4242')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['user'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['password'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['request_timeout'] == 42)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['http_timeout'] == 43)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['retries'] == 44)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['redirects'] == 45)
|
||||
|
||||
def test_old_config_parser_both_styles(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
NVP_CONTROLLER_IP = <controller ip>
|
||||
PORT = <port>
|
||||
USER = <user>
|
||||
PASSWORD = <pass>
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_CONNECTIONS = CONNECTION1
|
||||
CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
|
||||
self.assertTrue(cluster1.name == 'cluster1')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['port'] == '4242')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['user'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['password'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['request_timeout'] == 42)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['http_timeout'] == 43)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['retries'] == 44)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['redirects'] == 45)
|
||||
|
||||
def test_old_config_parser_both_styles(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
NVP_CONTROLLER_IP = <controller ip>
|
||||
PORT = <port>
|
||||
USER = <user>
|
||||
PASSWORD = <pass>
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_CONNECTIONS = CONNECTION1
|
||||
CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
|
||||
self.assertTrue(cluster1.name == 'cluster1')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['default_tz_uuid'] == '<default uuid>')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['port'] == '4242')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['user'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['password'] == 'admin')
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['request_timeout'] == 42)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['http_timeout'] == 43)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['retries'] == 44)
|
||||
self.assertTrue(
|
||||
cluster1.controllers[0]['redirects'] == 45)
|
||||
|
||||
def test_failover_time(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_IP = <controller ip>
|
||||
PORT = 443
|
||||
USER = admin
|
||||
PASSWORD = admin
|
||||
FAILOVER_TIME = 10
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
self.assertTrue(plugin_config['failover_time'] == '10')
|
||||
|
||||
def test_failover_time_new_style(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_CONNECTIONS = CONNECTION1
|
||||
CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
|
||||
FAILOVER_TIME = 10
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
self.assertTrue(plugin_config['failover_time'] == '10')
|
||||
|
||||
def test_concurrent_connections_time(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_IP = <controller ip>
|
||||
PORT = 443
|
||||
USER = admin
|
||||
PASSWORD = admin
|
||||
CONCURRENT_CONNECTIONS = 5
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
self.assertTrue(plugin_config['concurrent_connections'] == '5')
|
||||
|
||||
def test_concurrent_connections_time_new_style(self):
|
||||
config = StringIO.StringIO("""
|
||||
[DEFAULT]
|
||||
[NVP]
|
||||
DEFAULT_TZ_UUID = <default uuid>
|
||||
NVP_CONTROLLER_CONNECTIONS = CONNECTION1
|
||||
CONNECTION1 = 10.0.0.1:4242:admin:admin:42:43:44:45
|
||||
CONCURRENT_CONNECTIONS = 5
|
||||
""")
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.readfp(config)
|
||||
cluster1, plugin_config = parse_config(cp)
|
||||
self.assertTrue(plugin_config['concurrent_connections'] == '5')
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
@ -1,201 +0,0 @@
|
||||
# Copyright 2012 Nicira Networks, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
# @author: Somik Behera, Nicira Networks, Inc.
|
||||
# @author: Brad Hall, Nicira Networks, Inc.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from quantum.common import exceptions as exception
|
||||
from quantum.openstack.common import jsonutils
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin import (
|
||||
NvpApiClient,
|
||||
nvplib,
|
||||
)
|
||||
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin
|
||||
|
||||
|
||||
logging.basicConfig(level=logging.DEBUG)
|
||||
LOG = logging.getLogger("test_network")
|
||||
|
||||
|
||||
class NvpTests(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.quantum = NvpPlugin()
|
||||
self.BRIDGE_TZ_UUID = self._create_tz("bridge")
|
||||
self.DEFAULT_TZ_UUID = self._create_tz("default")
|
||||
|
||||
self.nets = []
|
||||
self.ports = []
|
||||
|
||||
def tearDown(self):
|
||||
self._delete_tz(self.BRIDGE_TZ_UUID)
|
||||
self._delete_tz(self.DEFAULT_TZ_UUID)
|
||||
|
||||
for tenant, net, port in self.ports:
|
||||
self.quantum.delete_port(tenant, net, port)
|
||||
for tenant, net in self.nets:
|
||||
self.quantum.delete_network(tenant, net)
|
||||
|
||||
def _create_tz(self, name):
|
||||
post_uri = "/ws.v1/transport-zone"
|
||||
body = {"display_name": name,
|
||||
"tags": [{"tag": "plugin-test"}]}
|
||||
try:
|
||||
resp_obj = self.quantum.api_client.request("POST", post_uri,
|
||||
jsonutils.dumps(body))
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
print("Unknown API Error: %s" % str(e))
|
||||
raise exception.QuantumException()
|
||||
return jsonutils.loads(resp_obj)["uuid"]
|
||||
|
||||
def _delete_tz(self, uuid):
|
||||
post_uri = "/ws.v1/transport-zone/%s" % uuid
|
||||
try:
|
||||
resp_obj = self.quantum.api_client.request("DELETE", post_uri)
|
||||
except NvpApiClient.NvpApiException as e:
|
||||
LOG.error("Unknown API Error: %s" % str(e))
|
||||
raise exception.QuantumException()
|
||||
|
||||
def test_create_multi_networks(self):
|
||||
|
||||
resp = self.quantum.create_custom_network(
|
||||
"quantum-test-tenant", "quantum-Private-TenantA",
|
||||
self.BRIDGE_TZ_UUID, self.quantum.controller)
|
||||
resp1 = self.quantum.create_network("quantum-test-tenant",
|
||||
"quantum-Private-TenantB")
|
||||
resp2 = self.quantum.create_network("quantum-test-tenant",
|
||||
"quantum-Private-TenantC")
|
||||
        resp3 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantD")
        net_id = resp["net-id"]

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id1 = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id1)
        old_vic = resp["attachment"]
        self.assertTrue(old_vic == "None")

        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id1,
                                    "nova-instance-test-%s" % os.getpid())
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id1)
        new_vic = resp["attachment"]
        self.assertTrue(old_vic != new_vic)

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id2 = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id2)
        old_vic2 = resp["attachment"]
        self.assertTrue(old_vic2 == "None")

        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id2,
                                    "nova-instance-test2-%s" % os.getpid())
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id2)
        new_vic = resp["attachment"]
        self.assertTrue(old_vic2 != new_vic)

        resp = self.quantum.get_all_ports("quantum-test-tenant", net_id)

        resp = self.quantum.get_network_details("quantum-test-tenant", net_id)

        resp = self.quantum.get_all_networks("quantum-test-tenant")

        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                        port_id1)
        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                        port_id2)
        self.quantum.delete_network("quantum-test-tenant", net_id)
        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
        self.quantum.delete_network("quantum-test-tenant", resp2["net-id"])
        self.quantum.delete_network("quantum-test-tenant", resp3["net-id"])

    def test_update_network(self):
        resp = self.quantum.create_network("quantum-test-tenant",
                                           "quantum-Private-TenantA")
        net_id = resp["net-id"]
        try:
            resp = self.quantum.update_network("quantum-test-tenant", net_id,
                                               name="new-name")
        except exception.NetworkNotFound:
            self.assertTrue(False)

        self.assertTrue(resp["net-name"] == "new-name")

    def test_negative_delete_networks(self):
        try:
            self.quantum.delete_network("quantum-test-tenant", "xxx-no-net-id")
        except exception.NetworkNotFound:
            self.assertTrue(True)

    def test_negative_get_network_details(self):
        try:
            self.quantum.get_network_details("quantum-test-tenant",
                                             "xxx-no-net-id")
        except exception.NetworkNotFound:
            self.assertTrue(True)

    def test_negative_update_network(self):
        try:
            self.quantum.update_network("quantum-test-tenant", "xxx-no-net-id",
                                        name="new-name")
        except exception.NetworkNotFound:
            self.assertTrue(True)

    def test_get_all_networks(self):
        networks = self.quantum.get_all_networks("quantum-test-tenant")
        num_nets = len(networks)

        # Make sure we only get back networks with the specified tenant_id
        unique_tid = "tenant-%s" % os.getpid()
        # Add a network that we shouldn't get back
        resp = self.quantum.create_custom_network(
            "another_tid", "another_tid_network",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]
        self.nets.append(("another_tid", net_id))
        # Add 3 networks that we should get back
        for i in [1, 2, 3]:
            resp = self.quantum.create_custom_network(
                unique_tid, "net-%s" % str(i),
                self.BRIDGE_TZ_UUID, self.quantum.controller)
            net_id = resp["net-id"]
            self.nets.append((unique_tid, net_id))
        networks = self.quantum.get_all_networks(unique_tid)
        self.assertTrue(len(networks) == 3)

    def test_delete_nonexistent_network(self):
        try:
            nvplib.delete_network(self.quantum.controller,
                                  "my-non-existent-network")
        except exception.NetworkNotFound:
            return
        # shouldn't be reached
        self.assertTrue(False)

    def test_query_networks(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]
        self.nets.append(("quantum-test-tenant", net_id))
        nets = nvplib.query_networks(self.quantum.controller,
                                     "quantum-test-tenant")
@@ -1,20 +1,16 @@
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
# Copyright (C) 2009-2011 Nicira Networks, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This software is provided only under the terms and conditions of a written
# license agreement with Nicira. If no such agreement applies to you, you are
# not authorized to use this software. Contact Nicira to obtain an appropriate
# license: www.nicira.com.

# System
import httplib
import unittest2 as unittest
import unittest

# Third party
# Local
import quantum.plugins.nicira.nicira_nvp_plugin.api_client.common as naco

@@ -35,5 +31,5 @@ class NvpApiCommonTest(unittest.TestCase):
        self.assertTrue(
            naco._conn_str(conn) == 'http://localhost:4242')

        with self.assertRaises(TypeError):
            naco._conn_str('not an httplib.HTTPSConnection')
        self.assertRaises(TypeError, naco._conn_str,
                          ('not an httplib.HTTPSConnection'))
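The hunk above drops unittest2 in favour of the standard unittest module and
rewrites the TypeError check from the context-manager form of assertRaises to
the callable form, which is available in older unittest versions. A minimal,
self-contained sketch of the two spellings (using int() as a stand-in for
naco._conn_str, which is an assumption made purely for illustration):

    import unittest


    class AssertRaisesStylesTest(unittest.TestCase):
        def test_callable_style(self):
            # Works with the plain unittest module used after this change.
            self.assertRaises(ValueError, int, 'not a number')

        def test_context_manager_style(self):
            # Requires unittest2 or Python >= 2.7.
            with self.assertRaises(ValueError):
                int('not a number')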
@@ -1,27 +1,18 @@
# Copyright (C) 2009-2012 Nicira Networks, Inc. All Rights Reserved.
# Copyright (C) 2009-2011 Nicira Networks, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# This software is provided only under the terms and conditions of a written
# license agreement with Nicira. If no such agreement applies to you, you are
# not authorized to use this software. Contact Nicira to obtain an appropriate
# license: www.nicira.com.

import eventlet
eventlet.monkey_patch()
import logging
import unittest

from eventlet.green import urllib2

import urllib2

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("test_nvp_api_request")

lg = logging.getLogger("test_nvp_api_request")

REQUEST_TIMEOUT = 1


@@ -211,8 +211,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
        self.assertTrue(retval is None)

    def test_redirect_params_setup_https_with_cooki(self):
        with patch('nicira_nvp_plugin.api_client.client_eventlet'
                   '.NvpApiClientEventlet') as mock:
        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
                   'client_eventlet.NvpApiClientEventlet') as mock:
            api_client = mock.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = 'mycookie'

@@ -226,8 +226,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
        self.assertTrue(api_client.acquire_connection.called)

    def test_redirect_params_setup_htttps_and_query(self):
        with patch('nicira_nvp_plugin.api_client.client_eventlet'
                   '.NvpApiClientEventlet') as mock:
        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
                   'client_eventlet.NvpApiClientEventlet') as mock:
            api_client = mock.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = 'mycookie'

@@ -241,8 +241,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
        self.assertTrue(api_client.acquire_connection.called)

    def test_redirect_params_setup_https_connection_no_cookie(self):
        with patch('nicira_nvp_plugin.api_client.client_eventlet'
                   '.NvpApiClientEventlet') as mock:
        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
                   'client_eventlet.NvpApiClientEventlet') as mock:
            api_client = mock.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = None

@@ -256,8 +256,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
        self.assertTrue(api_client.acquire_connection.called)

    def test_redirect_params_setup_https_and_query_no_cookie(self):
        with patch('nicira_nvp_plugin.api_client.client_eventlet'
                   '.NvpApiClientEventlet') as mock:
        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
                   'client_eventlet.NvpApiClientEventlet') as mock:
            api_client = mock.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = None

@@ -270,8 +270,8 @@ class NvpApiRequestEventletTest(unittest.TestCase):
        self.assertTrue(api_client.acquire_connection.called)

    def test_redirect_params_path_only_with_query(self):
        with patch('nicira_nvp_plugin.api_client.client_eventlet'
                   '.NvpApiClientEventlet') as mock:
        with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
                   'client_eventlet.NvpApiClientEventlet') as mock:
            api_client = mock.return_value
            api_client.wait_for_login.return_value = None
            api_client.auth_cookie = None
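The hunks above only rewrite the string passed to patch: once the plugin lives
under the quantum.plugins.nicira package, the mock target has to name the
class by the full dotted path it is importable from, because mock.patch()
imports that path to find the object to replace. A minimal sketch of the
pattern, assuming the quantum package is importable and that patch here is
mock.patch, as used by these tests:

    from mock import patch

    # Sketch only, not part of the commit: the target string must match the
    # real package layout of the class being mocked.
    with patch('quantum.plugins.nicira.nicira_nvp_plugin.api_client.'
               'client_eventlet.NvpApiClientEventlet') as mock_cls:
        api_client = mock_cls.return_value      # the mocked client instance
        api_client.wait_for_login.return_value = None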
@@ -1,521 +0,0 @@
# Copyright 2012 Nicira Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.

import logging
import os
import unittest

from quantum.common import exceptions as exception
from quantum.openstack.common import jsonutils
from quantum.plugins.nicira.nicira_nvp_plugin import (
    NvpApiClient,
    nvplib,
)
from quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin import NvpPlugin


logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger("test_port")


class NvpTests(unittest.TestCase):
    def setUp(self):
        self.quantum = NvpPlugin()
        self.BRIDGE_TZ_UUID = self._create_tz("bridge")
        self.networks = []
        self.ports = []
        self.transport_nodes = []
        self.cis_uuids = []

    def tearDown(self):
        self._delete_tz(self.BRIDGE_TZ_UUID)

        for (net_id, p) in self.ports:
            self.quantum.unplug_interface("quantum-test-tenant", net_id, p)
            self.quantum.delete_port("quantum-test-tenant", net_id, p)
        for n in self.networks:
            self.quantum.delete_network("quantum-test-tenant", n)
        for t in self.transport_nodes:
            nvplib.do_single_request("DELETE", "/ws.v1/transport-node/%s" % t,
                                     controller=self.quantum.controller)
        for c in self.cis_uuids:
            nvplib.do_single_request(
                "DELETE",
                "/ws.v1/cluster-interconnect-service/%s" % c,
                controller=self.quantum.controller)

    def _create_tz(self, name):
        post_uri = "/ws.v1/transport-zone"
        body = {"display_name": name, "tags": [{"tag": "plugin-test"}]}
        try:
            resp_obj = self.quantum.api_client.request("POST",
                                                       post_uri,
                                                       jsonutils.dumps(body))
        except NvpApiClient.NvpApiException as e:
            LOG.error("Unknown API Error: %s" % str(e))
            raise exception.QuantumException()
        return jsonutils.loads(resp_obj)["uuid"]

    def _delete_tz(self, uuid):
        post_uri = "/ws.v1/transport-zone/%s" % uuid
        try:
            resp_obj = self.quantum.api_client.request("DELETE", post_uri)
        except NvpApiClient.NvpApiException as e:
            LOG.error("Unknown API Error: %s" % str(e))
            raise exception.QuantumException()
    def test_create_and_delete_lots_of_ports(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]

        nports = 250

        ids = []
        for i in xrange(0, nports):
            resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                            "ACTIVE")
            port_id = resp["port-id"]
            ids.append(port_id)

        # Test that we get the correct number of ports back
        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id)
        self.assertTrue(len(ports) == nports)

        # Verify that each lswitch has matching tags
        net = nvplib.get_network(self.quantum.controller, net_id)
        tags = []
        net_tags = [t["tag"] for t in net["tags"]]
        if len(tags) == 0:
            tags = net_tags
        else:
            for t in net_tags:
                self.assertTrue(t in tags)

        for port_id in ids:
            resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                            port_id)
            try:
                self.quantum.get_port_details("quantum-test-tenant", net_id,
                                              port_id)
            except exception.PortNotFound:
                continue
            # Shouldn't be reached
            self.assertFalse(True)

        self.quantum.delete_network("quantum-test-tenant", net_id)
    def test_create_and_delete_port(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
        self.quantum.delete_network("quantum-test-tenant", net_id)

    def test_create_and_delete_port_with_portsec(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]

        params = {}
        params["NICIRA:allowed_address_pairs"] = [
            {
                "ip_address": "172.168.17.5",
                "mac_address": "10:9a:dd:61:4e:89",
            },
            {
                "ip_address": "172.168.17.6",
                "mac_address": "10:9a:dd:61:4e:88",
            },
        ]
        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE", **params)
        port_id = resp["port-id"]
        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
        self.quantum.delete_network("quantum-test-tenant", net_id)
        self.assertTrue(True)

    def test_create_update_and_delete_port(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id)
        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                        port_id)
        self.quantum.delete_network("quantum-test-tenant",
                                    net_id)
        self.assertTrue(True)
    def test_create_plug_unplug_iface(self):
        resp = self.quantum.create_custom_network(
            "quantum-test-tenant", "quantum-Private-TenantA",
            self.BRIDGE_TZ_UUID, self.quantum.controller)
        net_id = resp["net-id"]

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id)
        old_vic = resp["attachment"]
        self.assertTrue(old_vic == "None")
        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
                                    "nova-instance-test-%s" % os.getpid())
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id)
        new_vic = resp["attachment"]

        self.assertTrue(old_vic != new_vic)
        self.quantum.unplug_interface("quantum-test-tenant", net_id, port_id)
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id)
        new_vic = resp["attachment"]
        self.assertTrue(old_vic == new_vic)
        resp = self.quantum.delete_port("quantum-test-tenant", net_id, port_id)
        self.quantum.delete_network("quantum-test-tenant", net_id)
        self.assertTrue(True)
    def test_create_multi_port_attachment(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id1 = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id1)
        old_vic = resp["attachment"]
        self.assertTrue(old_vic == "None")

        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id1,
                                    "nova-instance-test-%s" % os.getpid())
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id1)
        new_vic = resp["attachment"]
        self.assertTrue(old_vic != new_vic)

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id2 = resp["port-id"]
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id2)
        old_vic2 = resp["attachment"]
        self.assertTrue(old_vic2 == "None")

        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id2,
                                    "nova-instance-test2-%s" % os.getpid())
        resp = self.quantum.get_port_details("quantum-test-tenant", net_id,
                                             port_id2)
        new_vic = resp["attachment"]
        self.assertTrue(old_vic2 != new_vic)

        resp = self.quantum.get_all_ports("quantum-test-tenant", net_id)

        resp = self.quantum.get_network_details("quantum-test-tenant", net_id)

        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                        port_id1)
        resp = self.quantum.delete_port("quantum-test-tenant", net_id,
                                        port_id2)
        self.quantum.delete_network("quantum-test-tenant", net_id)
        self.assertTrue(True)
    def test_negative_get_all_ports(self):
        try:
            self.quantum.get_all_ports("quantum-test-tenant", "xxx-no-net-id")
        except exception.NetworkNotFound:
            self.assertTrue(True)
            return

        self.assertTrue(False)

    def test_negative_create_port1(self):
        try:
            self.quantum.create_port("quantum-test-tenant", "xxx-no-net-id",
                                     "ACTIVE")
        except exception.NetworkNotFound:
            self.assertTrue(True)
            return

        self.assertTrue(False)

    def test_negative_create_port2(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.create_port("quantum-test-tenant", resp1["net-id"],
                                     "INVALID")
        except exception.StateInvalid:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
        self.assertTrue(False)

    def test_negative_update_port1(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
                                     "port_id_fake", state="ACTIVE")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.assertTrue(False)

    def test_negative_update_port2(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
                                     "port_id_fake", state="INVALID")
        except exception.StateInvalid:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.assertTrue(False)

    def test_negative_update_port3(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.update_port("quantum-test-tenant", resp1["net-id"],
                                     "port_id_fake", state="ACTIVE")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
        self.assertTrue(False)

    def test_negative_delete_port1(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.delete_port("quantum-test-tenant", resp1["net-id"],
                                     "port_id_fake")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.assertTrue(False)

    def test_negative_delete_port2(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.delete_port("quantum-test-tenant", resp1["net-id"],
                                     "port_id_fake")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
            return

        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
        self.assertTrue(False)

    def test_negative_get_port_details(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.get_port_details("quantum-test-tenant",
                                          resp1["net-id"],
                                          "port_id_fake")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant",
                                        resp1["net-id"])
            return

        self.quantum.delete_network("quantum-test-tenant", resp1["net-id"])
        self.assertTrue(False)

    def test_negative_plug_interface(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.plug_interface("quantum-test-tenant",
                                        resp1["net-id"],
                                        "port_id_fake", "iface_id_fake")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant",
                                        resp1["net-id"])
            return

        self.assertTrue(False)

    def test_negative_unplug_interface(self):
        resp1 = self.quantum.create_network("quantum-test-tenant",
                                            "quantum-Private-TenantB")
        try:
            self.quantum.unplug_interface("quantum-test-tenant",
                                          resp1["net-id"], "port_id_fake")
        except exception.PortNotFound:
            self.assertTrue(True)
            self.quantum.delete_network("quantum-test-tenant",
                                        resp1["net-id"])
            return

        self.assertTrue(False)
    def test_get_port_status_invalid_lswitch(self):
        try:
            nvplib.get_port_status(self.quantum.controller,
                                   "invalid-lswitch",
                                   "invalid-port")
        except exception.NetworkNotFound:
            return
        # Shouldn't be reached
        self.assertTrue(False)

    def test_get_port_status_invalid_port(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]
        self.networks.append(net_id)

        try:
            nvplib.get_port_status(self.quantum.controller, net_id,
                                   "invalid-port")
        except exception.PortNotFound:
            return
        # Shouldn't be reached
        self.assertTrue(False)

    def test_get_port_status_returns_the_right_stuff(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]
        self.networks.append(net_id)
        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        self.ports.append((net_id, port_id))
        res = nvplib.get_port_status(self.quantum.controller, net_id, port_id)
        self.assertTrue(res in ['UP', 'DOWN', 'PROVISIONING'])

    def test_get_port_stats_invalid_lswitch(self):
        try:
            nvplib.get_port_stats(self.quantum.controller,
                                  "invalid-lswitch",
                                  "invalid-port")
        except exception.NetworkNotFound:
            return
        # Shouldn't be reached
        self.assertTrue(False)

    def test_get_port_stats_invalid_port(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]
        self.networks.append(net_id)

        try:
            nvplib.get_port_stats(self.quantum.controller, net_id,
                                  "invalid-port")
        except exception.PortNotFound:
            return
        # Shouldn't be reached
        self.assertTrue(False)

    def test_get_port_stats_returns_the_right_stuff(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]
        self.networks.append(net_id)
        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        self.ports.append((net_id, port_id))
        res = nvplib.get_port_stats(self.quantum.controller, net_id, port_id)
        self.assertTrue("tx_errors" in res)
        self.assertTrue("tx_bytes" in res)
        self.assertTrue("tx_packets" in res)
        self.assertTrue("rx_errors" in res)
        self.assertTrue("rx_bytes" in res)
        self.assertTrue("rx_packets" in res)
    def test_port_filters_by_attachment(self):
        resp = self.quantum.create_custom_network("quantum-test-tenant",
                                                  "quantum-Private-TenantA",
                                                  self.BRIDGE_TZ_UUID,
                                                  self.quantum.controller)
        net_id = resp["net-id"]
        self.networks.append(net_id)

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        port_id1 = port_id
        self.ports.append((net_id, port_id))
        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
                                    "attachment1")

        resp = self.quantum.create_port("quantum-test-tenant", net_id,
                                        "ACTIVE")
        port_id = resp["port-id"]
        port_id2 = port_id
        self.ports.append((net_id, port_id))
        self.quantum.plug_interface("quantum-test-tenant", net_id, port_id,
                                    "attachment2")

        # Make sure we get all the ports that we created back
        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id)
        self.assertTrue(len(ports) == 2)

        # Make sure we only get the filtered ones back
        ports = self.quantum.get_all_ports("quantum-test-tenant", net_id,
                                           filter_opts={"attachment":
                                                        "attachment2"})
        self.assertTrue(len(ports) == 1)
        self.assertTrue(ports[0]["port-id"] == port_id2)

        # Make sure we don't get any back with an invalid filter
        ports = self.quantum.get_all_ports(
            "quantum-test-tenant", net_id,
            filter_opts={"attachment": "invalidattachment"})
        self.assertTrue(len(ports) == 0)