Provider network implementation for NVP plugin.
blueprint nvp-provider-net

Implements support for the provider network extension. The list of valid
network types has been updated to reflect the types supported by the NVP
plugin; this was necessary because validation would otherwise have always
failed. Multiple logical switches may be associated with a single Quantum
network; the first logical switch always has the same id as the Quantum
network. An exception is now raised when the port limit on an overlay
network is reached: this patch adds the corresponding check for the maximum
number of ports on 'standard' overlay networks, and performs some code
refactoring to improve maintainability (for instance, the NVPCluster class
has been moved into its own module).

Change-Id: Ib26d327daf748cfcba9ca74e8dc2e8e89c676c2e
parent 37c25d219a
commit bcae703bc7
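In outline, the per-switch port limit the commit message describes works as
sketched below. This is an illustrative simplification, not the plugin code
itself; names and structure follow _handle_lswitch_selection in the diff:

    # Simplified sketch of the port-limit rule introduced by this patch.
    # A quantum network may map to several NVP logical switches; pick one
    # with spare ports, or fail once the configured maximum is reached.
    def pick_lswitch(lswitches, max_ports, allow_extra_lswitches):
        for ls in lswitches:
            status = ls['_relations']['LogicalSwitchStatus']
            if status['lport_count'] < max_ports:
                return ls
        if allow_extra_lswitches:
            # bridged (flat/vlan) networks may spill over to an extra
            # logical switch created on demand by the plugin
            return None
        # overlay (stt/gre) networks hit a hard limit instead
        raise Exception("Maximum number of logical ports reached")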
@@ -24,13 +24,19 @@ reconnect_interval = 2
 # sql_idle_timeout = 3600
 
 [NVP]
-# The number of logical ports to create per bridged logical switch
+# Maximum number of ports for each bridged logical switch
 # max_lp_per_bridged_ls = 64
+# Maximum number of ports for each overlay (stt, gre) logical switch
+# max_lp_per_overlay_ls = 256
 # Time from when a connection pool is switched to another controller
 # during failure.
 # failover_time = 5
 # Number of connects to each controller node.
 # concurrent_connections = 3
+# Name of the default cluster where requests should be sent if a nova zone id
+# is not specified. If it is empty or reference a non-existent cluster
+# the first cluster specified in this configuration file will be used
+# default_cluster_name =
 
 #[CLUSTER:example]
 # This is uuid of the default NVP Transport zone that will be used for
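For reference, an uncommented [NVP] section exercising the new options might
look like the snippet below; the values simply restate the commented defaults
above, and the cluster name assumes the [CLUSTER:example] stanza that follows
in this file:

    [NVP]
    max_lp_per_bridged_ls = 64
    max_lp_per_overlay_ls = 256
    failover_time = 5
    concurrent_connections = 3
    default_cluster_name = example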
@@ -19,7 +19,9 @@ NETWORK_TYPE = 'provider:network_type'
 PHYSICAL_NETWORK = 'provider:physical_network'
 SEGMENTATION_ID = 'provider:segmentation_id'
 
-NETWORK_TYPE_VALUES = ['flat', 'gre', 'local', 'vlan']
+# TODO(salvatore-orlando): Devise a solution for allowing plugins
+# to alter the set of allowed values
+NETWORK_TYPE_VALUES = ['flat', 'gre', 'local', 'vlan', 'stt']
 
 EXTENDED_ATTRIBUTES_2_0 = {
     'networks': {
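Once 'stt' is accepted by the extension, a network-create request can carry
the provider attributes. A hypothetical request body is sketched below; the
transport-zone value is a placeholder, since the NVP plugin interprets
provider:physical_network as a transport zone uuid (see the binding code
later in this diff):

    # Hypothetical create-network body using the provider extension
    network_req = {
        'network': {
            'name': 'net1',
            'provider:network_type': 'stt',
            'provider:physical_network': '<transport-zone-uuid>',
        }
    }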
@@ -20,45 +20,54 @@
 # @author: Aaron Rosen, Nicira Networks, Inc.
 
 
-import ConfigParser
-import json
 import hashlib
 import logging
-import netaddr
-import os
-import sys
-import traceback
-import urllib
-import uuid
 
+import webob.exc
+
+# FIXME(salvatore-orlando): get rid of relative imports
 from common import config
 from quantum.plugins.nicira.nicira_nvp_plugin.api_client import client_eventlet
-import NvpApiClient
-import nvplib
 from nvp_plugin_version import PLUGIN_VERSION
 
+from quantum.plugins.nicira.nicira_nvp_plugin import nicira_models
+
 
 from quantum.api.v2 import attributes
+from quantum.api.v2 import base
 from quantum.common import constants
-from quantum.common import exceptions as exception
+from quantum.common import exceptions as q_exc
 from quantum.common import rpc as q_rpc
 from quantum.common import topics
 from quantum.db import api as db
 from quantum.db import db_base_plugin_v2
 from quantum.db import dhcp_rpc_base
 from quantum.db import models_v2
+from quantum.extensions import providernet as pnet
 from quantum.openstack.common import cfg
 from quantum.openstack.common import rpc
+from quantum import policy
+from quantum.plugins.nicira.nicira_nvp_plugin.common import (exceptions
+                                                             as nvp_exc)
+from quantum.plugins.nicira.nicira_nvp_plugin import nicira_db
+from quantum.plugins.nicira.nicira_nvp_plugin import nvp_cluster
+
+import NvpApiClient
+import nvplib
 
-CONFIG_FILE = "nvp.ini"
-CONFIG_FILE_PATHS = []
-if os.environ.get('QUANTUM_HOME', None):
-    CONFIG_FILE_PATHS.append('%s/etc' % os.environ['QUANTUM_HOME'])
-CONFIG_FILE_PATHS.append("/etc/quantum/plugins/nicira")
 
 LOG = logging.getLogger("QuantumPlugin")
 
 
+# Provider network extension - allowed network types for the NVP Plugin
+class NetworkTypes:
+    """ Allowed provider network types for the NVP Plugin """
+    STT = 'stt'
+    GRE = 'gre'
+    FLAT = 'flat'
+    VLAN = 'vlan'
+
+
 def parse_config():
     """Parse the supplied plugin configuration.
 
@@ -77,11 +86,7 @@ def parse_config():
         "sql_idle_timeout": cfg.CONF.DATABASE.sql_idle_timeout,
         "sql_dbpool_enable": cfg.CONF.DATABASE.sql_dbpool_enable
     }
-    nvp_options = {'max_lp_per_bridged_ls': cfg.CONF.NVP.max_lp_per_bridged_ls}
-    nvp_options.update({'failover_time': cfg.CONF.NVP.failover_time})
-    nvp_options.update({'concurrent_connections':
-                        cfg.CONF.NVP.concurrent_connections})
-
+    nvp_options = cfg.CONF.NVP
     nvp_conf = config.ClusterConfigOptions(cfg.CONF)
     cluster_names = config.register_cluster_groups(nvp_conf)
    nvp_conf.log_opt_values(LOG, logging.DEBUG)
@@ -116,125 +121,16 @@ class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
         return q_rpc.PluginRpcDispatcher([self])
 
 
-class NVPCluster(object):
-    """Encapsulates controller connection and api_client for a cluster.
-
-    Accessed within the NvpPluginV2 class.
-
-    Each element in the self.controllers list is a dictionary that
-    contains the following keys:
-        ip, port, user, password, default_tz_uuid, uuid, zone
-
-    There may be some redundancy here, but that has been done to provide
-    future flexibility.
-    """
-    def __init__(self, name):
-        self._name = name
-        self.controllers = []
-        self.api_client = None
-
-    def __repr__(self):
-        ss = ['{ "NVPCluster": [']
-        ss.append('{ "name" : "%s" }' % self.name)
-        ss.append(',')
-        for c in self.controllers:
-            ss.append(str(c))
-            ss.append(',')
-        ss.append('] }')
-        return ''.join(ss)
-
-    def add_controller(self, ip, port, user, password, request_timeout,
-                       http_timeout, retries, redirects,
-                       default_tz_uuid, uuid=None, zone=None):
-        """Add a new set of controller parameters.
-
-        :param ip: IP address of controller.
-        :param port: port controller is listening on.
-        :param user: user name.
-        :param password: user password.
-        :param request_timeout: timeout for an entire API request.
-        :param http_timeout: timeout for a connect to a controller.
-        :param retries: maximum number of request retries.
-        :param redirects: maximum number of server redirect responses to
-            follow.
-        :param default_tz_uuid: default transport zone uuid.
-        :param uuid: UUID of this cluster (used in MDI configs).
-        :param zone: Zone of this cluster (used in MDI configs).
-        """
-
-        keys = [
-            'ip', 'user', 'password', 'default_tz_uuid', 'uuid', 'zone']
-        controller_dict = dict([(k, locals()[k]) for k in keys])
-
-        int_keys = [
-            'port', 'request_timeout', 'http_timeout', 'retries', 'redirects']
-        for k in int_keys:
-            controller_dict[k] = int(locals()[k])
-
-        self.controllers.append(controller_dict)
-
-    def get_controller(self, idx):
-        return self.controllers[idx]
-
-    @property
-    def name(self):
-        return self._name
-
-    @name.setter
-    def name(self, val=None):
-        self._name = val
-
-    @property
-    def host(self):
-        return self.controllers[0]['ip']
-
-    @property
-    def port(self):
-        return self.controllers[0]['port']
-
-    @property
-    def user(self):
-        return self.controllers[0]['user']
-
-    @property
-    def password(self):
-        return self.controllers[0]['password']
-
-    @property
-    def request_timeout(self):
-        return self.controllers[0]['request_timeout']
-
-    @property
-    def http_timeout(self):
-        return self.controllers[0]['http_timeout']
-
-    @property
-    def retries(self):
-        return self.controllers[0]['retries']
-
-    @property
-    def redirects(self):
-        return self.controllers[0]['redirects']
-
-    @property
-    def default_tz_uuid(self):
-        return self.controllers[0]['default_tz_uuid']
-
-    @property
-    def zone(self):
-        return self.controllers[0]['zone']
-
-    @property
-    def uuid(self):
-        return self.controllers[0]['uuid']
-
-
 class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
     """
     NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
     functionality using NVP.
     """
 
+    supported_extension_aliases = ["provider"]
+    # Default controller cluster
+    default_cluster = None
+
     def __init__(self, loglevel=None):
         if loglevel:
             logging.basicConfig(level=loglevel)
@@ -242,11 +138,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
             NvpApiClient.LOG.setLevel(loglevel)
 
         self.db_opts, self.nvp_opts, self.clusters_opts = parse_config()
-        self.clusters = []
+        self.clusters = {}
         for c_opts in self.clusters_opts:
             # Password is guaranteed to be the same across all controllers
             # in the same NVP cluster.
-            cluster = NVPCluster(c_opts['name'])
+            cluster = nvp_cluster.NVPCluster(c_opts['name'])
             for controller_connection in c_opts['nvp_controller_connection']:
                 args = controller_connection.split(':')
                 try:
@@ -259,7 +155,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                                 "controller %(conn)s in cluster %(name)s"),
                               {'conn': controller_connection,
                                'name': c_opts['name']})
-                    raise
+                    raise nvp_exc.NvpInvalidConnection(
+                        conn_params=controller_connection)
 
             api_providers = [(x['ip'], x['port'], True)
                              for x in cluster.controllers]
@@ -269,29 +166,185 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                 http_timeout=cluster.http_timeout,
                 retries=cluster.retries,
                 redirects=cluster.redirects,
-                failover_time=self.nvp_opts['failover_time'],
-                concurrent_connections=self.nvp_opts['concurrent_connections'])
+                failover_time=self.nvp_opts.failover_time,
+                concurrent_connections=self.nvp_opts.concurrent_connections)
 
             # TODO(salvatore-orlando): do login at first request,
-            # not when plugin, is instantiated
+            # not when plugin is instantiated
             cluster.api_client.login()
+            self.clusters[c_opts['name']] = cluster
 
-        # TODO(pjb): What if the cluster isn't reachable this
-        # instant? It isn't good to fall back to invalid cluster
-        # strings.
-        # Default for future-versions
-        self.clusters.append(cluster)
-
-        # Connect and configure ovs_quantum db
+        # Connect and configure nvp_quantum db
         options = {
            'sql_connection': self.db_opts['sql_connection'],
            'sql_max_retries': self.db_opts['sql_max_retries'],
            'reconnect_interval': self.db_opts['reconnect_interval'],
            'base': models_v2.model_base.BASEV2,
         }
+        def_cluster_name = self.nvp_opts.default_cluster_name
+        if def_cluster_name and def_cluster_name in self.clusters:
+            self.default_cluster = self.clusters[def_cluster_name]
+        else:
+            first_cluster_name = self.clusters.keys()[0]
+            if not def_cluster_name:
+                LOG.info(_("Default cluster name not specified. "
+                           "Using first cluster:%s"), first_cluster_name)
+            elif not def_cluster_name in self.clusters:
+                LOG.warning(_("Default cluster name %(def_cluster_name)s. "
+                              "Using first cluster:%(first_cluster_name)s")
+                            % locals())
+            # otherwise set 1st cluster as default
+            self.default_cluster = self.clusters[first_cluster_name]
+
         db.configure_db(options)
+        # Extend the fault map
+        self._extend_fault_map()
+        # Set up RPC interface for DHCP agent
         self.setup_rpc()
 
+    def _extend_fault_map(self):
+        """ Extends the Quantum Fault Map
+
+        Exceptions specific to the NVP Plugin are mapped to standard
+        HTTP Exceptions
+        """
+        base.FAULT_MAP.update({nvp_exc.NvpInvalidNovaZone:
+                               webob.exc.HTTPBadRequest,
+                               nvp_exc.NvpNoMorePortsException:
+                               webob.exc.HTTPBadRequest})
+
+    def _novazone_to_cluster(self, novazone_id):
+        if novazone_id in self.novazone_cluster_map:
+            return self.novazone_cluster_map[novazone_id]
+        LOG.debug(_("Looking for nova zone: %s") % novazone_id)
+        for x in self.clusters:
+            LOG.debug(_("Looking for nova zone %(novazone_id)s in "
+                        "cluster: %(x)s") % locals())
+            if x.zone == str(novazone_id):
+                self.novazone_cluster_map[x.zone] = x
+                return x
+        LOG.error(_("Unable to find cluster config entry for nova zone: %s") %
+                  novazone_id)
+        raise nvp_exc.NvpInvalidNovaZone(nova_zone=novazone_id)
+
+    def _find_target_cluster(self, resource):
+        """ Return cluster where configuration should be applied
+
+        If the resource being configured has a paremeter expressing
+        the zone id (nova_id), then select corresponding cluster,
+        otherwise return default cluster.
+
+        """
+        if 'nova_id' in resource:
+            return self._novazone_to_cluster(resource['nova_id'])
+        else:
+            return self.default_cluster
+
+    def _check_provider_view_auth(self, context, network):
+        return policy.check(context,
+                            "extension:provider_network:view",
+                            network)
+
+    def _enforce_provider_set_auth(self, context, network):
+        return policy.enforce(context,
+                              "extension:provider_network:set",
+                              network)
+
+    def _handle_provider_create(self, context, attrs):
+        # NOTE(salvatore-orlando): This method has been borrowed from
+        # the OpenvSwtich plugin, altough changed to match NVP specifics.
+        network_type = attrs.get(pnet.NETWORK_TYPE)
+        physical_network = attrs.get(pnet.PHYSICAL_NETWORK)
+        segmentation_id = attrs.get(pnet.SEGMENTATION_ID)
+        network_type_set = attributes.is_attr_set(network_type)
+        physical_network_set = attributes.is_attr_set(physical_network)
+        segmentation_id_set = attributes.is_attr_set(segmentation_id)
+        if not (network_type_set or physical_network_set or
+                segmentation_id_set):
+            return
+
+        # Authorize before exposing plugin details to client
+        self._enforce_provider_set_auth(context, attrs)
+        err_msg = None
+        if not network_type_set:
+            err_msg = _("%s required") % pnet.NETWORK_TYPE
+        elif network_type in (NetworkTypes.GRE, NetworkTypes.STT,
+                              NetworkTypes.FLAT):
+            if segmentation_id_set:
+                err_msg = _("Segmentation ID cannot be specified with "
+                            "flat network type")
+        elif network_type == NetworkTypes.VLAN:
+            if not segmentation_id_set:
+                err_msg = _("Segmentation ID must be specified with "
+                            "vlan network type")
+            elif (segmentation_id_set and
+                  (segmentation_id < 1 or segmentation_id > 4094)):
+                err_msg = _("%s out of range (1 to 4094)") % segmentation_id
+            else:
+                # Verify segment is not already allocated
+                binding = nicira_db.get_network_binding_by_vlanid(
+                    context.session, segmentation_id)
+                if binding:
+                    raise q_exc.VlanIdInUse(vlan_id=segmentation_id,
+                                            physical_network=physical_network)
+        else:
+            err_msg = _("%(net_type_param)s %(net_type_value)s not "
+                        "supported") % {'net_type_param': pnet.NETWORK_TYPE,
+                                        'net_type_value': network_type}
+        if err_msg:
+            raise q_exc.InvalidInput(error_message=err_msg)
+        # TODO(salvatore-orlando): Validate tranport zone uuid
+        # which should be specified in physical_network
+
+    def _extend_network_dict_provider(self, context, network, binding=None):
+        if self._check_provider_view_auth(context, network):
+            if not binding:
+                binding = nicira_db.get_network_binding(context.session,
+                                                        network['id'])
+            # With NVP plugin 'normal' overlay networks will have no binding
+            # TODO(salvatore-orlando) make sure users can specify a distinct
+            # tz_uuid as 'provider network' for STT net type
+            if binding:
+                network[pnet.NETWORK_TYPE] = binding.binding_type
+                network[pnet.PHYSICAL_NETWORK] = binding.tz_uuid
+                network[pnet.SEGMENTATION_ID] = binding.vlan_id
+
+    def _handle_lswitch_selection(self, cluster, network,
+                                  network_binding, max_ports,
+                                  allow_extra_lswitches):
+        lswitches = nvplib.get_lswitches(cluster, network.id)
+        try:
+            # TODO find main_ls too!
+            return [ls for ls in lswitches
+                    if (ls['_relations']['LogicalSwitchStatus']
+                        ['lport_count'] < max_ports)].pop(0)
+        except IndexError:
+            # Too bad, no switch available
+            LOG.debug(_("No switch has available ports (%d checked)") %
+                      len(lswitches))
+        if allow_extra_lswitches:
+            main_ls = [ls for ls in lswitches if ls['uuid'] == network.id]
+            tag_dict = dict((x['scope'], x['tag']) for x in main_ls[0]['tags'])
+            if not 'multi_lswitch' in tag_dict:
+                nvplib.update_lswitch(cluster,
+                                      main_ls[0]['uuid'],
+                                      main_ls[0]['display_name'],
+                                      network['tenant_id'],
+                                      tags=[{'tag': 'True',
+                                             'scope': 'multi_lswitch'}])
+            selected_lswitch = nvplib.create_lswitch(
+                cluster, network.tenant_id,
+                "%s-ext-%s" % (network.name, len(lswitches)),
+                network_binding.binding_type,
+                network_binding.tz_uuid,
+                network_binding.vlan_id,
+                network.id)
+            return selected_lswitch
+        else:
+            LOG.error(_("Maximum number of logical ports reached for "
+                        "logical network %s") % network.id)
+            raise nvp_exc.NvpNoMorePortsException(network=network.id)
+
     def setup_rpc(self):
         # RPC support for dhcp
         self.topic = topics.PLUGIN
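The _extend_fault_map hunk above controls what API clients see when these
plugin exceptions escape. A minimal sketch of the idea, assuming (as in
Quantum's API layer) that base.FAULT_MAP is consulted to translate plugin
exceptions into HTTP faults:

    # Sketch: a fault map translates plugin exceptions to HTTP statuses.
    class NoMorePorts(Exception):
        pass

    FAULT_MAP = {NoMorePorts: 400}  # 400 ~ webob.exc.HTTPBadRequest

    def http_status_for(exc):
        # unmapped exceptions would surface as an internal error (500)
        return FAULT_MAP.get(type(exc), 500)

    assert http_status_for(NoMorePorts()) == 400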
@@ -302,15 +355,6 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         # Consume from all consumers in a thread
         self.conn.consume_in_thread()
 
-    @property
-    def cluster(self):
-        if len(self.clusters):
-            return self.clusters[0]
-        return None
-
-    def clear_state(self):
-        nvplib.clear_state(self.clusters[0])
-
     def get_all_networks(self, tenant_id, **kwargs):
         networks = []
         for c in self.clusters:
@@ -337,24 +381,40 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                  }
         :raises: exception.NoImplementedError
         """
+        net_data = network['network'].copy()
+        # Process the provider network extension
+        self._handle_provider_create(context, net_data)
+        # Replace ATTR_NOT_SPECIFIED with None before sending to NVP
+        for attr, value in network['network'].iteritems():
+            if value == attributes.ATTR_NOT_SPECIFIED:
+                net_data[attr] = None
         # FIXME(arosen) implement admin_state_up = False in NVP
-        if network['network']['admin_state_up'] is False:
+        if net_data['admin_state_up'] is False:
             LOG.warning(_("Network with admin_state_up=False are not yet "
                           "supported by this plugin. Ignoring setting for "
-                          "network %s"),
-                        network['network'].get('name', '<unknown>'))
+                          "network %s") % net_data.get('name', '<unknown>'))
+        tenant_id = self._get_tenant_id_for_create(context, net_data)
+        target_cluster = self._find_target_cluster(net_data)
+        lswitch = nvplib.create_lswitch(target_cluster,
+                                        tenant_id,
+                                        net_data.get('name'),
+                                        net_data.get(pnet.NETWORK_TYPE),
+                                        net_data.get(pnet.PHYSICAL_NETWORK),
+                                        net_data.get(pnet.SEGMENTATION_ID))
+        network['network']['id'] = lswitch['uuid']
 
-        tenant_id = self._get_tenant_id_for_create(context, network)
-        # TODO(salvatore-orlando): if the network is shared this should be
-        # probably stored into the lswitch with a tag
-        # TODO(salvatore-orlando): Important - provider networks support
-        # (might require a bridged TZ)
-        net = nvplib.create_network(network['network']['tenant_id'],
-                                    network['network']['name'],
-                                    clusters=self.clusters)
-
-        network['network']['id'] = net['net-id']
-        return super(NvpPluginV2, self).create_network(context, network)
+        with context.session.begin(subtransactions=True):
+            new_net = super(NvpPluginV2, self).create_network(context,
+                                                              network)
+            if net_data.get(pnet.NETWORK_TYPE):
+                net_binding = nicira_db.add_network_binding(
+                    context.session, new_net['id'],
+                    net_data.get(pnet.NETWORK_TYPE),
+                    net_data.get(pnet.PHYSICAL_NETWORK),
+                    net_data.get(pnet.SEGMENTATION_ID))
+                self._extend_network_dict_provider(context, new_net,
+                                                   net_binding)
+            return new_net
 
     def delete_network(self, context, id):
         """
@@ -365,29 +425,31 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         :raises: exception.NetworkInUse
         :raises: exception.NetworkNotFound
         """
 
         super(NvpPluginV2, self).delete_network(context, id)
 
+        # FIXME(salvatore-orlando): Failures here might lead NVP
+        # and quantum state to diverge
         pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
         for (cluster, switches) in pairs:
             nvplib.delete_networks(cluster, id, switches)
 
-        LOG.debug(_("delete_network() completed for tenant: %s"),
+        LOG.debug(_("delete_network completed for tenant: %s"),
                   context.tenant_id)
 
     def _get_lswitch_cluster_pairs(self, netw_id, tenant_id):
         """Figure out the set of lswitches on each cluster that maps to this
         network id"""
         pairs = []
-        for c in self.clusters:
+        for c in self.clusters.itervalues():
             lswitches = []
             try:
-                ls = nvplib.get_network(c, netw_id)
-                lswitches.append(ls['uuid'])
-            except exception.NetworkNotFound:
+                results = nvplib.get_lswitches(c, netw_id)
+                lswitches.extend([ls['uuid'] for ls in results])
+            except q_exc.NetworkNotFound:
                 continue
             pairs.append((c, lswitches))
         if len(pairs) == 0:
-            raise exception.NetworkNotFound(net_id=netw_id)
+            raise q_exc.NetworkNotFound(net_id=netw_id)
         LOG.debug(_("Returning pairs for network: %s"), pairs)
         return pairs
 
@@ -414,55 +476,43 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         :raises: exception.NetworkNotFound
         :raises: exception.QuantumException
         """
-        result = {}
-        lswitch_query = "&uuid=%s" % id
-        # always look for the tenant_id in the resource itself rather than
-        # the context, as with shared networks context.tenant_id and
-        # network['tenant_id'] might differ on GETs
         # goto to the plugin DB and fecth the network
         network = self._get_network(context, id)
-        # TODO(salvatore-orlando): verify whether the query on os_tid is
-        # redundant or not.
-        if context.is_admin is False:
-            tenant_query = ("&tag=%s&tag_scope=os_tid"
-                            % network['tenant_id'])
-        else:
-            tenant_query = ""
-        # Then fetch the correspondiong logical switch in NVP as well
-        # TODO(salvatore-orlando): verify whether the step on NVP
-        # can be completely avoided
-        lswitch_url_path = (
-            "/ws.v1/lswitch?"
-            "fields=uuid,display_name%s%s"
-            % (tenant_query, lswitch_query))
+        # verify the fabric status of the corresponding
+        # logical switch(es) in nvp
         try:
-            for c in self.clusters:
-                lswitch_results = nvplib.get_all_query_pages(
-                    lswitch_url_path, c)
-                if lswitch_results:
-                    result['lswitch-display-name'] = (
-                        lswitch_results[0]['display_name'])
+            # FIXME(salvatore-orlando): This is not going to work unless
+            # nova_id is stored in db once multiple clusters are enabled
+            cluster = self._find_target_cluster(network)
+            lswitches = nvplib.get_lswitches(cluster, id)
+            net_op_status = constants.NET_STATUS_ACTIVE
+            quantum_status = network.status
+            for lswitch in lswitches:
+                lswitch_status = lswitch.get('LogicalSwitchStatus', None)
+                # FIXME(salvatore-orlando): Being unable to fetch the
+                # logical switch status should be an exception.
+                if (lswitch_status and
+                        not lswitch_status.get('fabric_status', None)):
+                    net_op_status = constants.NET_STATUS_DOWN
                     break
+            LOG.debug(_("Current network status:%(net_op_status)s; "
+                        "Status in Quantum DB:%(quantum_status)s")
+                      % locals())
+            if net_op_status != network.status:
+                # update the network status
+                with context.session.begin(subtransactions=True):
+                    network.status = net_op_status
         except Exception:
-            LOG.error(_("Unable to get switches: %s"), traceback.format_exc())
-            raise exception.QuantumException()
+            err_msg = _("Unable to get lswitches")
+            LOG.exception(err_msg)
+            raise nvp_exc.NvpPluginException(err_msg=err_msg)
 
-        if 'lswitch-display-name' not in result:
-            raise exception.NetworkNotFound(net_id=id)
-
-        # Fetch network in quantum
-        quantum_db = super(NvpPluginV2, self).get_network(context, id, fields)
-        d = {'id': id,
-             'name': result['lswitch-display-name'],
-             'tenant_id': network['tenant_id'],
-             'admin_state_up': True,
-             'status': constants.NET_STATUS_ACTIVE,
-             'shared': network['shared'],
-             'subnets': quantum_db.get('subnets', [])}
-
-        LOG.debug(_("get_network() completed for tenant %(tenant_id)s: %(d)s"),
-                  {'tenant_id': context.tenant_id, 'd': d})
-        return d
+        # Don't do field selection here otherwise we won't be able
+        # to add provider networks fields
+        net_result = self._make_network_dict(network, None)
+        self._extend_network_dict_provider(context, net_result)
+        return self._fields(net_result, fields)
 
     def get_networks(self, context, filters=None, fields=None):
         """
@@ -491,6 +541,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         nvp_lswitches = []
         quantum_lswitches = (
             super(NvpPluginV2, self).get_networks(context, filters))
+        for net in quantum_lswitches:
+            self._extend_network_dict_provider(context, net)
 
         if context.is_admin and not filters.get("tenant_id"):
             tenant_filter = ""
@@ -501,19 +553,20 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         else:
             tenant_filter = "&tag=%s&tag_scope=os_tid" % context.tenant_id
 
-        lswitch_filters = "uuid,display_name,fabric_status"
+        lswitch_filters = "uuid,display_name,fabric_status,tags"
         lswitch_url_path = (
             "/ws.v1/lswitch?fields=%s&relations=LogicalSwitchStatus%s"
             % (lswitch_filters, tenant_filter))
         try:
-            for c in self.clusters:
+            for c in self.clusters.itervalues():
                 res = nvplib.get_all_query_pages(
                     lswitch_url_path, c)
 
                 nvp_lswitches.extend(res)
         except Exception:
-            LOG.error(_("Unable to get switches: %s"), traceback.format_exc())
-            raise exception.QuantumException()
+            err_msg = _("Unable to get logical switches")
+            LOG.exception(err_msg)
+            raise nvp_exc.NvpPluginException(err_msg=err_msg)
 
         # TODO (Aaron) This can be optimized
         if filters.get("id"):
@@ -525,9 +578,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
             nvp_lswitches = filtered_lswitches
 
         for quantum_lswitch in quantum_lswitches:
-            Found = False
             for nvp_lswitch in nvp_lswitches:
-                if nvp_lswitch["uuid"] == quantum_lswitch["id"]:
+                # TODO(salvatore-orlando): watch out for "extended" lswitches
+                if nvp_lswitch['uuid'] == quantum_lswitch["id"]:
                     if (nvp_lswitch["_relations"]["LogicalSwitchStatus"]
                             ["fabric_status"]):
                         quantum_lswitch["status"] = constants.NET_STATUS_ACTIVE
@@ -535,12 +588,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                         quantum_lswitch["status"] = constants.NET_STATUS_DOWN
                     quantum_lswitch["name"] = nvp_lswitch["display_name"]
                     nvp_lswitches.remove(nvp_lswitch)
-                    Found = True
                     break
-
-        if not Found:
-            raise Exception(_("Quantum and NVP Databases are out of "
-                              "Sync!"))
+            else:
+                raise nvp_exc.NvpOutOfSyncException()
         # do not make the case in which switches are found in NVP
         # but not in Quantum catastrophic.
         if len(nvp_lswitches):
@@ -587,9 +637,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 
         if network["network"].get("admin_state_up"):
             if network['network']["admin_state_up"] is False:
-                raise exception.NotImplementedError("admin_state_up=False "
-                                                    "networks are not "
-                                                    "supported.")
+                raise q_exc.NotImplementedError(_("admin_state_up=False "
+                                                  "networks are not "
+                                                  "supported."))
         params = {}
         params["network"] = network["network"]
         pairs = self._get_lswitch_cluster_pairs(id, context.tenant_id)
@@ -598,7 +648,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         if network['network'].get("name"):
             for (cluster, switches) in pairs:
                 for switch in switches:
-                    result = nvplib.update_network(cluster, switch, **params)
+                    nvplib.update_lswitch(cluster, switch,
+                                          network['network']['name'])
 
         LOG.debug(_("update_network() completed for tenant: %s"),
                   context.tenant_id)
@@ -653,7 +704,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         lport_fields_str = ("tags,admin_status_enabled,display_name,"
                             "fabric_status_up")
         try:
-            for c in self.clusters:
+            for c in self.clusters.itervalues():
                 lport_query_path = (
                     "/ws.v1/lswitch/%s/lport?fields=%s&%s%stag_scope=q_port_id"
                     "&relations=LogicalPortStatus" %
@@ -667,8 +718,9 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                     nvp_lports[tag["tag"]] = port
 
         except Exception:
-            LOG.error(_("Unable to get ports: %s"), traceback.format_exc())
-            raise exception.QuantumException()
+            err_msg = _("Unable to get ports")
+            LOG.exception(err_msg)
+            raise nvp_exc.NvpPluginException(err_msg=err_msg)
 
         lports = []
         for quantum_lport in quantum_lports:
@@ -690,8 +742,10 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                 del nvp_lports[quantum_lport["id"]]
                 lports.append(quantum_lport)
             except KeyError:
-                raise Exception(_("Quantum and NVP Databases are out of "
-                                  "Sync!"))
+                LOG.debug(_("Quantum logical port %s was not found on NVP"),
+                          quantum_lport['id'])
+
         # do not make the case in which ports are found in NVP
         # but not in Quantum catastrophic.
         if len(nvp_lports):
@@ -721,8 +775,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
                "admin_state_up": Sets admin state of port. if down, port
                                  does not forward packets.
                "status": dicates whether port is currently operational
-                         (limit values to "ACTIVE", "DOWN", "BUILD", and
-                         "ERROR"?)
+                         (limit values to "ACTIVE", "DOWN", "BUILD", and "ERROR")
                "fixed_ips": list of subnet ID's and IP addresses to be used on
                             this port
                "device_id": identifies the device (e.g., virtual server) using
@@ -732,46 +785,70 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         :raises: exception.NetworkNotFound
         :raises: exception.StateInvalid
         """
-        # Set admin_state_up False since not created in NVP set
+        tenant_id = self._get_tenant_id_for_create(context, port['port'])
+        # TODO(salvatore-orlando) : verify whether subtransactions can help
+        # us avoiding multiple operations on the db. This might also allow
+        # us to use the same identifier for the NVP and the Quantum port
+        # Set admin_state_up False since not created in NVP yet
         port["port"]["admin_state_up"] = False
 
         # First we allocate port in quantum database
-        try:
-            quantum_db = super(NvpPluginV2, self).create_port(context, port)
-        except Exception as e:
-            raise e
+        quantum_db = super(NvpPluginV2, self).create_port(context, port)
 
-        # Update fields obtained from quantum db
+        # Update fields obtained from quantum db (eg: MAC address)
         port["port"].update(quantum_db)
 
         # We want port to be up in NVP
         port["port"]["admin_state_up"] = True
-        params = {}
-        params["max_lp_per_bridged_ls"] = \
-            self.nvp_opts["max_lp_per_bridged_ls"]
-        params["port"] = port["port"]
-        params["clusters"] = self.clusters
-        tenant_id = self._get_tenant_id_for_create(context, port["port"])
+        port_data = port['port']
+        # Fetch the network and network binding from Quantum db
+        network = self._get_network(context, port_data['network_id'])
+        network_binding = nicira_db.get_network_binding(
+            context.session, port_data['network_id'])
+        max_ports = self.nvp_opts.max_lp_per_overlay_ls
+        allow_extra_lswitches = False
+        if (network_binding and
+            network_binding.binding_type in (NetworkTypes.FLAT,
+                                             NetworkTypes.VLAN)):
+            max_ports = self.nvp_opts.max_lp_per_bridged_ls
+            allow_extra_lswitches = True
         try:
-            port["port"], nvp_port_id = nvplib.create_port(tenant_id,
-                                                           **params)
-            nvplib.plug_interface(self.clusters, port["port"]["network_id"],
-                                  nvp_port_id, "VifAttachment",
-                                  port["port"]["id"])
-        except Exception as e:
-            # failed to create port in NVP delete port from quantum_db
+            q_net_id = port_data['network_id']
+            cluster = self._find_target_cluster(port_data)
+            selected_lswitch = self._handle_lswitch_selection(
+                cluster, network, network_binding, max_ports,
+                allow_extra_lswitches)
+            lswitch_uuid = selected_lswitch['uuid']
+            lport = nvplib.create_lport(cluster,
+                                        lswitch_uuid,
+                                        port_data['tenant_id'],
+                                        port_data['id'],
+                                        port_data['name'],
+                                        port_data['device_id'],
+                                        port_data['admin_state_up'],
+                                        port_data['mac_address'],
+                                        port_data['fixed_ips'])
+            # Get NVP ls uuid for quantum network
+            nvplib.plug_interface(cluster, selected_lswitch['uuid'],
+                                  lport['uuid'], "VifAttachment",
+                                  port_data['id'])
+        except nvp_exc.NvpNoMorePortsException as e:
+            LOG.error(_("Number of available ports for network %s exhausted"),
+                      port_data['network_id'])
             super(NvpPluginV2, self).delete_port(context, port["port"]["id"])
             raise e
+        except Exception:
+            # failed to create port in NVP delete port from quantum_db
+            err_msg = _("An exception occured while plugging the interface "
+                        "in NVP for port %s") % port_data['id']
+            LOG.exception(err_msg)
+            super(NvpPluginV2, self).delete_port(context, port["port"]["id"])
+            raise nvp_exc.NvpPluginException(err_desc=err_msg)
 
-        d = {"port-id": port["port"]["id"],
-             "port-op-status": port["port"]["status"]}
-
-        LOG.debug(_("create_port() completed for tenant %(tenant_id)s: %(d)s"),
-                  locals())
-
-        # update port with admin_state_up True
+        LOG.debug(_("create_port completed on NVP for tenant %(tenant_id)s: "
+                    "(%(id)s)") % port_data)
+
+        # update port on Quantum DB with admin_state_up True
         port_update = {"port": {"admin_state_up": True}}
         return super(NvpPluginV2, self).update_port(context,
                                                     port["port"]["id"],
@@ -803,23 +880,17 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         """
         params = {}
 
-        quantum_db = super(NvpPluginV2, self).get_port(context, id)
+        port_quantum = super(NvpPluginV2, self).get_port(context, id)
 
         port_nvp, cluster = (
-            nvplib.get_port_by_quantum_tag(self.clusters,
-                                           quantum_db["network_id"], id))
-
-        LOG.debug(_("Update port request: %s"), params)
+            nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
+                                           port_quantum["network_id"], id))
 
         params["cluster"] = cluster
-        params["port"] = port["port"]
-        params["port"]["id"] = quantum_db["id"]
-        params["port"]["tenant_id"] = quantum_db["tenant_id"]
-        result = nvplib.update_port(quantum_db["network_id"],
-                                    port_nvp["uuid"], **params)
-        LOG.debug(_("update_port() completed for tenant: %s"),
-                  context.tenant_id)
-
+        params["port"] = port_quantum
+        LOG.debug(_("Update port request: %s"), params)
+        nvplib.update_port(port_quantum['network_id'],
+                           port_nvp['uuid'], **params)
         return super(NvpPluginV2, self).update_port(context, id, port)
 
     def delete_port(self, context, id):
@@ -835,10 +906,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
         :raises: exception.NetworkNotFound
         """
 
-        port, cluster = nvplib.get_port_by_quantum_tag(self.clusters,
-                                                       '*', id)
+        # TODO(salvatore-orlando): pass only actual cluster
+        port, cluster = nvplib.get_port_by_quantum_tag(
+            self.clusters.itervalues(), '*', id)
         if port is None:
-            raise exception.PortNotFound(port_id=id)
+            raise q_exc.PortNotFound(port_id=id)
         # TODO(bgh): if this is a bridged network and the lswitch we just got
         # back will have zero ports after the delete we should garbage collect
         # the lswitch.
@@ -867,9 +939,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
 
         quantum_db = super(NvpPluginV2, self).get_port(context, id, fields)
 
+        #TODO: pass only the appropriate cluster here
+        #Look for port in all lswitches
         port, cluster = (
-            nvplib.get_port_by_quantum_tag(self.clusters,
-                                           quantum_db["network_id"], id))
+            nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
+                                           "*", id))
 
         quantum_db["admin_state_up"] = port["admin_status_enabled"]
         if port["_relations"]["LogicalPortStatus"]["fabric_status_up"]:
@@ -28,7 +28,7 @@ from common import _conn_str
 eventlet.monkey_patch()
 
 logging.basicConfig(level=logging.INFO)
-lg = logging.getLogger('nvp_api_client')
+LOG = logging.getLogger(__name__)
 
 # Default parameters.
 DEFAULT_FAILOVER_TIME = 5
@@ -156,19 +156,19 @@ class NvpApiClientEventlet(object):
         api_providers are configured.
         '''
         if not self._api_providers:
-            lg.warn(_("[%d] no API providers currently available."), rid)
+            LOG.warn(_("[%d] no API providers currently available."), rid)
             return None
 
         # The sleep time is to give controllers time to become consistent after
         # there has been a change in the controller used as the api_provider.
         now = time.time()
         if now < getattr(self, '_issue_conn_barrier', now):
-            lg.warn(_("[%d] Waiting for failover timer to expire."), rid)
+            LOG.warn(_("[%d] Waiting for failover timer to expire."), rid)
             time.sleep(self._issue_conn_barrier - now)
 
         # Print out a warning if all connections are in use.
         if self._conn_pool[self._active_conn_pool_idx].empty():
-            lg.debug(_("[%d] Waiting to acquire client connection."), rid)
+            LOG.debug(_("[%d] Waiting to acquire client connection."), rid)
 
         # Try to acquire a connection (block in get() until connection
         # available or timeout occurs).
@@ -178,19 +178,19 @@ class NvpApiClientEventlet(object):
         if active_conn_pool_idx != self._active_conn_pool_idx:
             # active_conn_pool became inactive while we were waiting.
             # Put connection back on old pool and try again.
-            lg.warn(_("[%(rid)d] Active pool expired while waiting for "
+            LOG.warn(_("[%(rid)d] Active pool expired while waiting for "
                       "connection: %(conn)s"),
                     {'rid': rid, 'conn': _conn_str(conn)})
             self._conn_pool[active_conn_pool_idx].put(conn)
             return self.acquire_connection(rid=rid)
 
         # Check if the connection has been idle too long.
         now = time.time()
         if getattr(conn, 'last_used', now) < now - self.CONN_IDLE_TIMEOUT:
-            lg.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
+            LOG.info(_("[%(rid)d] Connection %(conn)s idle for %(sec)0.2f "
                       "seconds; reconnecting."),
                     {'rid': rid, 'conn': _conn_str(conn),
                      'sec': now - conn.last_used})
             conn = self._create_connection(*self._conn_params(conn))
 
         # Stash conn pool so conn knows where to go when it releases.
@@ -198,9 +198,9 @@ class NvpApiClientEventlet(object):
 
         conn.last_used = now
         qsize = self._conn_pool[self._active_conn_pool_idx].qsize()
-        lg.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
+        LOG.debug(_("[%(rid)d] Acquired connection %(conn)s. %(qsize)d "
                    "connection(s) available."),
                  {'rid': rid, 'conn': _conn_str(conn), 'qsize': qsize})
         return conn
 
     def release_connection(self, http_conn, bad_state=False, rid=-1):
@@ -213,9 +213,9 @@ class NvpApiClientEventlet(object):
         :param rid: request id passed in from request eventlet.
         '''
         if self._conn_params(http_conn) not in self._api_providers:
-            lg.warn(_("[%(rid)d] Released connection '%(conn)s' is not an "
+            LOG.warn(_("[%(rid)d] Released connection '%(conn)s' is not an "
                       "API provider for the cluster"),
                     {'rid': rid, 'conn': _conn_str(http_conn)})
             return
 
         # Retrieve "home" connection pool.
@@ -223,9 +223,9 @@ class NvpApiClientEventlet(object):
         conn_pool = self._conn_pool[conn_pool_idx]
         if bad_state:
             # Reconnect to provider.
-            lg.warn(_("[%(rid)d] Connection returned in bad state, "
+            LOG.warn(_("[%(rid)d] Connection returned in bad state, "
                       "reconnecting to %(conn)s"),
                     {'rid': rid, 'conn': _conn_str(http_conn)})
             http_conn = self._create_connection(*self._conn_params(http_conn))
             http_conn.idx = conn_pool_idx
 
@@ -233,20 +233,20 @@ class NvpApiClientEventlet(object):
             # This pool is no longer in a good state. Switch to next pool.
             self._active_conn_pool_idx += 1
             self._active_conn_pool_idx %= len(self._conn_pool)
-            lg.warn(_("[%(rid)d] Switched active_conn_pool from "
+            LOG.warn(_("[%(rid)d] Switched active_conn_pool from "
                       "%(idx)d to %(pool_idx)d."),
                     {'rid': rid, 'idx': http_conn.idx,
                      'pool_idx': self._active_conn_pool_idx})
 
             # No connections to the new provider allowed until after this
             # timer has expired (allow time for synchronization).
             self._issue_conn_barrier = time.time() + self._failover_time
 
         conn_pool.put(http_conn)
-        lg.debug(_("[%(rid)d] Released connection %(conn)s. "
+        LOG.debug(_("[%(rid)d] Released connection %(conn)s. "
                    "%(qsize)d connection(s) available."),
                  {'rid': rid, 'conn': _conn_str(http_conn),
                   'qsize': conn_pool.qsize()})
 
     @property
     def need_login(self):
@ -263,7 +263,7 @@ class NvpApiClientEventlet(object):
|
|||||||
self.login()
|
self.login()
|
||||||
self._doing_login_sem.release()
|
self._doing_login_sem.release()
|
||||||
else:
|
else:
|
||||||
lg.debug(_("Waiting for auth to complete"))
|
LOG.debug(_("Waiting for auth to complete"))
|
||||||
self._doing_login_sem.acquire()
|
self._doing_login_sem.acquire()
|
||||||
self._doing_login_sem.release()
|
self._doing_login_sem.release()
|
||||||
return self._cookie
|
return self._cookie
|
||||||
@ -277,13 +277,13 @@ class NvpApiClientEventlet(object):
|
|||||||
|
|
||||||
if ret:
|
if ret:
|
||||||
if isinstance(ret, Exception):
|
if isinstance(ret, Exception):
|
||||||
lg.error(_('NvpApiClient: login error "%s"'), ret)
|
LOG.error(_('NvpApiClient: login error "%s"'), ret)
|
||||||
raise ret
|
raise ret
|
||||||
|
|
||||||
self._cookie = None
|
self._cookie = None
|
||||||
cookie = ret.getheader("Set-Cookie")
|
cookie = ret.getheader("Set-Cookie")
|
||||||
if cookie:
|
if cookie:
|
||||||
lg.debug(_("Saving new authentication cookie '%s'"), cookie)
|
LOG.debug(_("Saving new authentication cookie '%s'"), cookie)
|
||||||
self._cookie = cookie
|
self._cookie = cookie
|
||||||
self._need_login = False
|
self._need_login = False
|
||||||
|
|
||||||
@@ -40,8 +40,10 @@ database_opts = [
 
 nvp_opts = [
     cfg.IntOpt('max_lp_per_bridged_ls', default=64),
+    cfg.IntOpt('max_lp_per_overlay_ls', default=256),
     cfg.IntOpt('concurrent_connections', default=5),
-    cfg.IntOpt('failover_time', default=240)
+    cfg.IntOpt('failover_time', default=240),
+    cfg.StrOpt('default_cluster_name')
 ]
 
 cluster_opts = [
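For illustration, once these options are registered under the [NVP] group
(the registration call itself is assumed to mirror how database_opts are
handled elsewhere in config.py), the plugin reads them through cfg in the
usual way:

    from quantum.openstack.common import cfg

    # Assumes: cfg.CONF.register_opts(nvp_opts, 'NVP') has been called.
    max_overlay_ports = cfg.CONF.NVP.max_lp_per_overlay_ls  # 256 by default
    cluster_name = cfg.CONF.NVP.default_cluster_name        # None if unset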
@@ -0,0 +1,42 @@ (new file)
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2012 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

""" NVP Plugin exceptions """

from quantum.common import exceptions as q_exc


class NvpPluginException(q_exc.QuantumException):
    message = _("An unexpected error occurred in the NVP Plugin:%(err_desc)s")


class NvpInvalidConnection(NvpPluginException):
    message = _("Invalid NVP connection parameters: %(conn_params)s")


class NvpInvalidNovaZone(NvpPluginException):
    message = _("Unable to find cluster config entry "
                "for nova zone: %(nova_zone)s")


class NvpNoMorePortsException(NvpPluginException):
    message = _("Unable to create port on network %(network)s. "
                "Maximum number of ports reached")


class NvpOutOfSyncException(NvpPluginException):
    message = _("Quantum state has diverged from the networking backend!")
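These classes follow the usual QuantumException pattern: the class-level
message is interpolated with the keyword arguments passed at raise time. A
minimal sketch of how port exhaustion might be signalled (the import path
and all identifiers are assumptions for illustration):

    from quantum.plugins.nicira.nicira_nvp_plugin.common import (
        exceptions as nvp_exc)

    def check_port_capacity(lswitch, max_ports, network_id):
        # Hypothetical helper: refuse to place another port on a full switch.
        if lswitch['lport_count'] >= max_ports:
            raise nvp_exc.NvpNoMorePortsException(network=network_id)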
quantum/plugins/nicira/nicira_nvp_plugin/nicira_db.py (new file, 54 lines)
@@ -0,0 +1,54 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from sqlalchemy.orm import exc

import quantum.db.api as db
from quantum.plugins.nicira.nicira_nvp_plugin import nicira_models

LOG = logging.getLogger(__name__)


def get_network_binding(session, network_id):
    session = session or db.get_session()
    try:
        binding = (session.query(nicira_models.NvpNetworkBinding).
                   filter_by(network_id=network_id).
                   one())
        return binding
    except exc.NoResultFound:
        return


def get_network_binding_by_vlanid(session, vlan_id):
    session = session or db.get_session()
    try:
        binding = (session.query(nicira_models.NvpNetworkBinding).
                   filter_by(vlan_id=vlan_id).
                   one())
        return binding
    except exc.NoResultFound:
        return


def add_network_binding(session, network_id, binding_type, tz_uuid, vlan_id):
    with session.begin(subtransactions=True):
        binding = nicira_models.NvpNetworkBinding(network_id, binding_type,
                                                  tz_uuid, vlan_id)
        session.add(binding)
        return binding
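A minimal sketch of how these helpers fit together when a provider network
is created (session handling and all identifiers are illustrative):

    import quantum.db.api as db
    from quantum.plugins.nicira.nicira_nvp_plugin import nicira_db

    session = db.get_session()
    # Persist the mapping: quantum network -> transport zone (+ vlan).
    nicira_db.add_network_binding(session, 'net-uuid', 'vlan', 'tz-uuid', 123)
    # Later, e.g. to check whether a vlan id is already in use:
    binding = nicira_db.get_network_binding_by_vlanid(session, 123)
    assert binding.binding_type == 'vlan'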
quantum/plugins/nicira/nicira_nvp_plugin/nicira_models.py (new file, 49 lines)
@@ -0,0 +1,49 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


from sqlalchemy import Column, Enum, ForeignKey, Integer, String

from quantum.db.models_v2 import model_base


class NvpNetworkBinding(model_base.BASEV2):
    """Represents a binding of a virtual network with a transport zone.

    This model class associates a Quantum network with a transport zone;
    optionally a vlan ID might be used if the binding type is 'bridge'
    """
    __tablename__ = 'nvp_network_bindings'

    network_id = Column(String(36),
                        ForeignKey('networks.id', ondelete="CASCADE"),
                        primary_key=True)
    # 'flat', 'vlan', 'stt' or 'gre'
    binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre'), nullable=False)
    tz_uuid = Column(String(36))
    vlan_id = Column(Integer)

    def __init__(self, network_id, binding_type, tz_uuid, vlan_id):
        self.network_id = network_id
        self.binding_type = binding_type
        self.tz_uuid = tz_uuid
        self.vlan_id = vlan_id

    def __repr__(self):
        return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
                                                  self.binding_type,
                                                  self.tz_uuid,
                                                  self.vlan_id)
quantum/plugins/nicira/nicira_nvp_plugin/nvp_cluster.py (new file, 131 lines)
@@ -0,0 +1,131 @@
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#


class NVPCluster(object):
    """Encapsulates controller connection and api_client for a cluster.

    Accessed within the NvpPluginV2 class.

    Each element in the self.controllers list is a dictionary that
    contains the following keys:
        ip, port, user, password, default_tz_uuid, uuid, zone

    There may be some redundancy here, but that has been done to provide
    future flexibility.
    """

    def __init__(self, name):
        self._name = name
        self.controllers = []
        self.api_client = None

    def __repr__(self):
        ss = ['{ "NVPCluster": [']
        ss.append('{ "name" : "%s" }' % self.name)
        ss.append(',')
        for c in self.controllers:
            ss.append(str(c))
            ss.append(',')
        ss.append('] }')
        return ''.join(ss)

    def add_controller(self, ip, port, user, password, request_timeout,
                       http_timeout, retries, redirects,
                       default_tz_uuid, uuid=None, zone=None):
        """Add a new set of controller parameters.

        :param ip: IP address of controller.
        :param port: port controller is listening on.
        :param user: user name.
        :param password: user password.
        :param request_timeout: timeout for an entire API request.
        :param http_timeout: timeout for a connect to a controller.
        :param retries: maximum number of request retries.
        :param redirects: maximum number of server redirect responses to
            follow.
        :param default_tz_uuid: default transport zone uuid.
        :param uuid: UUID of this cluster (used in MDI configs).
        :param zone: Zone of this cluster (used in MDI configs).
        """

        keys = [
            'ip', 'user', 'password', 'default_tz_uuid', 'uuid', 'zone']
        controller_dict = dict([(k, locals()[k]) for k in keys])

        int_keys = [
            'port', 'request_timeout', 'http_timeout', 'retries', 'redirects']
        for k in int_keys:
            controller_dict[k] = int(locals()[k])

        self.controllers.append(controller_dict)

    def get_controller(self, idx):
        return self.controllers[idx]

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, val=None):
        self._name = val

    @property
    def host(self):
        return self.controllers[0]['ip']

    @property
    def port(self):
        return self.controllers[0]['port']

    @property
    def user(self):
        return self.controllers[0]['user']

    @property
    def password(self):
        return self.controllers[0]['password']

    @property
    def request_timeout(self):
        return self.controllers[0]['request_timeout']

    @property
    def http_timeout(self):
        return self.controllers[0]['http_timeout']

    @property
    def retries(self):
        return self.controllers[0]['retries']

    @property
    def redirects(self):
        return self.controllers[0]['redirects']

    @property
    def default_tz_uuid(self):
        return self.controllers[0]['default_tz_uuid']

    @property
    def zone(self):
        return self.controllers[0]['zone']

    @property
    def uuid(self):
        return self.controllers[0]['uuid']
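A minimal sketch of building a cluster object from parsed configuration
(all values illustrative; add_controller coerces the numeric parameters
with int(), so strings read straight from a config file are acceptable):

    from quantum.plugins.nicira.nicira_nvp_plugin.nvp_cluster import (
        NVPCluster)

    cluster = NVPCluster('example')
    cluster.add_controller(ip='192.168.1.10', port='443',
                           user='admin', password='secret',
                           request_timeout='30', http_timeout='10',
                           retries='2', redirects='2',
                           default_tz_uuid='tz-uuid')
    # The convenience properties proxy to the first controller:
    assert cluster.host == '192.168.1.10' and cluster.port == 443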
@@ -25,6 +25,7 @@
 
 from copy import copy
 import functools
+import itertools
 import json
 import hashlib
 import logging
@@ -33,6 +34,7 @@ import re
 import uuid
 
 from eventlet import semaphore
+
 import NvpApiClient
 
 #FIXME(danwent): I'd like this file to get to the point where it has
@@ -40,6 +42,17 @@ import NvpApiClient
 from quantum.common import constants
 from quantum.common import exceptions as exception
 
+# HTTP METHODS CONSTANTS
+HTTP_GET = "GET"
+HTTP_POST = "POST"
+# Default transport type for logical switches
+DEF_TRANSPORT_TYPE = "stt"
+# Prefix to be used for all NVP API calls
+URI_PREFIX = "/ws.v1"
+# Resources exposed by NVP API
+LSWITCH_RESOURCE = "lswitch"
+LPORT_RESOURCE = "lport"
+
 LOCAL_LOGGING = False
 if LOCAL_LOGGING:
     from logging.handlers import SysLogHandler
@@ -52,8 +65,8 @@ if LOCAL_LOGGING:
     LOG.addHandler(syslog)
     LOG.setLevel(logging.DEBUG)
 else:
-    LOG = logging.getLogger("nvplib")
-    LOG.setLevel(logging.INFO)
+    LOG = logging.getLogger(__name__)
+    LOG.setLevel(logging.DEBUG)
 
 # TODO(bgh): it would be more efficient to use a bitmap
 taken_context_ids = []
@@ -63,12 +76,42 @@ _net_type_cache = {}  # cache of {net_id: network_type}
 _lqueue_cache = {}
 
 
+def _build_uri_path(resource,
+                    resource_id=None,
+                    parent_resource_id=None,
+                    fields=None,
+                    relations=None, filters=None):
+    # TODO(salvatore-orlando): This is ugly. do something more clever
+    # and avoid the if statement
+    if resource == LPORT_RESOURCE:
+        res_path = ("%s/%s/%s" % (LSWITCH_RESOURCE,
+                                  parent_resource_id,
+                                  resource) +
+                    (resource_id and "/%s" % resource_id or ''))
+    else:
+        res_path = resource + (resource_id and
+                               "/%s" % resource_id or '')
+
+    params = []
+    params.append(fields and "fields=%s" % fields)
+    params.append(relations and "relations=%s" % relations)
+    if filters:
+        params.extend(['%s=%s' % (k, v) for (k, v) in filters.iteritems()])
+    uri_path = "%s/%s" % (URI_PREFIX, res_path)
+    query_string = reduce(lambda x, y: "%s&%s" % (x, y),
+                          itertools.ifilter(lambda x: x is not None, params),
+                          "")
+    if query_string:
+        uri_path += "?%s" % query_string
+    return uri_path
+
+
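For illustration, the helper composes NVP request paths like these (the
stray '&' right after '?' is an artifact of seeding the reduce with an
empty string, left intact here):

    from quantum.plugins.nicira.nicira_nvp_plugin import nvplib

    nvplib._build_uri_path('lswitch', resource_id='ls-uuid')
    # -> '/ws.v1/lswitch/ls-uuid'
    nvplib._build_uri_path('lport', parent_resource_id='ls-uuid',
                           fields='uuid', filters={'tag_scope': 'q_port_id'})
    # -> '/ws.v1/lswitch/ls-uuid/lport?&fields=uuid&tag_scope=q_port_id'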
 def get_cluster_version(cluster):
     """Return major/minor version #"""
     # Get control-cluster nodes
     uri = "/ws.v1/control-cluster/node?_page_length=1&fields=uuid"
     try:
-        res = do_single_request("GET", uri, cluster=cluster)
+        res = do_single_request(HTTP_GET, uri, cluster=cluster)
         res = json.loads(res)
     except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
@@ -79,7 +122,7 @@ def get_cluster_version(cluster):
     # running different version so we just need the first node version.
     uri = "/ws.v1/control-cluster/node/%s/status" % node_uuid
     try:
-        res = do_single_request("GET", uri, cluster=cluster)
+        res = do_single_request(HTTP_GET, uri, cluster=cluster)
         res = json.loads(res)
     except NvpApiClient.NvpApiException:
         raise exception.QuantumException()
@@ -97,7 +140,7 @@ def get_all_query_pages(path, c):
     while need_more_results:
         page_cursor_str = (
             "_page_cursor=%s" % page_cursor if page_cursor else "")
-        res = do_single_request("GET", "%s%s%s" %
+        res = do_single_request(HTTP_GET, "%s%s%s" %
                                 (path, query_marker, page_cursor_str),
                                 cluster=c)
         body = json.loads(res)
@@ -155,48 +198,83 @@ def find_lswitch_by_portid(clusters, port_id):
     return (None, None)
 
 
-def get_network(cluster, net_id):
-    path = "/ws.v1/lswitch/%s" % net_id
+def get_lswitches(cluster, quantum_net_id):
+    lswitch_uri_path = _build_uri_path(LSWITCH_RESOURCE, quantum_net_id,
+                                       relations="LogicalSwitchStatus")
+    results = []
     try:
-        resp_obj = do_single_request("GET", path, cluster=cluster)
-        network = json.loads(resp_obj)
-        LOG.warning(_("### nw:%s"), network)
-    except NvpApiClient.ResourceNotFound:
-        raise exception.NetworkNotFound(net_id=net_id)
-    except NvpApiClient.NvpApiException:
-        raise exception.QuantumException()
-    LOG.debug(_("Got network '%(net_id)s': %(network)s"), locals())
-    return network
-
-
-def create_lswitch(cluster, lswitch_obj):
-    LOG.info(_("Creating lswitch: %s"), lswitch_obj)
-    # Warn if no tenant is specified
-    found = "os_tid" in [x["scope"] for x in lswitch_obj["tags"]]
-    if not found:
-        LOG.warn(_("No tenant-id tag specified in logical switch: %s"),
-                 lswitch_obj)
-    uri = "/ws.v1/lswitch"
-    try:
-        resp_obj = do_single_request("POST", uri,
-                                     json.dumps(lswitch_obj),
+        resp_obj = do_single_request(HTTP_GET,
+                                     lswitch_uri_path,
                                      cluster=cluster)
+        ls = json.loads(resp_obj)
+        results.append(ls)
+        for tag in ls['tags']:
+            if (tag['scope'] == "multi_lswitch" and
+                tag['tag'] == "True"):
+                # Fetch extra logical switches
+                extra_lswitch_uri_path = _build_uri_path(
+                    LSWITCH_RESOURCE,
+                    fields="uuid,display_name,tags,lport_count",
+                    relations="LogicalSwitchStatus",
+                    filters={'tag': quantum_net_id,
+                             'tag_scope': 'quantum_net_id'})
+                extra_switches = get_all_query_pages(extra_lswitch_uri_path,
+                                                     cluster)
+                results.extend(extra_switches)
+        return results
     except NvpApiClient.NvpApiException:
+        # TODO(salvatore-orlando): Do a better exception handling
+        # and re-raising
+        LOG.exception(_("An error occurred while fetching logical switches "
+                        "for Quantum network %s"), quantum_net_id)
         raise exception.QuantumException()
 
-    r = json.loads(resp_obj)
-    d = {}
-    d["net-id"] = r['uuid']
-    d["net-name"] = r['display_name']
-    LOG.debug(_("Created logical switch: %s"), d["net-id"])
-    return d
+
+def create_lswitch(cluster, tenant_id, display_name,
+                   transport_type=None,
+                   transport_zone_uuid=None,
+                   vlan_id=None,
+                   quantum_net_id=None,
+                   **kwargs):
+    nvp_binding_type = transport_type
+    if transport_type in ('flat', 'vlan'):
+        nvp_binding_type = 'bridge'
+    transport_zone_config = {"zone_uuid": (transport_zone_uuid or
+                                           cluster.default_tz_uuid),
+                             "transport_type": (nvp_binding_type or
+                                                DEF_TRANSPORT_TYPE)}
+    lswitch_obj = {"display_name": display_name,
+                   "transport_zones": [transport_zone_config],
+                   "tags": [{"tag": tenant_id, "scope": "os_tid"}]}
+    if nvp_binding_type == 'bridge' and vlan_id:
+        transport_zone_config["binding_config"] = {"vlan_translation":
+                                                   [{"transport": vlan_id}]}
+    if quantum_net_id:
+        lswitch_obj["tags"].append({"tag": quantum_net_id,
+                                    "scope": "quantum_net_id"})
+    if "tags" in kwargs:
+        lswitch_obj["tags"].extend(kwargs["tags"])
+    uri = _build_uri_path(LSWITCH_RESOURCE)
+    try:
+        lswitch_res = do_single_request(HTTP_POST, uri,
+                                        json.dumps(lswitch_obj),
+                                        cluster=cluster)
+    except NvpApiClient.NvpApiException:
+        raise exception.QuantumException()
+    lswitch = json.loads(lswitch_res)
+    LOG.debug(_("Created logical switch: %s") % lswitch['uuid'])
+    return lswitch
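For illustration, a call such as create_lswitch(cluster, 'tenant-1', 'net1',
transport_type='vlan', vlan_id=123, quantum_net_id='qnet-uuid') — all
identifiers made up — would POST a body along these lines; note that 'flat'
and 'vlan' are both mapped to the NVP 'bridge' transport type:

    lswitch_obj = {
        "display_name": "net1",
        "transport_zones": [{"zone_uuid": "tz-uuid",      # cluster default
                             "transport_type": "bridge",  # mapped from 'vlan'
                             "binding_config": {
                                 "vlan_translation": [{"transport": 123}]}}],
        "tags": [{"tag": "tenant-1", "scope": "os_tid"},
                 {"tag": "qnet-uuid", "scope": "quantum_net_id"}],
    }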
-def update_network(cluster, lswitch_id, **params):
-    uri = "/ws.v1/lswitch/" + lswitch_id
-    lswitch_obj = {}
-    if params["network"]["name"]:
-        lswitch_obj["display_name"] = params["network"]["name"]
+def update_lswitch(cluster, lswitch_id, display_name,
+                   tenant_id=None, **kwargs):
+    uri = _build_uri_path(LSWITCH_RESOURCE, resource_id=lswitch_id)
+    # TODO(salvatore-orlando): Make sure this operation does not remove
+    # any other important tag set on the lswitch object
+    lswitch_obj = {"display_name": display_name,
+                   "tags": [{"tag": tenant_id, "scope": "os_tid"}]}
+    if "tags" in kwargs:
+        lswitch_obj["tags"].extend(kwargs["tags"])
     try:
         resp_obj = do_single_request("PUT", uri, json.dumps(lswitch_obj),
                                      cluster=cluster)
@@ -262,26 +340,6 @@ def delete_networks(cluster, net_id, lswitch_ids):
         raise exception.QuantumException()
 
 
-def create_network(tenant_id, net_name, **kwargs):
-    clusters = kwargs["clusters"]
-    # Default to the primary cluster
-    cluster = clusters[0]
-
-    transport_zone = kwargs.get("transport_zone",
-                                cluster.default_tz_uuid)
-    transport_type = kwargs.get("transport_type", "stt")
-    lswitch_obj = {"display_name": net_name,
-                   "transport_zones": [
-                       {"zone_uuid": transport_zone,
-                        "transport_type": transport_type}
-                   ],
-                   "tags": [{"tag": tenant_id, "scope": "os_tid"}]}
-
-    net = create_lswitch(cluster, lswitch_obj)
-    net['net-op-status'] = constants.NET_STATUS_ACTIVE
-    return net
-
-
 def query_ports(cluster, network, relations=None, fields="*", filters=None):
     uri = "/ws.v1/lswitch/" + network + "/lport?"
     if relations:
@@ -328,7 +386,7 @@ def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
             if len(res["results"]) == 1:
                 return (res["results"][0], c)
 
-    LOG.error(_("Port or Network not found, Error: %s"), str(e))
+    LOG.error(_("Port or Network not found"))
     raise exception.PortNotFound(port_id=quantum_tag, net_id=lswitch)
 
 
@@ -403,38 +461,32 @@ def update_port(network, port_id, **params):
     return obj
 
 
-def create_port(tenant, **params):
-    clusters = params["clusters"]
-    dest_cluster = clusters[0]  # primary cluster
-
-    ls_uuid = params["port"]["network_id"]
+def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
+                 display_name, device_id, admin_status_enabled,
+                 mac_address=None, fixed_ips=None):
+    """ Creates a logical port on the assigned logical switch """
     # device_id can be longer than 40 so we rehash it
-    device_id = hashlib.sha1(params["port"]["device_id"]).hexdigest()
+    hashed_device_id = hashlib.sha1(device_id).hexdigest()
     lport_obj = dict(
-        admin_status_enabled=params["port"]["admin_state_up"],
-        display_name=params["port"]["name"],
-        tags=[dict(scope='os_tid', tag=tenant),
-              dict(scope='q_port_id', tag=params["port"]["id"]),
-              dict(scope='vm_id', tag=device_id)]
+        admin_status_enabled=admin_status_enabled,
+        display_name=display_name,
+        tags=[dict(scope='os_tid', tag=tenant_id),
+              dict(scope='q_port_id', tag=quantum_port_id),
+              dict(scope='vm_id', tag=hashed_device_id)],
     )
-    path = "/ws.v1/lswitch/" + ls_uuid + "/lport"
+    path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid)
 
     try:
-        resp_obj = do_single_request("POST", path, json.dumps(lport_obj),
-                                     cluster=dest_cluster)
+        resp_obj = do_single_request("POST", path,
+                                     json.dumps(lport_obj),
+                                     cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
-        LOG.error("Network not found, Error: %s" % str(e))
-        raise exception.NetworkNotFound(net_id=params["port"]["network_id"])
-    except NvpApiClient.NvpApiException as e:
-        raise exception.QuantumException()
+        LOG.error("Logical switch not found, Error: %s" % str(e))
+        raise
 
     result = json.loads(resp_obj)
-    result['port-op-status'] = get_port_status(dest_cluster, ls_uuid,
-                                               result['uuid'])
-
-    params["port"].update({"admin_state_up": result["admin_status_enabled"],
-                           "status": result["port-op-status"]})
-    return (params["port"], result['uuid'])
+    LOG.debug("Created logical port %s on logical switch %s"
+              % (result['uuid'], lswitch_uuid))
+    return result
 
 
 def get_port_status(cluster, lswitch_id, port_id):
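A minimal sketch of a call against the new create_lport signature
(identifiers are illustrative); the caller keeps result['uuid'] to
correlate the quantum port with the logical port created by the controller:

    lport = nvplib.create_lport(cluster, 'ls-uuid', 'tenant-1', 'qport-uuid',
                                display_name='port1',
                                device_id='instance-0001',
                                admin_status_enabled=True)
    nvp_port_id = lport['uuid']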
@@ -455,10 +507,8 @@ def get_port_status(cluster, lswitch_id, port_id):
             return constants.PORT_STATUS_DOWN
 
 
-def plug_interface(clusters, lswitch_id, port, type, attachment=None):
-    dest_cluster = clusters[0]  # primary cluster
+def plug_interface(cluster, lswitch_id, port, type, attachment=None):
     uri = "/ws.v1/lswitch/" + lswitch_id + "/lport/" + port + "/attachment"
 
     lport_obj = {}
     if attachment:
         lport_obj["vif_uuid"] = attachment
@@ -466,7 +516,7 @@ def plug_interface(clusters, lswitch_id, port, type, attachment=None):
     lport_obj["type"] = type
     try:
         resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
-                                     cluster=dest_cluster)
+                                     cluster=cluster)
     except NvpApiClient.ResourceNotFound as e:
         LOG.error(_("Port or Network not found, Error: %s"), str(e))
         raise exception.PortNotFound(port_id=port, net_id=lswitch_id)
@@ -4,7 +4,9 @@
 "_relations": {"LogicalSwitchStatus":
                {"fabric_status": true,
                 "type": "LogicalSwitchStatus",
+                "lport_count": %(lport_count)d,
                 "_href": "/ws.v1/lswitch/%(uuid)s/status",
                 "_schema": "/ws.v1/schema/LogicalSwitchStatus"}},
 "type": "LogicalSwitchConfig",
+"tags": %(tags_json)s,
 "uuid": "%(uuid)s"}
@@ -77,6 +77,7 @@ class FakeClient:
         zone_uuid = fake_lswitch['transport_zones'][0]['zone_uuid']
         fake_lswitch['zone_uuid'] = zone_uuid
         fake_lswitch['tenant_id'] = self._get_tag(fake_lswitch, 'os_tid')
+        fake_lswitch['lport_count'] = 0
         return fake_lswitch
 
     def _add_lport(self, body, ls_uuid):
@@ -92,6 +93,7 @@ class FakeClient:
         self._fake_lport_dict[fake_lport['uuid']] = fake_lport
 
         fake_lswitch = self._fake_lswitch_dict[ls_uuid]
+        fake_lswitch['lport_count'] += 1
         fake_lport_status = fake_lport.copy()
         fake_lport_status['ls_tenant_id'] = fake_lswitch['tenant_id']
         fake_lport_status['ls_uuid'] = fake_lswitch['uuid']
@@ -115,7 +117,6 @@ class FakeClient:
     def _list(self, resource_type, response_file,
               switch_uuid=None, query=None):
         (tag_filter, attr_filter) = self._get_filters(query)
-
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
         res_dict = getattr(self, '_fake_%s_dict' % resource_type)
@@ -143,7 +144,9 @@ class FakeClient:
                     res_dict[res_uuid].get('ls_uuid') == switch_uuid):
                 return True
             return False
-
+        for item in res_dict.itervalues():
+            if 'tags' in item:
+                item['tags_json'] = json.dumps(item['tags'])
         items = [json.loads(response_template % res_dict[res_uuid])
                  for res_uuid in res_dict
                  if (_lswitch_match(res_uuid) and
@@ -159,6 +162,10 @@ class FakeClient:
         with open("%s/%s" % (self.fake_files_path, response_file)) as f:
             response_template = f.read()
         res_dict = getattr(self, '_fake_%s_dict' % resource_type)
+        for item in res_dict.itervalues():
+            if 'tags' in item:
+                item['tags_json'] = json.dumps(item['tags'])
 
         items = [json.loads(response_template % res_dict[res_uuid])
                  for res_uuid in res_dict if res_uuid == target_uuid]
         if items:
@@ -13,14 +13,22 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging
 import os
 
 import mock
+import webob.exc
 
 import quantum.common.test_lib as test_lib
+from quantum import context
+from quantum.extensions import providernet as pnet
+from quantum import manager
+from quantum.openstack.common import cfg
+from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
 from quantum.tests.unit.nicira import fake_nvpapiclient
 import quantum.tests.unit.test_db_plugin as test_plugin
 
+LOG = logging.getLogger(__name__)
 NICIRA_PKG_PATH = 'quantum.plugins.nicira.nicira_nvp_plugin'
 
 
@@ -28,6 +36,26 @@ class NiciraPluginV2TestCase(test_plugin.QuantumDbPluginV2TestCase):
 
     _plugin_name = ('%s.QuantumPlugin.NvpPluginV2' % NICIRA_PKG_PATH)
 
+    def _create_network(self, fmt, name, admin_status_up,
+                        arg_list=None, providernet_args=None, **kwargs):
+        data = {'network': {'name': name,
+                            'admin_state_up': admin_status_up,
+                            'tenant_id': self._tenant_id}}
+        attributes = kwargs
+        if providernet_args:
+            attributes.update(providernet_args)
+        for arg in (('admin_state_up', 'tenant_id', 'shared') +
+                    (arg_list or ())):
+            # Arg must be present and not empty
+            if arg in kwargs and kwargs[arg]:
+                data['network'][arg] = kwargs[arg]
+        network_req = self.new_create_request('networks', data, fmt)
+        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
+            # create a specific auth context for this request
+            network_req.environ['quantum.context'] = context.Context(
+                '', kwargs['tenant_id'])
+        return network_req.get_response(self.api)
+
     def setUp(self):
         etc_path = os.path.join(os.path.dirname(__file__), 'etc')
         test_lib.test_config['config_files'] = [os.path.join(etc_path,
@@ -61,9 +89,66 @@ class TestNiciraV2HTTPResponse(test_plugin.TestV2HTTPResponse,
 
 
 class TestNiciraPortsV2(test_plugin.TestPortsV2, NiciraPluginV2TestCase):
-    pass
+
+    def test_exhaust_ports_overlay_network(self):
+        cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NVP')
+        with self.network(name='testnet',
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK,
+                                    pnet.SEGMENTATION_ID)) as net:
+            with self.subnet(network=net) as sub:
+                with self.port(subnet=sub):
+                    # creating another port should see an exception
+                    self._create_port('json', net['network']['id'], 400)
+
+    def test_exhaust_ports_bridged_network(self):
+        cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NVP")
+        providernet_args = {pnet.NETWORK_TYPE: 'flat',
+                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
+        with self.network(name='testnet',
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK,
+                                    pnet.SEGMENTATION_ID)) as net:
+            with self.subnet(network=net) as sub:
+                with self.port(subnet=sub):
+                    with self.port(subnet=sub):
+                        plugin = manager.QuantumManager.get_plugin()
+                        ls = nvplib.get_lswitches(plugin.default_cluster,
+                                                  net['network']['id'])
+                        self.assertEqual(len(ls), 2)
 
 
 class TestNiciraNetworksV2(test_plugin.TestNetworksV2,
                            NiciraPluginV2TestCase):
-    pass
+
+    def _test_create_bridge_network(self, vlan_id=None):
+        net_type = vlan_id and 'vlan' or 'flat'
+        name = 'bridge_net'
+        keys = [('subnets', []), ('name', name), ('admin_state_up', True),
+                ('status', 'ACTIVE'), ('shared', False),
+                (pnet.NETWORK_TYPE, net_type),
+                (pnet.PHYSICAL_NETWORK, 'tzuuid'),
+                (pnet.SEGMENTATION_ID, vlan_id)]
+        providernet_args = {pnet.NETWORK_TYPE: net_type,
+                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
+        if vlan_id:
+            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
+        with self.network(name=name,
+                          providernet_args=providernet_args,
+                          arg_list=(pnet.NETWORK_TYPE,
+                                    pnet.PHYSICAL_NETWORK,
+                                    pnet.SEGMENTATION_ID)) as net:
+            for k, v in keys:
+                self.assertEquals(net['network'][k], v)
+
+    def test_create_bridge_network(self):
+        self._test_create_bridge_network()
+
+    def test_create_bridge_vlan_network(self):
+        self._test_create_bridge_network(vlan_id=123)
+
+    def test_create_bridge_vlan_network_outofrange_returns_400(self):
+        with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
+            self._test_create_bridge_network(vlan_id=5000)
+        self.assertEquals(ctx_manager.exception.code, 400)