Merge "VMware: initial NSXv developments"
This commit is contained in:
commit
0886ba66b7
@ -2,7 +2,8 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.

-e git://git.openstack.org/openstack/neutron.git#egg=neutron
# Temporary, till https://review.openstack.org/#/c/143949/ is merged
-e git://github.com/gkotton/neutron.git#egg=neutron

hacking>=0.9.2,<0.10
@ -23,7 +23,7 @@ from oslo.config import cfg

from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware import api_client
from vmware_nsx.neutron.plugins.vmware import api_client

LOG = logging.getLogger(__name__)
@ -19,11 +19,11 @@ import httplib
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import base
|
||||
from neutron.plugins.vmware.api_client import eventlet_client
|
||||
from neutron.plugins.vmware.api_client import eventlet_request
|
||||
from neutron.plugins.vmware.api_client import exception
|
||||
from neutron.plugins.vmware.api_client import version
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import base
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_client
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_request
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import version
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -22,8 +22,8 @@ eventlet.monkey_patch()
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import base
|
||||
from neutron.plugins.vmware.api_client import eventlet_request
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import base
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_request
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -22,7 +22,7 @@ from oslo.serialization import jsonutils
|
||||
|
||||
from neutron.i18n import _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import request
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import request
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
USER_AGENT = "Neutron eventlet client/2.0"
|
||||
|
@ -27,7 +27,7 @@ import six.moves.urllib.parse as urlparse
|
||||
|
||||
from neutron.i18n import _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware import api_client
|
||||
from vmware_nsx.neutron.plugins.vmware import api_client
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -20,9 +20,9 @@ import sys
|
||||
from oslo.config import cfg
|
||||
|
||||
from neutron.common import config
|
||||
from neutron.plugins.vmware.common import config as nsx_config # noqa
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import config as nsx_config # noqa
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
|
||||
config.setup_logging()
|
||||
|
||||
|
@ -12,10 +12,15 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from neutron.i18n import _LW
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class AgentModes:
|
||||
AGENT = 'agent'
|
||||
@ -153,40 +158,87 @@ cluster_opts = [
|
||||
]
|
||||
|
||||
DEFAULT_STATUS_CHECK_INTERVAL = 2000
|
||||
DEFAULT_MINIMUM_POOLED_EDGES = 1
|
||||
DEFAULT_MAXIMUM_POOLED_EDGES = 3
|
||||
DEFAULT_MAXIMUM_TUNNELS_PER_VNIC = 20
|
||||
|
||||
vcns_opts = [
|
||||
nsxv_opts = [
|
||||
cfg.StrOpt('user',
|
||||
default='admin',
|
||||
deprecated_group="vcns",
|
||||
help=_('User name for vsm')),
|
||||
cfg.StrOpt('password',
|
||||
default='default',
|
||||
deprecated_group="vcns",
|
||||
secret=True,
|
||||
help=_('Password for vsm')),
|
||||
cfg.StrOpt('manager_uri',
|
||||
deprecated_group="vcns",
|
||||
help=_('uri for vsm')),
|
||||
cfg.ListOpt('cluster_moid',
|
||||
default=[],
|
||||
help=_('Parameter listing the IDs of the clusters '
|
||||
'which are used by OpenStack.')),
|
||||
cfg.StrOpt('datacenter_moid',
|
||||
deprecated_group="vcns",
|
||||
help=_('Optional parameter identifying the ID of datacenter '
|
||||
'to deploy NSX Edges')),
|
||||
cfg.StrOpt('deployment_container_id',
|
||||
deprecated_group="vcns",
|
||||
help=_('Optional parameter identifying the ID of datastore to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('resource_pool_id',
|
||||
deprecated_group="vcns",
|
||||
help=_('Optional parameter identifying the ID of resource to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('datastore_id',
|
||||
deprecated_group="vcns",
|
||||
help=_('Optional parameter identifying the ID of datastore to '
|
||||
'deploy NSX Edges')),
|
||||
cfg.StrOpt('external_network',
|
||||
deprecated_group="vcns",
|
||||
help=_('Network ID for physical network connectivity')),
|
||||
cfg.IntOpt('task_status_check_interval',
|
||||
default=DEFAULT_STATUS_CHECK_INTERVAL,
|
||||
help=_("Task status check interval"))
|
||||
deprecated_group="vcns",
|
||||
help=_("Task status check interval")),
|
||||
cfg.StrOpt('vdn_scope_id',
|
||||
help=_('Network scope ID for VXLAN virtual wires')),
|
||||
cfg.StrOpt('dvs_id',
|
||||
help=_('DVS ID for VLANs')),
|
||||
cfg.IntOpt('maximum_tunnels_per_vnic',
|
||||
default=DEFAULT_MAXIMUM_TUNNELS_PER_VNIC,
|
||||
help=_('Maximum number of sub interfaces supported '
|
||||
'per vnic in edge. The value should be in 1-110.')),
|
||||
cfg.ListOpt('backup_edge_pool',
|
||||
default=['service:large:4:10',
|
||||
'service:compact:4:10',
|
||||
'vdr:large:4:10'],
|
||||
help=_('Defines edge pool using the format: '
|
||||
'<edge_type>:[edge_size]:<min_edges>:<max_edges>.'
|
||||
'edge_type: service,vdr. '
|
||||
'edge_size: compact, large, xlarge, quadlarge '
|
||||
'and default is large.')),
|
||||
cfg.IntOpt('retries',
|
||||
default=10,
|
||||
help=_('Maximum number of API retries on endpoint.')),
|
||||
cfg.StrOpt('mgt_net_moid',
|
||||
help=_('Network ID for management network connectivity')),
|
||||
cfg.ListOpt('mgt_net_proxy_ips',
|
||||
help=_('Management network IP address for metadata proxy')),
|
||||
cfg.StrOpt('mgt_net_proxy_netmask',
|
||||
help=_('Management network netmask for metadata proxy')),
|
||||
cfg.ListOpt('nova_metadata_ips',
|
||||
help=_('IP addresses used by Nova metadata service')),
|
||||
cfg.IntOpt('nova_metadata_port',
|
||||
default=8775,
|
||||
help=_("TCP Port used by Nova metadata server."))
|
||||
]
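The backup_edge_pool entries registered above use the format <edge_type>:[edge_size]:<min_edges>:<max_edges>. A minimal sketch of how such an entry could be unpacked, assuming the helper name and the fallback to 'large' (both are illustrative, not code from this change):

def parse_backup_edge_pool_entry(entry):
    # e.g. 'service:large:4:10' or 'vdr::4:10' (empty size falls back to large)
    edge_type, edge_size, min_edges, max_edges = entry.split(':')
    return {'edge_type': edge_type,
            'edge_size': edge_size or 'large',
            'minimum_pooled_edges': int(min_edges),
            'maximum_pooled_edges': int(max_edges)}

# parse_backup_edge_pool_entry('service:large:4:10')
# -> {'edge_type': 'service', 'edge_size': 'large',
#     'minimum_pooled_edges': 4, 'maximum_pooled_edges': 10}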
|
||||
|
||||
# Register the configuration options
|
||||
cfg.CONF.register_opts(connection_opts)
|
||||
cfg.CONF.register_opts(cluster_opts)
|
||||
cfg.CONF.register_opts(vcns_opts, group="vcns")
|
||||
cfg.CONF.register_opts(nsxv_opts, group="nsxv")
|
||||
cfg.CONF.register_opts(base_opts, group="NSX")
|
||||
cfg.CONF.register_opts(sync_opts, group="NSX_SYNC")
|
||||
|
||||
@ -197,3 +249,15 @@ def validate_config_options():
|
||||
error = (_("Invalid replication_mode: %s") %
|
||||
cfg.CONF.NSX.replication_mode)
|
||||
raise nsx_exc.NsxPluginException(err_msg=error)
|
||||
|
||||
|
||||
def validate_nsxv_config_options():
    if (cfg.CONF.nsxv.manager_uri is None or
        cfg.CONF.nsxv.user is None or
        cfg.CONF.nsxv.password is None):
        error = _("manager_uri, user and password must be configured!")
        raise nsx_exc.NsxPluginException(err_msg=error)
    if cfg.CONF.nsxv.dvs_id is None:
        LOG.warning(_LW("dvs_id must be configured to support VLANs!"))
    if cfg.CONF.nsxv.vdn_scope_id is None:
        LOG.warning(_LW("vdn_scope_id must be configured to support VXLANs!"))
|
||||
|
@ -76,20 +76,6 @@ class ServiceOverQuota(n_exc.Conflict):
|
||||
message = _("Quota exceeded for Vcns resource: %(overs)s: %(err_msg)s")
|
||||
|
||||
|
||||
class RouterInUseByLBService(n_exc.InUse):
|
||||
message = _("Router %(router_id)s is in use by Loadbalancer Service "
|
||||
"%(vip_id)s")
|
||||
|
||||
|
||||
class RouterInUseByFWService(n_exc.InUse):
|
||||
message = _("Router %(router_id)s is in use by firewall Service "
|
||||
"%(firewall_id)s")
|
||||
|
||||
|
||||
class VcnsDriverException(NsxPluginException):
|
||||
message = _("Error happened in NSX VCNS Driver: %(err_msg)s")
|
||||
|
||||
|
||||
class ServiceClusterUnavailable(NsxPluginException):
|
||||
message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
|
||||
"check NSX setup and/or configuration")
|
||||
|
@ -19,16 +19,16 @@ from neutron.extensions import multiprovidernet as mpnet
|
||||
from neutron.extensions import providernet as pnet
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import client
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import utils as vmw_utils
|
||||
from neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from neutron.plugins.vmware.dbexts import networkgw_db
|
||||
from neutron.plugins.vmware import nsx_cluster
|
||||
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
|
||||
from neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
|
||||
from neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
from vmware_nsx.neutron.plugins.vmware.api_client import client
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils as vmw_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from vmware_nsx.neutron.plugins.vmware import nsx_cluster
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
vmware_nsx/neutron/plugins/vmware/common/nsxv_constants.py (new file, 28 lines)
@ -0,0 +1,28 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Edge size
COMPACT = 'compact'
LARGE = 'large'
XLARGE = 'xlarge'
QUADLARGE = 'quadlarge'


# Edge type
SERVICE_EDGE = 'service'
VDR_EDGE = 'vdr'

# Internal element purpose
INTER_EDGE_PURPOSE = 'inter_edge_net'
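These constants match the edge_type and edge_size tokens used by the backup_edge_pool option earlier in this change; a small illustrative example (not part of the diff):

from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants

# 'service:large:4:10' describes a pool of SERVICE_EDGE appliances of
# size LARGE, while 'vdr:large:4:10' uses VDR_EDGE appliances.
default_pool_entry = ':'.join([nsxv_constants.SERVICE_EDGE,
                               nsxv_constants.LARGE, '4', '10'])
# default_pool_entry == 'service:large:4:10'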
|
@ -14,7 +14,7 @@
|
||||
# under the License.
|
||||
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
# Protocol number look up for supported protocols
|
||||
|
@ -30,10 +30,10 @@ from neutron.openstack.common import log
|
||||
from neutron.openstack.common import loopingcall
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
|
||||
# Maximum page size for a single request
|
||||
# NOTE(salv-orlando): This might become a version-dependent map should the
|
||||
|
@ -36,6 +36,15 @@ class NetworkTypes:
|
||||
BRIDGE = 'bridge'
|
||||
|
||||
|
||||
# Allowed network types for the NSX-v Plugin
|
||||
class NsxVNetworkTypes:
|
||||
"""Allowed provider network types for the NSX-v Plugin."""
|
||||
FLAT = 'flat'
|
||||
VLAN = 'vlan'
|
||||
VXLAN = 'vxlan'
|
||||
PORTGROUP = 'portgroup'
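For illustration, a provider:network_type value could be checked against this whitelist along these lines (the helper is a hedged sketch, not part of this change):

def is_valid_nsxv_network_type(net_type):
    # Hypothetical check for the provider:network_type attribute.
    return net_type in (NsxVNetworkTypes.FLAT, NsxVNetworkTypes.VLAN,
                        NsxVNetworkTypes.VXLAN, NsxVNetworkTypes.PORTGROUP)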
|
||||
|
||||
|
||||
def get_tags(**kwargs):
|
||||
tags = ([dict(tag=value, scope=key)
|
||||
for key, value in kwargs.iteritems()])
|
||||
|
@ -1,5 +1,6 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
@ -13,15 +14,16 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.db import l3_dvr_db
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import servicerouter
|
||||
from vmware_nsx.neutron.plugins.vmware.dbexts import nsxrouter
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import (
|
||||
distributedrouter as dist_rtr)
|
||||
|
||||
|
||||
class ServiceRouter_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin):
|
||||
"""Mixin class to enable service router support."""
|
||||
class DistributedRouter_mixin(nsxrouter.NsxRouterMixin):
|
||||
"""Mixin class to enable distributed router support."""
|
||||
|
||||
extra_attributes = (
|
||||
l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes + [{
|
||||
'name': servicerouter.SERVICE_ROUTER,
|
||||
nsx_attributes = (
|
||||
nsxrouter.NsxRouterMixin.nsx_attributes + [{
|
||||
'name': dist_rtr.DISTRIBUTED,
|
||||
'default': False
|
||||
}])
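In other words, the mixin now describes its extension attribute declaratively; a hedged illustration of the descriptor it contributes (the generic handling lives in NsxRouterMixin, shown later in dbexts/nsxrouter.py):

# The last entry of nsx_attributes is the descriptor added above: the
# attribute name comes from the distributedrouter extension and False is
# used when a create request omits it.
descriptor = DistributedRouter_mixin.nsx_attributes[-1]
assert descriptor == {'name': dist_rtr.DISTRIBUTED, 'default': False}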
|
@ -1,132 +0,0 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo.db import exception as d_exc
|
||||
from sqlalchemy import Column
|
||||
from sqlalchemy import ForeignKey
|
||||
from sqlalchemy import orm
|
||||
from sqlalchemy import String
|
||||
|
||||
from neutron.db import models_v2
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LsnPort(models_v2.model_base.BASEV2):
|
||||
|
||||
__tablename__ = 'lsn_port'
|
||||
|
||||
lsn_port_id = Column(String(36), primary_key=True)
|
||||
|
||||
lsn_id = Column(String(36), ForeignKey('lsn.lsn_id', ondelete="CASCADE"),
|
||||
nullable=False)
|
||||
sub_id = Column(String(36), nullable=False, unique=True)
|
||||
mac_addr = Column(String(32), nullable=False, unique=True)
|
||||
|
||||
def __init__(self, lsn_port_id, subnet_id, mac_address, lsn_id):
|
||||
self.lsn_port_id = lsn_port_id
|
||||
self.lsn_id = lsn_id
|
||||
self.sub_id = subnet_id
|
||||
self.mac_addr = mac_address
|
||||
|
||||
|
||||
class Lsn(models_v2.model_base.BASEV2):
|
||||
__tablename__ = 'lsn'
|
||||
|
||||
lsn_id = Column(String(36), primary_key=True)
|
||||
net_id = Column(String(36), nullable=False)
|
||||
|
||||
def __init__(self, net_id, lsn_id):
|
||||
self.net_id = net_id
|
||||
self.lsn_id = lsn_id
|
||||
|
||||
|
||||
def lsn_add(context, network_id, lsn_id):
|
||||
"""Add Logical Service Node information to persistent datastore."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
lsn = Lsn(network_id, lsn_id)
|
||||
context.session.add(lsn)
|
||||
|
||||
|
||||
def lsn_remove(context, lsn_id):
|
||||
"""Remove Logical Service Node information from datastore given its id."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.query(Lsn).filter_by(lsn_id=lsn_id).delete()
|
||||
|
||||
|
||||
def lsn_remove_for_network(context, network_id):
|
||||
"""Remove information about the Logical Service Node given its network."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.query(Lsn).filter_by(net_id=network_id).delete()
|
||||
|
||||
|
||||
def lsn_get_for_network(context, network_id, raise_on_err=True):
|
||||
"""Retrieve LSN information given its network id."""
|
||||
query = context.session.query(Lsn)
|
||||
try:
|
||||
return query.filter_by(net_id=network_id).one()
|
||||
except (orm.exc.NoResultFound, d_exc.DBError):
|
||||
msg = _('Unable to find Logical Service Node for network %s')
|
||||
if raise_on_err:
|
||||
LOG.error(msg, network_id)
|
||||
raise p_exc.LsnNotFound(entity='network',
|
||||
entity_id=network_id)
|
||||
else:
|
||||
LOG.warn(msg, network_id)
|
||||
|
||||
|
||||
def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):
|
||||
"""Add Logical Service Node Port information to persistent datastore."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
lsn_port = LsnPort(lsn_port_id, subnet_id, mac, lsn_id)
|
||||
context.session.add(lsn_port)
|
||||
|
||||
|
||||
def lsn_port_get_for_subnet(context, subnet_id, raise_on_err=True):
|
||||
"""Return Logical Service Node Port information given its subnet id."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
try:
|
||||
return (context.session.query(LsnPort).
|
||||
filter_by(sub_id=subnet_id).one())
|
||||
except (orm.exc.NoResultFound, d_exc.DBError):
|
||||
if raise_on_err:
|
||||
raise p_exc.LsnPortNotFound(lsn_id=None,
|
||||
entity='subnet',
|
||||
entity_id=subnet_id)
|
||||
|
||||
|
||||
def lsn_port_get_for_mac(context, mac_address, raise_on_err=True):
|
||||
"""Return Logical Service Node Port information given its mac address."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
try:
|
||||
return (context.session.query(LsnPort).
|
||||
filter_by(mac_addr=mac_address).one())
|
||||
except (orm.exc.NoResultFound, d_exc.DBError):
|
||||
if raise_on_err:
|
||||
raise p_exc.LsnPortNotFound(lsn_id=None,
|
||||
entity='mac',
|
||||
entity_id=mac_address)
|
||||
|
||||
|
||||
def lsn_port_remove(context, lsn_port_id):
|
||||
"""Remove Logical Service Node port from the given Logical Service Node."""
|
||||
with context.session.begin(subtransactions=True):
|
||||
(context.session.query(LsnPort).
|
||||
filter_by(lsn_port_id=lsn_port_id).delete())
|
@ -1,78 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import orm
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import model_base
|
||||
from neutron.db import models_v2
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.extensions import maclearning as mac
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MacLearningState(model_base.BASEV2):
|
||||
|
||||
port_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('ports.id', ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
mac_learning_enabled = sa.Column(sa.Boolean(), nullable=False)
|
||||
|
||||
# Add a relationship to the Port model using the backref attribute.
|
||||
# This will instruct SQLAlchemy to eagerly load this association.
|
||||
port = orm.relationship(
|
||||
models_v2.Port,
|
||||
backref=orm.backref("mac_learning_state", lazy='joined',
|
||||
uselist=False, cascade='delete'))
|
||||
|
||||
|
||||
class MacLearningDbMixin(object):
|
||||
"""Mixin class for mac learning."""
|
||||
|
||||
def _make_mac_learning_state_dict(self, port, fields=None):
|
||||
res = {'port_id': port['port_id'],
|
||||
mac.MAC_LEARNING: port[mac.MAC_LEARNING]}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _extend_port_mac_learning_state(self, port_res, port_db):
|
||||
state = port_db.mac_learning_state
|
||||
if state and state.mac_learning_enabled:
|
||||
port_res[mac.MAC_LEARNING] = state.mac_learning_enabled
|
||||
|
||||
# Register dict extend functions for ports
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attributes.PORTS, ['_extend_port_mac_learning_state'])
|
||||
|
||||
def _update_mac_learning_state(self, context, port_id, enabled):
|
||||
try:
|
||||
query = self._model_query(context, MacLearningState)
|
||||
state = query.filter(MacLearningState.port_id == port_id).one()
|
||||
state.update({mac.MAC_LEARNING: enabled})
|
||||
except exc.NoResultFound:
|
||||
self._create_mac_learning_state(context,
|
||||
{'id': port_id,
|
||||
mac.MAC_LEARNING: enabled})
|
||||
|
||||
def _create_mac_learning_state(self, context, port):
|
||||
with context.session.begin(subtransactions=True):
|
||||
enabled = port[mac.MAC_LEARNING]
|
||||
state = MacLearningState(port_id=port['id'],
|
||||
mac_learning_enabled=enabled)
|
||||
context.session.add(state)
|
||||
return self._make_mac_learning_state_dict(state)
|
@ -1,117 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from sqlalchemy import Column, Enum, ForeignKey, Integer, String
|
||||
|
||||
from neutron.db import model_base
|
||||
|
||||
|
||||
class TzNetworkBinding(model_base.BASEV2):
|
||||
"""Represents a binding of a virtual network with a transport zone.
|
||||
|
||||
This model class associates a Neutron network with a transport zone;
|
||||
optionally a vlan ID might be used if the binding type is 'bridge'
|
||||
"""
|
||||
__tablename__ = 'tz_network_bindings'
|
||||
|
||||
# TODO(arosen) - it might be worth while refactoring the how this data
|
||||
# is stored later so every column does not need to be a primary key.
|
||||
network_id = Column(String(36),
|
||||
ForeignKey('networks.id', ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
# 'flat', 'vlan', 'stt' or 'gre'
|
||||
binding_type = Column(Enum('flat', 'vlan', 'stt', 'gre', 'l3_ext',
|
||||
name='tz_network_bindings_binding_type'),
|
||||
nullable=False, primary_key=True)
|
||||
phy_uuid = Column(String(36), primary_key=True, default='')
|
||||
vlan_id = Column(Integer, primary_key=True, autoincrement=False, default=0)
|
||||
|
||||
def __init__(self, network_id, binding_type, phy_uuid, vlan_id):
|
||||
self.network_id = network_id
|
||||
self.binding_type = binding_type
|
||||
self.phy_uuid = phy_uuid
|
||||
self.vlan_id = vlan_id
|
||||
|
||||
def __repr__(self):
|
||||
return "<NetworkBinding(%s,%s,%s,%s)>" % (self.network_id,
|
||||
self.binding_type,
|
||||
self.phy_uuid,
|
||||
self.vlan_id)
|
||||
|
||||
|
||||
class NeutronNsxNetworkMapping(model_base.BASEV2):
|
||||
"""Maps neutron network identifiers to NSX identifiers.
|
||||
|
||||
Because of chained logical switches more than one mapping might exist
|
||||
for a single Neutron network.
|
||||
"""
|
||||
__tablename__ = 'neutron_nsx_network_mappings'
|
||||
neutron_id = Column(String(36),
|
||||
ForeignKey('networks.id', ondelete='CASCADE'),
|
||||
primary_key=True)
|
||||
nsx_id = Column(String(36), primary_key=True)
|
||||
|
||||
|
||||
class NeutronNsxSecurityGroupMapping(model_base.BASEV2):
|
||||
"""Backend mappings for Neutron Security Group identifiers.
|
||||
|
||||
This class maps a neutron security group identifier to the corresponding
|
||||
NSX security profile identifier.
|
||||
"""
|
||||
|
||||
__tablename__ = 'neutron_nsx_security_group_mappings'
|
||||
neutron_id = Column(String(36),
|
||||
ForeignKey('securitygroups.id', ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
nsx_id = Column(String(36), primary_key=True)
|
||||
|
||||
|
||||
class NeutronNsxPortMapping(model_base.BASEV2):
|
||||
"""Represents the mapping between neutron and nsx port uuids."""
|
||||
|
||||
__tablename__ = 'neutron_nsx_port_mappings'
|
||||
neutron_id = Column(String(36),
|
||||
ForeignKey('ports.id', ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
nsx_switch_id = Column(String(36))
|
||||
nsx_port_id = Column(String(36), nullable=False)
|
||||
|
||||
def __init__(self, neutron_id, nsx_switch_id, nsx_port_id):
|
||||
self.neutron_id = neutron_id
|
||||
self.nsx_switch_id = nsx_switch_id
|
||||
self.nsx_port_id = nsx_port_id
|
||||
|
||||
|
||||
class NeutronNsxRouterMapping(model_base.BASEV2):
|
||||
"""Maps neutron router identifiers to NSX identifiers."""
|
||||
__tablename__ = 'neutron_nsx_router_mappings'
|
||||
neutron_id = Column(String(36),
|
||||
ForeignKey('routers.id', ondelete='CASCADE'),
|
||||
primary_key=True)
|
||||
nsx_id = Column(String(36))
|
||||
|
||||
|
||||
class MultiProviderNetworks(model_base.BASEV2):
|
||||
"""Networks provisioned through multiprovider extension."""
|
||||
|
||||
__tablename__ = 'multi_provider_networks'
|
||||
network_id = Column(String(36),
|
||||
ForeignKey('networks.id', ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
|
||||
def __init__(self, network_id):
|
||||
self.network_id = network_id
|
@ -1,521 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
||||
from sqlalchemy import orm
|
||||
from sqlalchemy.orm import exc as sa_orm_exc
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.common import exceptions
|
||||
from neutron.common import utils
|
||||
from neutron.db import model_base
|
||||
from neutron.db import models_v2
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.vmware.extensions import networkgw
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
DEVICE_OWNER_NET_GW_INTF = 'network:gateway-interface'
|
||||
NETWORK_ID = 'network_id'
|
||||
SEGMENTATION_TYPE = 'segmentation_type'
|
||||
SEGMENTATION_ID = 'segmentation_id'
|
||||
ALLOWED_CONNECTION_ATTRIBUTES = set((NETWORK_ID,
|
||||
SEGMENTATION_TYPE,
|
||||
SEGMENTATION_ID))
|
||||
# Constants for gateway device operational status
|
||||
STATUS_UNKNOWN = "UNKNOWN"
|
||||
STATUS_ERROR = "ERROR"
|
||||
STATUS_ACTIVE = "ACTIVE"
|
||||
STATUS_DOWN = "DOWN"
|
||||
|
||||
|
||||
class GatewayInUse(exceptions.InUse):
|
||||
message = _("Network Gateway '%(gateway_id)s' still has active mappings "
|
||||
"with one or more neutron networks.")
|
||||
|
||||
|
||||
class GatewayNotFound(exceptions.NotFound):
|
||||
message = _("Network Gateway %(gateway_id)s could not be found")
|
||||
|
||||
|
||||
class GatewayDeviceInUse(exceptions.InUse):
|
||||
message = _("Network Gateway Device '%(device_id)s' is still used by "
|
||||
"one or more network gateways.")
|
||||
|
||||
|
||||
class GatewayDeviceNotFound(exceptions.NotFound):
|
||||
message = _("Network Gateway Device %(device_id)s could not be found.")
|
||||
|
||||
|
||||
class GatewayDevicesNotFound(exceptions.NotFound):
|
||||
message = _("One or more Network Gateway Devices could not be found: "
|
||||
"%(device_ids)s.")
|
||||
|
||||
|
||||
class NetworkGatewayPortInUse(exceptions.InUse):
|
||||
message = _("Port '%(port_id)s' is owned by '%(device_owner)s' and "
|
||||
"therefore cannot be deleted directly via the port API.")
|
||||
|
||||
|
||||
class GatewayConnectionInUse(exceptions.InUse):
|
||||
message = _("The specified mapping '%(mapping)s' is already in use on "
|
||||
"network gateway '%(gateway_id)s'.")
|
||||
|
||||
|
||||
class MultipleGatewayConnections(exceptions.Conflict):
|
||||
message = _("Multiple network connections found on '%(gateway_id)s' "
|
||||
"with provided criteria.")
|
||||
|
||||
|
||||
class GatewayConnectionNotFound(exceptions.NotFound):
|
||||
message = _("The connection %(network_mapping_info)s was not found on the "
|
||||
"network gateway '%(network_gateway_id)s'")
|
||||
|
||||
|
||||
class NetworkGatewayUnchangeable(exceptions.InUse):
|
||||
message = _("The network gateway %(gateway_id)s "
|
||||
"cannot be updated or deleted")
|
||||
|
||||
|
||||
class NetworkConnection(model_base.BASEV2, models_v2.HasTenant):
|
||||
"""Defines a connection between a network gateway and a network."""
|
||||
# We use port_id as the primary key as one can connect a gateway
|
||||
# to a network in multiple ways (and we cannot use the same port for
|
||||
# more than a single gateway)
|
||||
network_gateway_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('networkgateways.id',
|
||||
ondelete='CASCADE'))
|
||||
network_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('networks.id', ondelete='CASCADE'))
|
||||
segmentation_type = sa.Column(
|
||||
sa.Enum('flat', 'vlan',
|
||||
name='networkconnections_segmentation_type'))
|
||||
segmentation_id = sa.Column(sa.Integer)
|
||||
__table_args__ = (sa.UniqueConstraint(network_gateway_id,
|
||||
segmentation_type,
|
||||
segmentation_id),)
|
||||
# Also, storing port id comes back useful when disconnecting a network
|
||||
# from a gateway
|
||||
port_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('ports.id', ondelete='CASCADE'),
|
||||
primary_key=True)
|
||||
|
||||
|
||||
class NetworkGatewayDeviceReference(model_base.BASEV2):
|
||||
id = sa.Column(sa.String(36), primary_key=True)
|
||||
network_gateway_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey('networkgateways.id',
|
||||
ondelete='CASCADE'),
|
||||
primary_key=True)
|
||||
interface_name = sa.Column(sa.String(64), primary_key=True)
|
||||
|
||||
|
||||
class NetworkGatewayDevice(model_base.BASEV2, models_v2.HasId,
|
||||
models_v2.HasTenant):
|
||||
nsx_id = sa.Column(sa.String(36))
|
||||
# Optional name for the gateway device
|
||||
name = sa.Column(sa.String(255))
|
||||
# Transport connector type. Not using enum as range of
|
||||
# connector types might vary with backend version
|
||||
connector_type = sa.Column(sa.String(10))
|
||||
# Transport connector IP Address
|
||||
connector_ip = sa.Column(sa.String(64))
|
||||
# operational status
|
||||
status = sa.Column(sa.String(16))
|
||||
|
||||
|
||||
class NetworkGateway(model_base.BASEV2, models_v2.HasId,
|
||||
models_v2.HasTenant):
|
||||
"""Defines the data model for a network gateway."""
|
||||
name = sa.Column(sa.String(255))
|
||||
# Tenant id is nullable for this resource
|
||||
tenant_id = sa.Column(sa.String(36))
|
||||
default = sa.Column(sa.Boolean())
|
||||
devices = orm.relationship(NetworkGatewayDeviceReference,
|
||||
backref='networkgateways',
|
||||
cascade='all,delete')
|
||||
network_connections = orm.relationship(NetworkConnection, lazy='joined')
|
||||
|
||||
|
||||
class NetworkGatewayMixin(networkgw.NetworkGatewayPluginBase):
|
||||
|
||||
gateway_resource = networkgw.GATEWAY_RESOURCE_NAME
|
||||
device_resource = networkgw.DEVICE_RESOURCE_NAME
|
||||
|
||||
def _get_network_gateway(self, context, gw_id):
|
||||
try:
|
||||
gw = self._get_by_id(context, NetworkGateway, gw_id)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayNotFound(gateway_id=gw_id)
|
||||
return gw
|
||||
|
||||
def _make_gw_connection_dict(self, gw_conn):
|
||||
return {'port_id': gw_conn['port_id'],
|
||||
'segmentation_type': gw_conn['segmentation_type'],
|
||||
'segmentation_id': gw_conn['segmentation_id']}
|
||||
|
||||
def _make_network_gateway_dict(self, network_gateway, fields=None):
|
||||
device_list = []
|
||||
for d in network_gateway['devices']:
|
||||
device_list.append({'id': d['id'],
|
||||
'interface_name': d['interface_name']})
|
||||
res = {'id': network_gateway['id'],
|
||||
'name': network_gateway['name'],
|
||||
'default': network_gateway['default'],
|
||||
'devices': device_list,
|
||||
'tenant_id': network_gateway['tenant_id']}
|
||||
# Query gateway connections only if needed
|
||||
if not fields or 'ports' in fields:
|
||||
res['ports'] = [self._make_gw_connection_dict(conn)
|
||||
for conn in network_gateway.network_connections]
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _set_mapping_info_defaults(self, mapping_info):
|
||||
if not mapping_info.get('segmentation_type'):
|
||||
mapping_info['segmentation_type'] = 'flat'
|
||||
if not mapping_info.get('segmentation_id'):
|
||||
mapping_info['segmentation_id'] = 0
|
||||
|
||||
def _validate_network_mapping_info(self, network_mapping_info):
|
||||
self._set_mapping_info_defaults(network_mapping_info)
|
||||
network_id = network_mapping_info.get(NETWORK_ID)
|
||||
if not network_id:
|
||||
raise exceptions.InvalidInput(
|
||||
error_message=_("A network identifier must be specified "
|
||||
"when connecting a network to a network "
|
||||
"gateway. Unable to complete operation"))
|
||||
connection_attrs = set(network_mapping_info.keys())
|
||||
if not connection_attrs.issubset(ALLOWED_CONNECTION_ATTRIBUTES):
|
||||
raise exceptions.InvalidInput(
|
||||
error_message=(_("Invalid keys found among the ones provided "
|
||||
"in request body: %(connection_attrs)s."),
|
||||
connection_attrs))
|
||||
seg_type = network_mapping_info.get(SEGMENTATION_TYPE)
|
||||
seg_id = network_mapping_info.get(SEGMENTATION_ID)
|
||||
# The NSX plugin accepts 0 as a valid vlan tag
|
||||
seg_id_valid = seg_id == 0 or utils.is_valid_vlan_tag(seg_id)
|
||||
if seg_type.lower() == 'flat' and seg_id:
|
||||
msg = _("Cannot specify a segmentation id when "
|
||||
"the segmentation type is flat")
|
||||
raise exceptions.InvalidInput(error_message=msg)
|
||||
elif (seg_type.lower() == 'vlan' and not seg_id_valid):
|
||||
msg = _("Invalid segmentation id (%d) for "
|
||||
"vlan segmentation type") % seg_id
|
||||
raise exceptions.InvalidInput(error_message=msg)
|
||||
return network_id
|
||||
|
||||
def _retrieve_gateway_connections(self, context, gateway_id,
|
||||
mapping_info={}, only_one=False):
|
||||
filters = {'network_gateway_id': [gateway_id]}
|
||||
for k, v in mapping_info.iteritems():
|
||||
if v and k != NETWORK_ID:
|
||||
filters[k] = [v]
|
||||
query = self._get_collection_query(context,
|
||||
NetworkConnection,
|
||||
filters)
|
||||
return query.one() if only_one else query.all()
|
||||
|
||||
def _unset_default_network_gateways(self, context):
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.query(NetworkGateway).update(
|
||||
{NetworkGateway.default: False})
|
||||
|
||||
def _set_default_network_gateway(self, context, gw_id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw = (context.session.query(NetworkGateway).
|
||||
filter_by(id=gw_id).one())
|
||||
gw['default'] = True
|
||||
|
||||
def prevent_network_gateway_port_deletion(self, context, port):
|
||||
"""Pre-deletion check.
|
||||
|
||||
Ensures a port will not be deleted if is being used by a network
|
||||
gateway. In that case an exception will be raised.
|
||||
"""
|
||||
if port['device_owner'] == DEVICE_OWNER_NET_GW_INTF:
|
||||
raise NetworkGatewayPortInUse(port_id=port['id'],
|
||||
device_owner=port['device_owner'])
|
||||
|
||||
def _validate_device_list(self, context, tenant_id, gateway_data):
|
||||
device_query = self._query_gateway_devices(
|
||||
context, filters={'id': [device['id']
|
||||
for device in gateway_data['devices']]})
|
||||
retrieved_device_ids = set()
|
||||
for device in device_query:
|
||||
retrieved_device_ids.add(device['id'])
|
||||
if device['tenant_id'] != tenant_id:
|
||||
raise GatewayDeviceNotFound(device_id=device['id'])
|
||||
missing_device_ids = (
|
||||
set(device['id'] for device in gateway_data['devices']) -
|
||||
retrieved_device_ids)
|
||||
if missing_device_ids:
|
||||
raise GatewayDevicesNotFound(
|
||||
device_ids=",".join(missing_device_ids))
|
||||
|
||||
def create_network_gateway(self, context, network_gateway,
|
||||
validate_device_list=True):
|
||||
gw_data = network_gateway[self.gateway_resource]
|
||||
tenant_id = self._get_tenant_id_for_create(context, gw_data)
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = NetworkGateway(
|
||||
id=gw_data.get('id', uuidutils.generate_uuid()),
|
||||
tenant_id=tenant_id,
|
||||
name=gw_data.get('name'))
|
||||
# Device list is guaranteed to be a valid list, but some devices
|
||||
# might still either not exist or belong to a different tenant
|
||||
if validate_device_list:
|
||||
self._validate_device_list(context, tenant_id, gw_data)
|
||||
gw_db.devices.extend([NetworkGatewayDeviceReference(**device)
|
||||
for device in gw_data['devices']])
|
||||
context.session.add(gw_db)
|
||||
LOG.debug("Created network gateway with id:%s", gw_db['id'])
|
||||
return self._make_network_gateway_dict(gw_db)
|
||||
|
||||
def update_network_gateway(self, context, id, network_gateway):
|
||||
gw_data = network_gateway[self.gateway_resource]
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
if gw_db.default:
|
||||
raise NetworkGatewayUnchangeable(gateway_id=id)
|
||||
# Ensure there is something to update before doing it
|
||||
if any([gw_db[k] != gw_data[k] for k in gw_data]):
|
||||
gw_db.update(gw_data)
|
||||
LOG.debug("Updated network gateway with id:%s", id)
|
||||
return self._make_network_gateway_dict(gw_db)
|
||||
|
||||
def get_network_gateway(self, context, id, fields=None):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
return self._make_network_gateway_dict(gw_db, fields)
|
||||
|
||||
def delete_network_gateway(self, context, id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, id)
|
||||
if gw_db.network_connections:
|
||||
raise GatewayInUse(gateway_id=id)
|
||||
if gw_db.default:
|
||||
raise NetworkGatewayUnchangeable(gateway_id=id)
|
||||
context.session.delete(gw_db)
|
||||
LOG.debug("Network gateway '%s' was destroyed.", id)
|
||||
|
||||
def get_network_gateways(self, context, filters=None, fields=None,
|
||||
sorts=None, limit=None, marker=None,
|
||||
page_reverse=False):
|
||||
marker_obj = self._get_marker_obj(
|
||||
context, 'network_gateway', limit, marker)
|
||||
return self._get_collection(context, NetworkGateway,
|
||||
self._make_network_gateway_dict,
|
||||
filters=filters, fields=fields,
|
||||
sorts=sorts, limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def connect_network(self, context, network_gateway_id,
|
||||
network_mapping_info):
|
||||
network_id = self._validate_network_mapping_info(network_mapping_info)
|
||||
LOG.debug("Connecting network '%(network_id)s' to gateway "
|
||||
"'%(network_gateway_id)s'",
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
with context.session.begin(subtransactions=True):
|
||||
gw_db = self._get_network_gateway(context, network_gateway_id)
|
||||
tenant_id = self._get_tenant_id_for_create(context, gw_db)
|
||||
# TODO(salvatore-orlando): Leverage unique constraint instead
|
||||
# of performing another query!
|
||||
if self._retrieve_gateway_connections(context,
|
||||
network_gateway_id,
|
||||
network_mapping_info):
|
||||
raise GatewayConnectionInUse(mapping=network_mapping_info,
|
||||
gateway_id=network_gateway_id)
|
||||
# TODO(salvatore-orlando): Creating a port will give it an IP,
|
||||
# but we actually do not need any. Instead of wasting an IP we
|
||||
# should have a way to say a port shall not be associated with
|
||||
# any subnet
|
||||
try:
|
||||
# We pass the segmentation type and id too - the plugin
|
||||
# might find them useful as the network connection object
|
||||
# does not exist yet.
|
||||
# NOTE: they're not extended attributes, rather extra data
|
||||
# passed in the port structure to the plugin
|
||||
# TODO(salvatore-orlando): Verify optimal solution for
|
||||
# ownership of the gateway port
|
||||
port = self.create_port(context, {
|
||||
'port':
|
||||
{'tenant_id': tenant_id,
|
||||
'network_id': network_id,
|
||||
'mac_address': attributes.ATTR_NOT_SPECIFIED,
|
||||
'admin_state_up': True,
|
||||
'fixed_ips': [],
|
||||
'device_id': network_gateway_id,
|
||||
'device_owner': DEVICE_OWNER_NET_GW_INTF,
|
||||
'name': '',
|
||||
'gw:segmentation_type':
|
||||
network_mapping_info.get('segmentation_type'),
|
||||
'gw:segmentation_id':
|
||||
network_mapping_info.get('segmentation_id')}})
|
||||
except exceptions.NetworkNotFound:
|
||||
err_msg = (_("Requested network '%(network_id)s' not found."
|
||||
"Unable to create network connection on "
|
||||
"gateway '%(network_gateway_id)s") %
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
LOG.error(err_msg)
|
||||
raise exceptions.InvalidInput(error_message=err_msg)
|
||||
port_id = port['id']
|
||||
LOG.debug("Gateway port for '%(network_gateway_id)s' "
|
||||
"created on network '%(network_id)s':%(port_id)s",
|
||||
{'network_gateway_id': network_gateway_id,
|
||||
'network_id': network_id,
|
||||
'port_id': port_id})
|
||||
# Create NetworkConnection record
|
||||
network_mapping_info['port_id'] = port_id
|
||||
network_mapping_info['tenant_id'] = tenant_id
|
||||
gw_db.network_connections.append(
|
||||
NetworkConnection(**network_mapping_info))
|
||||
port_id = port['id']
|
||||
# now deallocate and recycle ip from the port
|
||||
for fixed_ip in port.get('fixed_ips', []):
|
||||
self._delete_ip_allocation(context, network_id,
|
||||
fixed_ip['subnet_id'],
|
||||
fixed_ip['ip_address'])
|
||||
LOG.debug("Ensured no Ip addresses are configured on port %s",
|
||||
port_id)
|
||||
return {'connection_info':
|
||||
{'network_gateway_id': network_gateway_id,
|
||||
'network_id': network_id,
|
||||
'port_id': port_id}}
|
||||
|
||||
def disconnect_network(self, context, network_gateway_id,
|
||||
network_mapping_info):
|
||||
network_id = self._validate_network_mapping_info(network_mapping_info)
|
||||
LOG.debug("Disconnecting network '%(network_id)s' from gateway "
|
||||
"'%(network_gateway_id)s'",
|
||||
{'network_id': network_id,
|
||||
'network_gateway_id': network_gateway_id})
|
||||
with context.session.begin(subtransactions=True):
|
||||
# Uniquely identify connection, otherwise raise
|
||||
try:
|
||||
net_connection = self._retrieve_gateway_connections(
|
||||
context, network_gateway_id,
|
||||
network_mapping_info, only_one=True)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayConnectionNotFound(
|
||||
network_mapping_info=network_mapping_info,
|
||||
network_gateway_id=network_gateway_id)
|
||||
except sa_orm_exc.MultipleResultsFound:
|
||||
raise MultipleGatewayConnections(
|
||||
gateway_id=network_gateway_id)
|
||||
# Remove gateway port from network
|
||||
# FIXME(salvatore-orlando): Ensure state of port in NSX is
|
||||
# consistent with outcome of transaction
|
||||
self.delete_port(context, net_connection['port_id'],
|
||||
nw_gw_port_check=False)
|
||||
# Remove NetworkConnection record
|
||||
context.session.delete(net_connection)
|
||||
|
||||
def _make_gateway_device_dict(self, gateway_device, fields=None,
|
||||
include_nsx_id=False):
|
||||
res = {'id': gateway_device['id'],
|
||||
'name': gateway_device['name'],
|
||||
'status': gateway_device['status'],
|
||||
'connector_type': gateway_device['connector_type'],
|
||||
'connector_ip': gateway_device['connector_ip'],
|
||||
'tenant_id': gateway_device['tenant_id']}
|
||||
if include_nsx_id:
|
||||
# Return the NSX mapping as well. This attribute will not be
|
||||
# returned in the API response anyway. Ensure it will not be
|
||||
# filtered out in field selection.
|
||||
if fields:
|
||||
fields.append('nsx_id')
|
||||
res['nsx_id'] = gateway_device['nsx_id']
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _get_gateway_device(self, context, device_id):
|
||||
try:
|
||||
return self._get_by_id(context, NetworkGatewayDevice, device_id)
|
||||
except sa_orm_exc.NoResultFound:
|
||||
raise GatewayDeviceNotFound(device_id=device_id)
|
||||
|
||||
def _is_device_in_use(self, context, device_id):
|
||||
query = self._get_collection_query(
|
||||
context, NetworkGatewayDeviceReference, {'id': [device_id]})
|
||||
return query.first()
|
||||
|
||||
def get_gateway_device(self, context, device_id, fields=None,
|
||||
include_nsx_id=False):
|
||||
return self._make_gateway_device_dict(
|
||||
self._get_gateway_device(context, device_id),
|
||||
fields, include_nsx_id)
|
||||
|
||||
def _query_gateway_devices(self, context,
|
||||
filters=None, sorts=None,
|
||||
limit=None, marker=None,
|
||||
page_reverse=None):
|
||||
marker_obj = self._get_marker_obj(
|
||||
context, 'gateway_device', limit, marker)
|
||||
return self._get_collection_query(context,
|
||||
NetworkGatewayDevice,
|
||||
filters=filters,
|
||||
sorts=sorts,
|
||||
limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def get_gateway_devices(self, context, filters=None, fields=None,
|
||||
sorts=None, limit=None, marker=None,
|
||||
page_reverse=False, include_nsx_id=False):
|
||||
query = self._query_gateway_devices(context, filters, sorts, limit,
|
||||
marker, page_reverse)
|
||||
return [self._make_gateway_device_dict(row, fields, include_nsx_id)
|
||||
for row in query]
|
||||
|
||||
def create_gateway_device(self, context, gateway_device,
|
||||
initial_status=STATUS_UNKNOWN):
|
||||
device_data = gateway_device[self.device_resource]
|
||||
tenant_id = self._get_tenant_id_for_create(context, device_data)
|
||||
with context.session.begin(subtransactions=True):
|
||||
device_db = NetworkGatewayDevice(
|
||||
id=device_data.get('id', uuidutils.generate_uuid()),
|
||||
tenant_id=tenant_id,
|
||||
name=device_data.get('name'),
|
||||
connector_type=device_data['connector_type'],
|
||||
connector_ip=device_data['connector_ip'],
|
||||
status=initial_status)
|
||||
context.session.add(device_db)
|
||||
LOG.debug("Created network gateway device: %s", device_db['id'])
|
||||
return self._make_gateway_device_dict(device_db)
|
||||
|
||||
def update_gateway_device(self, context, gateway_device_id,
|
||||
gateway_device, include_nsx_id=False):
|
||||
device_data = gateway_device[self.device_resource]
|
||||
with context.session.begin(subtransactions=True):
|
||||
device_db = self._get_gateway_device(context, gateway_device_id)
|
||||
# Ensure there is something to update before doing it
|
||||
if any([device_db[k] != device_data[k] for k in device_data]):
|
||||
device_db.update(device_data)
|
||||
LOG.debug("Updated network gateway device: %s",
|
||||
gateway_device_id)
|
||||
return self._make_gateway_device_dict(
|
||||
device_db, include_nsx_id=include_nsx_id)
|
||||
|
||||
def delete_gateway_device(self, context, device_id):
|
||||
with context.session.begin(subtransactions=True):
|
||||
# A gateway device should not be deleted
|
||||
# if it is used in any network gateway service
|
||||
if self._is_device_in_use(context, device_id):
|
||||
raise GatewayDeviceInUse(device_id=device_id)
|
||||
device_db = self._get_gateway_device(context, device_id)
|
||||
context.session.delete(device_db)
|
||||
LOG.debug("Deleted network gateway device: %s.", device_id)
|
vmware_nsx/neutron/plugins/vmware/dbexts/nsxrouter.py (new file, 66 lines)
@ -0,0 +1,66 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.extensions import l3
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import nsxv_models
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxRouterMixin(object):
|
||||
"""Mixin class to enable nsx router support."""
|
||||
|
||||
nsx_attributes = []
|
||||
|
||||
def _extend_nsx_router_dict(self, router_res, router_db):
|
||||
nsx_attrs = router_db['nsx_attributes']
|
||||
# Return False if nsx attributes are not defined for this
|
||||
# neutron router
|
||||
for attr in self.nsx_attributes:
|
||||
name = attr['name']
|
||||
default = attr['default']
|
||||
router_res[name] = (
|
||||
nsx_attrs and nsx_attrs[name] or default)
|
||||
|
||||
def _process_nsx_router_create(
|
||||
self, context, router_db, router_req):
|
||||
if not router_db['nsx_attributes']:
|
||||
kwargs = {}
|
||||
for attr in self.nsx_attributes:
|
||||
name = attr['name']
|
||||
default = attr['default']
|
||||
kwargs[name] = router_req.get(name, default)
|
||||
nsx_attributes = nsxv_models.NsxvRouterExtAttributes(
|
||||
router_id=router_db['id'], **kwargs)
|
||||
context.session.add(nsx_attributes)
|
||||
router_db['nsx_attributes'] = nsx_attributes
|
||||
else:
|
||||
# The situation where the record already exists will
|
||||
# be likely once the NSXRouterExtAttributes model
|
||||
# will allow for defining several attributes pertaining
|
||||
# to different extensions
|
||||
for attr in self.nsx_attributes:
|
||||
name = attr['name']
|
||||
default = attr['default']
|
||||
router_db['nsx_attributes'][name] = router_req.get(
|
||||
name, default)
|
||||
LOG.debug("Nsx router extension successfully processed "
|
||||
"for router:%s", router_db['id'])
|
||||
|
||||
# Register dict extend functions for ports
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
l3.ROUTERS, ['_extend_nsx_router_dict'])
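As a hedged sketch (the class name is illustrative), a plugin uses this mixin by extending nsx_attributes, exactly as the DistributedRouter_mixin earlier in this change does:

class ExampleRouterMixin(NsxRouterMixin):
    # Hypothetical consumer: declare one boolean NSX attribute with its
    # default; _extend_nsx_router_dict() and _process_nsx_router_create()
    # then handle persistence and responses generically.
    nsx_attributes = NsxRouterMixin.nsx_attributes + [{
        'name': 'distributed',
        'default': False,
    }]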
|
vmware_nsx/neutron/plugins/vmware/dbexts/nsxv_db.py (new file, 435 lines)
@ -0,0 +1,435 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.db import exception as db_exc
|
||||
from oslo.utils import excutils
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
import neutron.db.api as db
|
||||
from neutron.i18n import _, _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.dbexts import nsxv_models
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import constants
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def add_nsxv_router_binding(session, router_id, vse_id, lswitch_id, status,
|
||||
appliance_size=nsxv_constants.LARGE,
|
||||
edge_type=nsxv_constants.SERVICE_EDGE):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = nsxv_models.NsxvRouterBinding(
|
||||
router_id=router_id,
|
||||
edge_id=vse_id,
|
||||
lswitch_id=lswitch_id,
|
||||
status=status,
|
||||
appliance_size=appliance_size,
|
||||
edge_type=edge_type)
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def get_nsxv_router_binding(session, router_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvRouterBinding).
|
||||
filter_by(router_id=router_id).first())
|
||||
|
||||
|
||||
def get_nsxv_router_binding_by_edge(session, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvRouterBinding).
|
||||
filter_by(edge_id=edge_id).first())
|
||||
|
||||
|
||||
def get_nsxv_router_bindings(session):
|
||||
with session.begin(subtransactions=True):
|
||||
return session.query(nsxv_models.NsxvRouterBinding).all()
|
||||
|
||||
|
||||
def update_nsxv_router_binding(session, router_id, **kwargs):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvRouterBinding).
|
||||
filter_by(router_id=router_id).one())
|
||||
for key, value in kwargs.iteritems():
|
||||
binding[key] = value
|
||||
|
||||
|
||||
def delete_nsxv_router_binding(session, router_id):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvRouterBinding).
|
||||
filter_by(router_id=router_id).one())
|
||||
session.delete(binding)
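A hedged usage sketch of the router-binding helpers above; the identifiers and status value are made up, and session would come from neutron.db.api in real code:

binding = add_nsxv_router_binding(session, 'router-uuid', 'edge-15',
                                  'lswitch-uuid', 'ACTIVE')
update_nsxv_router_binding(session, 'router-uuid', status='ERROR')
assert get_nsxv_router_binding(session, 'router-uuid').edge_id == 'edge-15'
delete_nsxv_router_binding(session, 'router-uuid')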
|
||||
|
||||
|
||||
def get_edge_vnic_binding(session, edge_id, network_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id, network_id=network_id).first())
|
||||
|
||||
|
||||
def get_edge_vnic_bindings_by_edge(session, edge_id):
|
||||
query = session.query(nsxv_models.NsxvEdgeVnicBinding)
|
||||
query = query.filter(
|
||||
nsxv_models.NsxvEdgeVnicBinding.edge_id == edge_id,
|
||||
nsxv_models.NsxvEdgeVnicBinding.network_id is not None)
|
||||
return query.all()
|
||||
|
||||
|
||||
def get_edge_vnic_bindings_by_int_lswitch(session, lswitch_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(network_id=lswitch_id).all())
|
||||
|
||||
|
||||
def create_edge_vnic_binding(session, edge_id, vnic_index,
|
||||
network_id, tunnel_index=-1):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = nsxv_models.NsxvEdgeVnicBinding(
|
||||
edge_id=edge_id,
|
||||
vnic_index=vnic_index,
|
||||
tunnel_index=tunnel_index,
|
||||
network_id=network_id)
|
||||
session.add(binding)
|
||||
|
||||
|
||||
def delete_edge_vnic_binding_by_network(session, edge_id, network_id):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id, network_id=network_id).one())
|
||||
session.delete(binding)
|
||||
|
||||
|
||||
def init_edge_vnic_binding(session, edge_id):
|
||||
"""Init edge vnic binding to preallocated 10 available edge vnics."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
        for vnic_index in range(1, constants.MAX_VNIC_NUM):
|
||||
start = (vnic_index - 1) * constants.MAX_TUNNEL_NUM
|
||||
stop = vnic_index * constants.MAX_TUNNEL_NUM
|
||||
for tunnel_index in range(start, stop):
|
||||
binding = nsxv_models.NsxvEdgeVnicBinding(
|
||||
edge_id=edge_id,
|
||||
vnic_index=vnic_index,
|
||||
tunnel_index=tunnel_index + 1)
|
||||
session.add(binding)
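To make the index layout above concrete: assuming, for the sake of illustration, MAX_TUNNEL_NUM = 20 (the real value lives in vshield.common.constants and may differ), vnic i owns tunnel indexes (i - 1) * 20 + 1 through i * 20, which is why allocate_edge_vnic below treats tunnel_index % MAX_TUNNEL_NUM == 1 as "first slot of a free vnic". A tiny sketch of that arithmetic:

# Illustrative arithmetic only; the real constant comes from
# vshield.common.constants and may differ.
MAX_TUNNEL_NUM = 20

def tunnel_range_for_vnic(vnic_index):
    start = (vnic_index - 1) * MAX_TUNNEL_NUM + 1
    return range(start, start + MAX_TUNNEL_NUM)

assert list(tunnel_range_for_vnic(1))[:3] == [1, 2, 3]
assert list(tunnel_range_for_vnic(2))[0] == 21   # 21 % 20 == 1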
|
||||
|
||||
|
||||
def clean_edge_vnic_binding(session, edge_id):
|
||||
"""Clean edge vnic binding."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
(session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id).delete())
|
||||
|
||||
|
||||
def allocate_edge_vnic(session, edge_id, network_id):
|
||||
"""Allocate an avaliable edge vnic to network."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id, network_id=None).all())
|
||||
for binding in bindings:
|
||||
if binding['tunnel_index'] % constants.MAX_TUNNEL_NUM == 1:
|
||||
binding['network_id'] = network_id
|
||||
session.add(binding)
|
||||
return binding
|
||||
msg = (_("Failed to allocate one available vnic on edge_id: "
|
||||
":%(edge_id)s to network_id: %(network_id)s") %
|
||||
{'edge_id': edge_id, 'network_id': network_id})
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def allocate_edge_vnic_with_tunnel_index(session, edge_id, network_id):
|
||||
"""Allocate an avaliable edge vnic with tunnel index to network."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id, network_id=None).first())
|
||||
if not binding:
|
||||
msg = (_("Failed to allocate one available vnic on edge_id: "
|
||||
":%(edge_id)s to network_id: %(network_id)s") %
|
||||
{'edge_id': edge_id, 'network_id': network_id})
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
binding['network_id'] = network_id
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def allocate_specific_edge_vnic(session, edge_id, vnic_index,
|
||||
tunnel_index, network_id):
|
||||
"""Allocate an specific edge vnic to network."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id,
|
||||
vnic_index=vnic_index,
|
||||
tunnel_index=tunnel_index).one())
|
||||
binding['network_id'] = network_id
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def get_dhcp_edge_network_binding(session, network_id):
|
||||
with session.begin(subtransactions=True):
|
||||
dhcp_router_edges = [binding['edge_id']
|
||||
for binding in get_nsxv_router_bindings(session)
|
||||
if binding['router_id'].startswith(
|
||||
constants.DHCP_EDGE_PREFIX)]
|
||||
bindings = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(network_id=network_id))
|
||||
for binding in bindings:
|
||||
edge_id = binding['edge_id']
|
||||
if edge_id in dhcp_router_edges:
|
||||
return binding
|
||||
|
||||
|
||||
def free_edge_vnic_by_network(session, edge_id, network_id):
|
||||
"""Free an edge vnic."""
|
||||
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(nsxv_models.NsxvEdgeVnicBinding).
|
||||
filter_by(edge_id=edge_id, network_id=network_id).one())
|
||||
binding['network_id'] = None
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def create_edge_dhcp_static_binding(session, edge_id, mac_address, binding_id):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = nsxv_models.NsxvEdgeDhcpStaticBinding(
|
||||
edge_id=edge_id,
|
||||
mac_address=mac_address,
|
||||
binding_id=binding_id)
|
||||
session.add(binding)
|
||||
|
||||
|
||||
def get_edge_dhcp_static_binding(session, edge_id, mac_address):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).
|
||||
filter_by(edge_id=edge_id, mac_address=mac_address).first())
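A small hedged pairing of the two DHCP static-binding helpers above; the edge id, MAC address, and binding id are placeholders:

# Illustrative only; values are invented.
create_edge_dhcp_static_binding(session, 'edge-1', 'fa:16:3e:00:00:01',
                                'dhcp-binding-uuid')
entry = get_edge_dhcp_static_binding(session, 'edge-1', 'fa:16:3e:00:00:01')
# entry['binding_id'] == 'dhcp-binding-uuid' if the insert above succeeded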
|
||||
|
||||
|
||||
def delete_edge_dhcp_static_binding(session, edge_id, mac_address):
|
||||
with session.begin(subtransactions=True):
|
||||
session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by(
|
||||
edge_id=edge_id, mac_address=mac_address).delete()
|
||||
|
||||
|
||||
def clean_edge_dhcp_static_bindings_by_edge(session, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
session.query(nsxv_models.NsxvEdgeDhcpStaticBinding).filter_by(
|
||||
edge_id=edge_id).delete()
|
||||
|
||||
|
||||
def create_nsxv_internal_network(session, network_purpose, network_id):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
network = nsxv_models.NsxvInternalNetworks(
|
||||
network_purpose=network_purpose,
|
||||
network_id=network_id)
|
||||
session.add(network)
|
||||
except db_exc.DBDuplicateEntry:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Duplicate internal network for purpose %s"),
|
||||
network_purpose)
|
||||
|
||||
|
||||
def get_nsxv_internal_network(session, network_purpose):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvInternalNetworks).
|
||||
filter_by(network_purpose=network_purpose).all())
|
||||
|
||||
|
||||
def delete_nsxv_internal_network(session, network_purpose):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvInternalNetworks).
|
||||
filter_by(network_purpose=network_purpose).delete())
|
||||
|
||||
|
||||
def create_nsxv_internal_edge(session, ext_ip_address, purpose, router_id):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
internal_edge = nsxv_models.NsxvInternalEdges(
|
||||
ext_ip_address=ext_ip_address,
|
||||
purpose=purpose,
|
||||
router_id=router_id)
|
||||
session.add(internal_edge)
|
||||
except db_exc.DBDuplicateEntry:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Duplicate internal Edge IP %s"),
|
||||
ext_ip_address)
|
||||
|
||||
|
||||
def get_nsxv_internal_edge(session, ext_ip_address):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvInternalEdges).
|
||||
filter_by(ext_ip_address=ext_ip_address).all())
|
||||
|
||||
|
||||
def get_nsxv_internal_edges_by_purpose(session, purpose):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvInternalEdges).
|
||||
filter_by(purpose=purpose).all())
|
||||
|
||||
|
||||
def delete_nsxv_internal_edge(session, ext_ip_address):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvInternalEdges).
|
||||
filter_by(ext_ip_address=ext_ip_address).delete())
|
||||
|
||||
|
||||
def add_neutron_nsx_section_mapping(session, neutron_id, ip_section_id,
|
||||
mac_section_id=None):
|
||||
with session.begin(subtransactions=True):
|
||||
mapping = nsxv_models.NsxvSectionMapping(
|
||||
neutron_id=neutron_id, ip_section_id=ip_section_id,
|
||||
mac_section_id=mac_section_id)
|
||||
session.add(mapping)
|
||||
return mapping
|
||||
|
||||
|
||||
def add_neutron_nsx_rule_mapping(session, neutron_id, nsx_rule_id):
|
||||
with session.begin(subtransactions=True):
|
||||
mapping = nsxv_models.NsxvRuleMapping(neutron_id=neutron_id,
|
||||
nsx_rule_id=nsx_rule_id)
|
||||
session.add(mapping)
|
||||
return mapping
|
||||
|
||||
|
||||
def add_neutron_nsx_port_vnic_mapping(session, neutron_id, nsx_id):
|
||||
with session.begin(subtransactions=True):
|
||||
mapping = nsxv_models.NsxvPortVnicMapping(
|
||||
neutron_id=neutron_id, nsx_id=nsx_id)
|
||||
session.add(mapping)
|
||||
return mapping
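Likewise, a brief sketch (with assumed identifiers) of how the port/vnic mapping helper above pairs with the get_nsx_vnic_id lookup defined further down:

# Illustrative only; the neutron port id and NSX vnic id are invented.
add_neutron_nsx_port_vnic_mapping(session, 'neutron-port-uuid', 'vm-123.000')
vnic_id = get_nsx_vnic_id(session, 'neutron-port-uuid')
# vnic_id is 'vm-123.000', or None if no mapping had been stored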
|
||||
|
||||
|
||||
def get_nsx_section(session, neutron_id):
|
||||
try:
|
||||
mapping = (session.query(nsxv_models.NsxvSectionMapping).
|
||||
filter_by(neutron_id=neutron_id).
|
||||
one())
|
||||
return mapping
|
||||
except exc.NoResultFound:
|
||||
LOG.debug("NSX identifiers for neutron security group %s not yet "
|
||||
"stored in Neutron DB", neutron_id)
|
||||
|
||||
|
||||
def get_nsx_rule_id(session, neutron_id):
|
||||
try:
|
||||
mapping = (session.query(nsxv_models.NsxvRuleMapping).
|
||||
filter_by(neutron_id=neutron_id).
|
||||
one())
|
||||
return mapping['nsx_rule_id']
|
||||
except exc.NoResultFound:
|
||||
LOG.debug("NSX identifiers for neutron rule %s not yet "
|
||||
"stored in Neutron DB", neutron_id)
|
||||
|
||||
|
||||
def get_nsx_vnic_id(session, neutron_id):
|
||||
try:
|
||||
mapping = (session.query(nsxv_models.NsxvPortVnicMapping).
|
||||
filter_by(neutron_id=neutron_id).
|
||||
one())
|
||||
return mapping['nsx_id']
|
||||
except exc.NoResultFound:
|
||||
LOG.debug("NSX identifiers for neutron port %s not yet "
|
||||
"stored in Neutron DB", neutron_id)
|
||||
|
||||
|
||||
def get_network_bindings(session, network_id):
|
||||
session = session or db.get_session()
|
||||
return (session.query(nsxv_models.NsxvTzNetworkBinding).
|
||||
filter_by(network_id=network_id).
|
||||
all())
|
||||
|
||||
|
||||
def get_network_bindings_by_vlanid_and_physical_net(session, vlan_id,
|
||||
phy_uuid):
|
||||
session = session or db.get_session()
|
||||
return (session.query(nsxv_models.NsxvTzNetworkBinding).
|
||||
filter_by(vlan_id=vlan_id, phy_uuid=phy_uuid).
|
||||
all())
|
||||
|
||||
|
||||
def delete_network_bindings(session, network_id):
|
||||
return (session.query(nsxv_models.NsxvTzNetworkBinding).
|
||||
filter_by(network_id=network_id).delete())
|
||||
|
||||
|
||||
def add_network_binding(session, network_id, binding_type, phy_uuid, vlan_id):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = nsxv_models.NsxvTzNetworkBinding(network_id, binding_type,
|
||||
phy_uuid, vlan_id)
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def get_network_bindings_by_vlanid(session, vlan_id):
|
||||
session = session or db.get_session()
|
||||
return (session.query(nsxv_models.NsxvTzNetworkBinding).
|
||||
filter_by(vlan_id=vlan_id).
|
||||
all())
|
||||
|
||||
|
||||
#
|
||||
# Edge Firewall binding methods
|
||||
#
|
||||
def add_nsxv_edge_firewallrule_binding(session, map_info):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = nsxv_models.NsxvEdgeFirewallRuleBinding(
|
||||
rule_id=map_info['rule_id'],
|
||||
rule_vseid=map_info['rule_vseid'],
|
||||
edge_id=map_info['edge_id'])
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def delete_nsxv_edge_firewallrule_binding(session, id):
|
||||
with session.begin(subtransactions=True):
|
||||
if not (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
|
||||
filter_by(rule_id=id).delete()):
|
||||
msg = _("Rule Resource binding with id:%s not found!") % id
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def get_nsxv_edge_firewallrule_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
|
||||
filter_by(rule_id=id, edge_id=edge_id).first())
|
||||
|
||||
|
||||
def get_nsxv_edge_firewallrule_binding_by_vseid(
|
||||
session, edge_id, rule_vseid):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
return (session.query(nsxv_models.NsxvEdgeFirewallRuleBinding).
|
||||
filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one())
|
||||
except exc.NoResultFound:
|
||||
msg = _("Rule Resource binding not found!")
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def cleanup_nsxv_edge_firewallrule_binding(session, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
session.query(
|
||||
nsxv_models.NsxvEdgeFirewallRuleBinding).filter_by(
|
||||
edge_id=edge_id).delete()
|
@ -1,302 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import sqlalchemy as sa
|
||||
from sqlalchemy import orm
|
||||
from sqlalchemy.orm import exc
|
||||
from sqlalchemy import sql
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.db import model_base
|
||||
from neutron.db import models_v2
|
||||
from neutron.i18n import _LI
|
||||
from neutron.openstack.common import log
|
||||
from neutron.openstack.common import uuidutils
|
||||
from neutron.plugins.vmware.extensions import qos
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class QoSQueue(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
|
||||
name = sa.Column(sa.String(255))
|
||||
default = sa.Column(sa.Boolean, default=False, server_default=sql.false())
|
||||
min = sa.Column(sa.Integer, nullable=False)
|
||||
max = sa.Column(sa.Integer, nullable=True)
|
||||
qos_marking = sa.Column(sa.Enum('untrusted', 'trusted',
|
||||
name='qosqueues_qos_marking'))
|
||||
dscp = sa.Column(sa.Integer)
|
||||
|
||||
|
||||
class PortQueueMapping(model_base.BASEV2):
|
||||
port_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("ports.id", ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
|
||||
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id"),
|
||||
primary_key=True)
|
||||
|
||||
    # Add a relationship to the Port model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
|
||||
port = orm.relationship(
|
||||
models_v2.Port,
|
||||
backref=orm.backref("qos_queue", uselist=False,
|
||||
cascade='delete', lazy='joined'))
|
||||
|
||||
|
||||
class NetworkQueueMapping(model_base.BASEV2):
|
||||
network_id = sa.Column(sa.String(36),
|
||||
sa.ForeignKey("networks.id", ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
|
||||
queue_id = sa.Column(sa.String(36), sa.ForeignKey("qosqueues.id",
|
||||
ondelete="CASCADE"))
|
||||
|
||||
    # Add a relationship to the Network model adding a backref which will
    # allow SQLAlchemy to eagerly load the queue binding
|
||||
network = orm.relationship(
|
||||
models_v2.Network,
|
||||
backref=orm.backref("qos_queue", uselist=False,
|
||||
cascade='delete', lazy='joined'))
|
||||
|
||||
|
||||
class QoSDbMixin(qos.QueuePluginBase):
|
||||
"""Mixin class to add queues."""
|
||||
|
||||
def create_qos_queue(self, context, qos_queue):
|
||||
q = qos_queue['qos_queue']
|
||||
with context.session.begin(subtransactions=True):
|
||||
qos_queue = QoSQueue(id=q.get('id', uuidutils.generate_uuid()),
|
||||
name=q.get('name'),
|
||||
tenant_id=q['tenant_id'],
|
||||
default=q.get('default'),
|
||||
min=q.get('min'),
|
||||
max=q.get('max'),
|
||||
qos_marking=q.get('qos_marking'),
|
||||
dscp=q.get('dscp'))
|
||||
context.session.add(qos_queue)
|
||||
return self._make_qos_queue_dict(qos_queue)
|
||||
|
||||
def get_qos_queue(self, context, queue_id, fields=None):
|
||||
return self._make_qos_queue_dict(
|
||||
self._get_qos_queue(context, queue_id), fields)
|
||||
|
||||
def _get_qos_queue(self, context, queue_id):
|
||||
try:
|
||||
return self._get_by_id(context, QoSQueue, queue_id)
|
||||
except exc.NoResultFound:
|
||||
raise qos.QueueNotFound(id=queue_id)
|
||||
|
||||
def get_qos_queues(self, context, filters=None, fields=None, sorts=None,
|
||||
limit=None, marker=None, page_reverse=False):
|
||||
marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
|
||||
return self._get_collection(context, QoSQueue,
|
||||
self._make_qos_queue_dict,
|
||||
filters=filters, fields=fields,
|
||||
sorts=sorts, limit=limit,
|
||||
marker_obj=marker_obj,
|
||||
page_reverse=page_reverse)
|
||||
|
||||
def delete_qos_queue(self, context, queue_id):
|
||||
qos_queue = self._get_qos_queue(context, queue_id)
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.delete(qos_queue)
|
||||
|
||||
def _process_port_queue_mapping(self, context, port_data, queue_id):
|
||||
port_data[qos.QUEUE] = queue_id
|
||||
if not queue_id:
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.add(PortQueueMapping(port_id=port_data['id'],
|
||||
queue_id=queue_id))
|
||||
|
||||
def _get_port_queue_bindings(self, context, filters=None, fields=None):
|
||||
return self._get_collection(context, PortQueueMapping,
|
||||
self._make_port_queue_binding_dict,
|
||||
filters=filters, fields=fields)
|
||||
|
||||
def _delete_port_queue_mapping(self, context, port_id):
|
||||
query = self._model_query(context, PortQueueMapping)
|
||||
try:
|
||||
binding = query.filter(PortQueueMapping.port_id == port_id).one()
|
||||
except exc.NoResultFound:
|
||||
# return since this can happen if we are updating a port that
|
||||
# did not already have a queue on it. There is no need to check
|
||||
# if there is one before deleting if we return here.
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.delete(binding)
|
||||
|
||||
def _process_network_queue_mapping(self, context, net_data, queue_id):
|
||||
net_data[qos.QUEUE] = queue_id
|
||||
if not queue_id:
|
||||
return
|
||||
with context.session.begin(subtransactions=True):
|
||||
context.session.add(
|
||||
NetworkQueueMapping(network_id=net_data['id'],
|
||||
queue_id=queue_id))
|
||||
|
||||
def _get_network_queue_bindings(self, context, filters=None, fields=None):
|
||||
return self._get_collection(context, NetworkQueueMapping,
|
||||
self._make_network_queue_binding_dict,
|
||||
filters=filters, fields=fields)
|
||||
|
||||
def _delete_network_queue_mapping(self, context, network_id):
|
||||
query = self._model_query(context, NetworkQueueMapping)
|
||||
with context.session.begin(subtransactions=True):
|
||||
binding = query.filter_by(network_id=network_id).first()
|
||||
if binding:
|
||||
context.session.delete(binding)
|
||||
|
||||
def _extend_dict_qos_queue(self, obj_res, obj_db):
|
||||
queue_mapping = obj_db['qos_queue']
|
||||
if queue_mapping:
|
||||
obj_res[qos.QUEUE] = queue_mapping.get('queue_id')
|
||||
return obj_res
|
||||
|
||||
def _extend_port_dict_qos_queue(self, port_res, port_db):
|
||||
self._extend_dict_qos_queue(port_res, port_db)
|
||||
|
||||
def _extend_network_dict_qos_queue(self, network_res, network_db):
|
||||
self._extend_dict_qos_queue(network_res, network_db)
|
||||
|
||||
# Register dict extend functions for networks and ports
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attr.NETWORKS, ['_extend_network_dict_qos_queue'])
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attr.PORTS, ['_extend_port_dict_qos_queue'])
|
||||
|
||||
def _make_qos_queue_dict(self, queue, fields=None):
|
||||
res = {'id': queue['id'],
|
||||
'name': queue.get('name'),
|
||||
'default': queue.get('default'),
|
||||
'tenant_id': queue['tenant_id'],
|
||||
'min': queue.get('min'),
|
||||
'max': queue.get('max'),
|
||||
'qos_marking': queue.get('qos_marking'),
|
||||
'dscp': queue.get('dscp')}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _make_port_queue_binding_dict(self, queue, fields=None):
|
||||
res = {'port_id': queue['port_id'],
|
||||
'queue_id': queue['queue_id']}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _make_network_queue_binding_dict(self, queue, fields=None):
|
||||
res = {'network_id': queue['network_id'],
|
||||
'queue_id': queue['queue_id']}
|
||||
return self._fields(res, fields)
|
||||
|
||||
def _check_for_queue_and_create(self, context, port):
|
||||
"""Check for queue and create.
|
||||
|
||||
This function determines if a port should be associated with a
|
||||
queue. It works by first querying NetworkQueueMapping to determine
|
||||
if the network is associated with a queue. If so, then it queries
|
||||
NetworkQueueMapping for all the networks that are associated with
|
||||
this queue. Next, it queries against all the ports on these networks
|
||||
with the port device_id. Finally it queries PortQueueMapping. If that
|
||||
query returns a queue_id that is returned. Otherwise a queue is
|
||||
created that is the size of the queue associated with the network and
|
||||
that queue_id is returned.
|
||||
|
||||
If the network is not associated with a queue we then query to see
|
||||
if there is a default queue in the system. If so, a copy of that is
|
||||
created and the queue_id is returned.
|
||||
|
||||
Otherwise None is returned. None is also returned if the port does not
|
||||
        have a device_id or if the device_owner starts with 'network:'.
|
||||
"""
|
||||
|
||||
queue_to_create = None
|
||||
# If there is no device_id don't create a queue. The queue will be
|
||||
# created on update port when the device_id is present. Also don't
|
||||
# apply QoS to network ports.
|
||||
if (not port.get('device_id') or
|
||||
port['device_owner'].startswith('network:')):
|
||||
return
|
||||
|
||||
# Check if there is a queue associated with the network
|
||||
filters = {'network_id': [port['network_id']]}
|
||||
network_queue_id = self._get_network_queue_bindings(
|
||||
context, filters, ['queue_id'])
|
||||
if network_queue_id:
|
||||
# get networks that queue is associated with
|
||||
filters = {'queue_id': [network_queue_id[0]['queue_id']]}
|
||||
networks_with_same_queue = self._get_network_queue_bindings(
|
||||
context, filters)
|
||||
|
||||
# get the ports on these networks with the same_queue and device_id
|
||||
filters = {'device_id': [port.get('device_id')],
|
||||
'network_id': [network['network_id'] for
|
||||
network in networks_with_same_queue]}
|
||||
query = self._model_query(context, models_v2.Port.id)
|
||||
query = self._apply_filters_to_query(query, models_v2.Port,
|
||||
filters)
|
||||
ports_ids = [p[0] for p in query]
|
||||
if ports_ids:
|
||||
# shared queue already exists find the queue id
|
||||
queues = self._get_port_queue_bindings(context,
|
||||
{'port_id': ports_ids},
|
||||
['queue_id'])
|
||||
if queues:
|
||||
return queues[0]['queue_id']
|
||||
|
||||
# get the size of the queue we want to create
|
||||
queue_to_create = self._get_qos_queue(
|
||||
context, network_queue_id[0]['queue_id'])
|
||||
|
||||
else:
|
||||
# check for default queue
|
||||
filters = {'default': [True]}
|
||||
# context is elevated since default queue is owned by admin
|
||||
queue_to_create = self.get_qos_queues(context.elevated(), filters)
|
||||
if not queue_to_create:
|
||||
return
|
||||
queue_to_create = queue_to_create[0]
|
||||
|
||||
# create the queue
|
||||
tenant_id = self._get_tenant_id_for_create(context, port)
|
||||
if port.get(qos.RXTX_FACTOR) and queue_to_create.get('max'):
|
||||
queue_to_create['max'] *= int(port[qos.RXTX_FACTOR])
|
||||
queue = {'qos_queue': {'name': queue_to_create.get('name'),
|
||||
'min': queue_to_create.get('min'),
|
||||
'max': queue_to_create.get('max'),
|
||||
'dscp': queue_to_create.get('dscp'),
|
||||
'qos_marking':
|
||||
queue_to_create.get('qos_marking'),
|
||||
'tenant_id': tenant_id}}
|
||||
return self.create_qos_queue(context, queue, False)['id']
|
||||
|
||||
def _validate_qos_queue(self, context, qos_queue):
|
||||
if qos_queue.get('default'):
|
||||
if context.is_admin:
|
||||
if self.get_qos_queues(context, filters={'default': [True]}):
|
||||
raise qos.DefaultQueueAlreadyExists()
|
||||
else:
|
||||
raise qos.DefaultQueueCreateNotAdmin()
|
||||
if qos_queue.get('qos_marking') == 'trusted':
|
||||
dscp = qos_queue.pop('dscp')
|
||||
if dscp:
|
||||
# must raise because a non-zero dscp was provided
|
||||
raise qos.QueueInvalidMarking()
|
||||
LOG.info(_LI("DSCP value (%s) will be ignored with 'trusted' "
|
||||
"marking"), dscp)
|
||||
max = qos_queue.get('max')
|
||||
min = qos_queue.get('min')
|
||||
# Max can be None
|
||||
if max and min > max:
|
||||
raise qos.QueueMinGreaterMax()
|
@ -1,202 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.dbexts import vcns_models
|
||||
from neutron.plugins.vmware.vshield.common import (
|
||||
exceptions as vcns_exc)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = vcns_models.VcnsRouterBinding(
|
||||
router_id=router_id,
|
||||
edge_id=vse_id,
|
||||
lswitch_id=lswitch_id,
|
||||
status=status)
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def get_vcns_router_binding(session, router_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(vcns_models.VcnsRouterBinding).
|
||||
filter_by(router_id=router_id).first())
|
||||
|
||||
|
||||
def update_vcns_router_binding(session, router_id, **kwargs):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(vcns_models.VcnsRouterBinding).
|
||||
filter_by(router_id=router_id).one())
|
||||
for key, value in kwargs.iteritems():
|
||||
binding[key] = value
|
||||
|
||||
|
||||
def delete_vcns_router_binding(session, router_id):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = (session.query(vcns_models.VcnsRouterBinding).
|
||||
filter_by(router_id=router_id).one())
|
||||
session.delete(binding)
|
||||
|
||||
|
||||
#
|
||||
# Edge Firewall binding methods
|
||||
#
|
||||
def add_vcns_edge_firewallrule_binding(session, map_info):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = vcns_models.VcnsEdgeFirewallRuleBinding(
|
||||
rule_id=map_info['rule_id'],
|
||||
rule_vseid=map_info['rule_vseid'],
|
||||
edge_id=map_info['edge_id'])
|
||||
session.add(binding)
|
||||
return binding
|
||||
|
||||
|
||||
def delete_vcns_edge_firewallrule_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
|
||||
filter_by(rule_id=id, edge_id=edge_id).delete()):
|
||||
msg = _("Rule Resource binding with id:%s not found!") % id
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def get_vcns_edge_firewallrule_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
|
||||
filter_by(rule_id=id, edge_id=edge_id).first())
|
||||
|
||||
|
||||
def get_vcns_edge_firewallrule_binding_by_vseid(
|
||||
session, edge_id, rule_vseid):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
|
||||
filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one())
|
||||
except exc.NoResultFound:
|
||||
msg = _("Rule Resource binding not found!")
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def cleanup_vcns_edge_firewallrule_binding(session, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
session.query(
|
||||
vcns_models.VcnsEdgeFirewallRuleBinding).filter_by(
|
||||
edge_id=edge_id).delete()
|
||||
|
||||
|
||||
def add_vcns_edge_vip_binding(session, map_info):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = vcns_models.VcnsEdgeVipBinding(
|
||||
vip_id=map_info['vip_id'],
|
||||
edge_id=map_info['edge_id'],
|
||||
vip_vseid=map_info['vip_vseid'],
|
||||
app_profileid=map_info['app_profileid'])
|
||||
session.add(binding)
|
||||
|
||||
return binding
|
||||
|
||||
|
||||
def get_vcns_edge_vip_binding(session, id):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
qry = session.query(vcns_models.VcnsEdgeVipBinding)
|
||||
return qry.filter_by(vip_id=id).one()
|
||||
except exc.NoResultFound:
|
||||
msg = _("VIP Resource binding with id:%s not found!") % id
|
||||
LOG.exception(msg)
|
||||
raise vcns_exc.VcnsNotFound(
|
||||
resource='router_service_binding', msg=msg)
|
||||
|
||||
|
||||
def delete_vcns_edge_vip_binding(session, id):
|
||||
with session.begin(subtransactions=True):
|
||||
qry = session.query(vcns_models.VcnsEdgeVipBinding)
|
||||
if not qry.filter_by(vip_id=id).delete():
|
||||
msg = _("VIP Resource binding with id:%s not found!") % id
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def add_vcns_edge_pool_binding(session, map_info):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = vcns_models.VcnsEdgePoolBinding(
|
||||
pool_id=map_info['pool_id'],
|
||||
edge_id=map_info['edge_id'],
|
||||
pool_vseid=map_info['pool_vseid'])
|
||||
session.add(binding)
|
||||
|
||||
return binding
|
||||
|
||||
|
||||
def get_vcns_edge_pool_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(vcns_models.VcnsEdgePoolBinding).
|
||||
filter_by(pool_id=id, edge_id=edge_id).first())
|
||||
|
||||
|
||||
def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid):
|
||||
with session.begin(subtransactions=True):
|
||||
try:
|
||||
qry = session.query(vcns_models.VcnsEdgePoolBinding)
|
||||
binding = qry.filter_by(edge_id=edge_id,
|
||||
pool_vseid=pool_vseid).one()
|
||||
except exc.NoResultFound:
|
||||
msg = (_("Pool Resource binding with edge_id:%(edge_id)s "
|
||||
"pool_vseid:%(pool_vseid)s not found!") %
|
||||
{'edge_id': edge_id, 'pool_vseid': pool_vseid})
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
return binding
|
||||
|
||||
|
||||
def delete_vcns_edge_pool_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
qry = session.query(vcns_models.VcnsEdgePoolBinding)
|
||||
if not qry.filter_by(pool_id=id, edge_id=edge_id).delete():
|
||||
msg = _("Pool Resource binding with id:%s not found!") % id
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
||||
def add_vcns_edge_monitor_binding(session, map_info):
|
||||
with session.begin(subtransactions=True):
|
||||
binding = vcns_models.VcnsEdgeMonitorBinding(
|
||||
monitor_id=map_info['monitor_id'],
|
||||
edge_id=map_info['edge_id'],
|
||||
monitor_vseid=map_info['monitor_vseid'])
|
||||
session.add(binding)
|
||||
|
||||
return binding
|
||||
|
||||
|
||||
def get_vcns_edge_monitor_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
return (session.query(vcns_models.VcnsEdgeMonitorBinding).
|
||||
filter_by(monitor_id=id, edge_id=edge_id).first())
|
||||
|
||||
|
||||
def delete_vcns_edge_monitor_binding(session, id, edge_id):
|
||||
with session.begin(subtransactions=True):
|
||||
qry = session.query(vcns_models.VcnsEdgeMonitorBinding)
|
||||
if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete():
|
||||
msg = _("Monitor Resource binding with id:%s not found!") % id
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
@ -1,94 +0,0 @@
|
||||
# Copyright 2013 VMware, Inc.
|
||||
#
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import sqlalchemy as sa
|
||||
|
||||
from neutron.db import model_base
|
||||
from neutron.db import models_v2
|
||||
|
||||
|
||||
class VcnsRouterBinding(model_base.BASEV2, models_v2.HasStatusDescription):
|
||||
"""Represents the mapping between neutron router and vShield Edge."""
|
||||
|
||||
__tablename__ = 'vcns_router_bindings'
|
||||
|
||||
# no ForeignKey to routers.id because for now, a router can be removed
|
||||
# from routers when delete_router is executed, but the binding is only
|
||||
# removed after the Edge is deleted
|
||||
router_id = sa.Column(sa.String(36),
|
||||
primary_key=True)
|
||||
edge_id = sa.Column(sa.String(16),
|
||||
nullable=True)
|
||||
lswitch_id = sa.Column(sa.String(36),
|
||||
nullable=False)
|
||||
|
||||
|
||||
#
|
||||
# VCNS Edge FW mapping tables
|
||||
#
|
||||
class VcnsEdgeFirewallRuleBinding(model_base.BASEV2):
|
||||
"""1:1 mapping between firewall rule and edge firewall rule_id."""
|
||||
|
||||
__tablename__ = 'vcns_firewall_rule_bindings'
|
||||
|
||||
rule_id = sa.Column(sa.String(36),
|
||||
# TODO(dougw) unbreak this link
|
||||
#sa.ForeignKey("firewall_rules.id"),
|
||||
primary_key=True)
|
||||
edge_id = sa.Column(sa.String(36), primary_key=True)
|
||||
rule_vseid = sa.Column(sa.String(36))
|
||||
|
||||
|
||||
class VcnsEdgePoolBinding(model_base.BASEV2):
|
||||
"""Represents the mapping between neutron pool and Edge pool."""
|
||||
|
||||
__tablename__ = 'vcns_edge_pool_bindings'
|
||||
|
||||
pool_id = sa.Column(sa.String(36),
|
||||
# TODO(dougw) unbreak this link
|
||||
#sa.ForeignKey("pools.id", ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
edge_id = sa.Column(sa.String(36), primary_key=True)
|
||||
pool_vseid = sa.Column(sa.String(36))
|
||||
|
||||
|
||||
class VcnsEdgeVipBinding(model_base.BASEV2):
|
||||
"""Represents the mapping between neutron vip and Edge vip."""
|
||||
|
||||
__tablename__ = 'vcns_edge_vip_bindings'
|
||||
|
||||
vip_id = sa.Column(sa.String(36),
|
||||
# TODO(dougw) unbreak this link
|
||||
#sa.ForeignKey("vips.id", ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
edge_id = sa.Column(sa.String(36))
|
||||
vip_vseid = sa.Column(sa.String(36))
|
||||
app_profileid = sa.Column(sa.String(36))
|
||||
|
||||
|
||||
class VcnsEdgeMonitorBinding(model_base.BASEV2):
|
||||
"""Represents the mapping between neutron monitor and Edge monitor."""
|
||||
|
||||
__tablename__ = 'vcns_edge_monitor_bindings'
|
||||
|
||||
monitor_id = sa.Column(sa.String(36),
|
||||
# TODO(dougw) unbreak this link
|
||||
#sa.ForeignKey("healthmonitors.id",
|
||||
# ondelete="CASCADE"),
|
||||
primary_key=True)
|
||||
edge_id = sa.Column(sa.String(36), primary_key=True)
|
||||
monitor_vseid = sa.Column(sa.String(36))
|
61
vmware_nsx/neutron/plugins/vmware/dbexts/vnic_index_db.py
Normal file
@ -0,0 +1,61 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy.orm import exc
|
||||
|
||||
from neutron.api.v2 import attributes as attr
|
||||
from neutron.db import db_base_plugin_v2
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import nsxv_models
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import vnic_index as vnicidx
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VnicIndexDbMixin(object):
|
||||
|
||||
def _extend_port_vnic_index_binding(self, port_res, port_db):
|
||||
state = port_db.vnic_index
|
||||
port_res[vnicidx.VNIC_INDEX] = state.index if state else None
|
||||
|
||||
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
|
||||
attr.PORTS, ['_extend_port_vnic_index_binding'])
|
||||
|
||||
def _get_port_vnic_index(self, context, port_id):
|
||||
"""Returns the vnic index for the given port.
|
||||
If the port is not associated with any vnic then return None
|
||||
"""
|
||||
session = context.session
|
||||
try:
|
||||
mapping = (session.query(nsxv_models.NsxvPortIndexMapping).
|
||||
filter_by(port_id=port_id).one())
|
||||
return mapping['index']
|
||||
except exc.NoResultFound:
|
||||
LOG.debug("No record in DB for vnic-index of port %s", port_id)
|
||||
|
||||
def _set_port_vnic_index_mapping(self, context, port_id, device_id, index):
|
||||
"""Save the port vnic-index to DB."""
|
||||
session = context.session
|
||||
with session.begin(subtransactions=True):
|
||||
index_mapping_model = nsxv_models.NsxvPortIndexMapping(
|
||||
port_id=port_id, device_id=device_id, index=index)
|
||||
session.add(index_mapping_model)
|
||||
|
||||
def _delete_port_vnic_index_mapping(self, context, port_id):
|
||||
"""Delete the port vnic-index association."""
|
||||
session = context.session
|
||||
query = (session.query(nsxv_models.NsxvPortIndexMapping).
|
||||
filter_by(port_id=port_id))
|
||||
query.delete()
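A non-authoritative sketch of how a plugin that mixes this class in might drive the three helpers over a port's lifetime; the context, ids, and the index value are placeholders:

# Illustrative only; 'plugin' is an object mixing in VnicIndexDbMixin.
plugin._set_port_vnic_index_mapping(context, 'port-uuid', 'instance-uuid', 2)
index = plugin._get_port_vnic_index(context, 'port-uuid')  # -> 2
plugin._delete_port_vnic_index_mapping(context, 'port-uuid')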
|
@ -18,8 +18,8 @@
|
||||
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
|
||||
from neutron.common import constants as const
|
||||
from neutron.common import topics
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
|
||||
|
||||
class DhcpAgentNotifyAPI(dhcp_rpc_agent_api.DhcpAgentNotifyAPI):
|
||||
|
@ -24,11 +24,11 @@ from neutron.i18n import _LE, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware.dbexts import lsn_db
|
||||
from neutron.plugins.vmware.dhcp_meta import constants as const
|
||||
from neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
from neutron.plugins.vmware.nsxlib import switch as switch_api
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants as const
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switch_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -21,8 +21,8 @@ from neutron.extensions import external_net
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -27,8 +27,8 @@ from neutron.extensions import external_net
|
||||
from neutron.i18n import _LE, _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import exceptions as p_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import constants as d_const
|
||||
from neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants as d_const
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsn_api
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -28,8 +28,8 @@ from neutron.db import models_v2
|
||||
from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import config
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from vmware_nsx.neutron.plugins.vmware.common import config
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -27,14 +27,14 @@ from neutron.common import topics
|
||||
from neutron.db import agents_db
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.common import config
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.dhcp_meta import combined
|
||||
from neutron.plugins.vmware.dhcp_meta import lsnmanager
|
||||
from neutron.plugins.vmware.dhcp_meta import migration
|
||||
from neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
from neutron.plugins.vmware.extensions import lsn
|
||||
from vmware_nsx.neutron.plugins.vmware.common import config
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import combined
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import lsnmanager
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import migration
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx as nsx_svc
|
||||
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc as nsx_rpc
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import lsn
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -0,0 +1,70 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
|
||||
|
||||
def convert_to_boolean_if_not_none(data):
|
||||
if data is not None:
|
||||
return attributes.convert_to_boolean(data)
|
||||
return data
|
||||
|
||||
|
||||
DISTRIBUTED = 'distributed'
|
||||
EXTENDED_ATTRIBUTES_2_0 = {
|
||||
'routers': {
|
||||
DISTRIBUTED: {'allow_post': True, 'allow_put': False,
|
||||
'convert_to': convert_to_boolean_if_not_none,
|
||||
'default': attributes.ATTR_NOT_SPECIFIED,
|
||||
'is_visible': True},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
class Distributedrouter(object):
|
||||
"""Extension class supporting distributed router."""
|
||||
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "Distributed Router"
|
||||
|
||||
@classmethod
|
||||
def get_alias(cls):
|
||||
return "dist-router"
|
||||
|
||||
@classmethod
|
||||
def get_description(cls):
|
||||
return "Enables configuration of NSX Distributed routers."
|
||||
|
||||
@classmethod
|
||||
def get_namespace(cls):
|
||||
return "http://docs.openstack.org/ext/dist-router/api/v1.0"
|
||||
|
||||
@classmethod
|
||||
def get_updated(cls):
|
||||
return "2013-08-1T10:00:00-00:00"
|
||||
|
||||
def get_required_extensions(self):
|
||||
return ["router"]
|
||||
|
||||
@classmethod
|
||||
def get_resources(cls):
|
||||
"""Returns Ext Resources."""
|
||||
return []
|
||||
|
||||
def get_extended_resources(self, version):
|
||||
if version == "2.0":
|
||||
return EXTENDED_ATTRIBUTES_2_0
|
||||
else:
|
||||
return {}
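For illustration (request body invented), with this extension loaded a router create call may carry the new attribute; allow_put is False, so it can only be set at creation time, and the converter above coerces the value to a boolean:

# Hypothetical POST /v2.0/routers body; not part of this change.
body = {'router': {'name': 'dist-r1',
                   'tenant_id': 'tenant-uuid',
                   'distributed': 'true'}}  # converted to the boolean True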
|
@ -0,0 +1,56 @@
|
||||
# Copyright 2014 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
# Attribute Map
|
||||
METADATA_PROVIDERS = 'metadata_providers'
|
||||
|
||||
|
||||
EXTENDED_ATTRIBUTES_2_0 = {
|
||||
'subnets': {
|
||||
METADATA_PROVIDERS:
|
||||
{'allow_post': False,
|
||||
'allow_put': False,
|
||||
'is_visible': True,
|
||||
'default': None}}}
|
||||
|
||||
|
||||
class Metadata_providers(object):
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "Metadata Providers"
|
||||
|
||||
@classmethod
|
||||
def get_alias(cls):
|
||||
return "metadata-providers"
|
||||
|
||||
@classmethod
|
||||
def get_description(cls):
|
||||
return ("Id of the metadata providers attached to the subnet")
|
||||
|
||||
@classmethod
|
||||
def get_namespace(cls):
|
||||
return(
|
||||
"http://docs.openstack.org/ext/neutron/metadata_providers/api/v1.0"
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def get_updated(cls):
|
||||
return "2014-12-11T12:00:00-00:00"
|
||||
|
||||
def get_extended_resources(self, version):
|
||||
if version == "2.0":
|
||||
return EXTENDED_ATTRIBUTES_2_0
|
||||
else:
|
||||
return {}
|
@ -19,7 +19,7 @@ from oslo.config import cfg
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
from neutron.api.v2 import resource_helper
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
|
||||
GATEWAY_RESOURCE_NAME = "network_gateway"
|
||||
DEVICE_RESOURCE_NAME = "gateway_device"
|
||||
|
@ -17,7 +17,7 @@
|
||||
# TODO(arosen): This is deprecated in Juno, and
|
||||
# to be removed in Kxxxx.
|
||||
|
||||
from neutron.plugins.vmware.extensions import qos
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import qos
|
||||
|
||||
|
||||
class Nvp_qos(qos.Qos):
|
||||
|
61
vmware_nsx/neutron/plugins/vmware/extensions/vnic_index.py
Normal file
@ -0,0 +1,61 @@
|
||||
# Copyright 2013 VMware, Inc. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from neutron.api.v2 import attributes
|
||||
|
||||
# Attribute Map
|
||||
VNIC_INDEX = 'vnic_index'
|
||||
|
||||
|
||||
def convert_to_int_if_not_none(data):
|
||||
if data is not None:
|
||||
return attributes.convert_to_int(data)
|
||||
return data
|
||||
|
||||
EXTENDED_ATTRIBUTES_2_0 = {
|
||||
'ports': {
|
||||
VNIC_INDEX:
|
||||
{'allow_post': True,
|
||||
'allow_put': True,
|
||||
'is_visible': True,
|
||||
'default': None,
|
||||
'convert_to': convert_to_int_if_not_none}}}
|
||||
|
||||
|
||||
class Vnic_index(object):
|
||||
@classmethod
|
||||
def get_name(cls):
|
||||
return "VNIC Index"
|
||||
|
||||
@classmethod
|
||||
def get_alias(cls):
|
||||
return "vnic-index"
|
||||
|
||||
@classmethod
|
||||
def get_description(cls):
|
||||
return ("Enable a port to be associated with a VNIC index")
|
||||
|
||||
@classmethod
|
||||
def get_namespace(cls):
|
||||
return "http://docs.openstack.org/ext/neutron/vnic_index/api/v1.0"
|
||||
|
||||
@classmethod
|
||||
def get_updated(cls):
|
||||
return "2014-09-15T12:00:00-00:00"
|
||||
|
||||
def get_extended_resources(self, version):
|
||||
if version == "2.0":
|
||||
return EXTENDED_ATTRIBUTES_2_0
|
||||
else:
|
||||
return {}
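Similarly, a hedged example of the port-level attribute this extension adds; the value passes through convert_to_int_if_not_none, so a string form is accepted:

# Hypothetical PUT /v2.0/ports/<port-id> body; values invented.
body = {'port': {'vnic_index': '3'}}  # stored as the integer 3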
|
@ -19,9 +19,9 @@ from oslo.serialization import jsonutils
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import switch
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
|
@ -19,8 +19,8 @@ from neutron.common import exceptions as exception
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
|
@ -20,8 +20,8 @@ from neutron.api.v2 import attributes as attr
|
||||
from neutron.common import exceptions as exception
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_POST = "POST"
|
||||
HTTP_DELETE = "DELETE"
|
||||
|
@ -22,10 +22,10 @@ from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from neutron.plugins.vmware.nsxlib import switch
|
||||
from neutron.plugins.vmware.nsxlib import versioning
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import versioning
|
||||
|
||||
# @versioning.versioned decorator makes the apparent function body
|
||||
# totally unrelated to the real function. This confuses pylint :(
|
||||
@ -630,7 +630,7 @@ def update_lrouter_port_ips(cluster, lrouter_id, lport_id,
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
except api_exc.NsxApiException as e:
|
||||
msg = _("An exception occurred while updating IP addresses on a "
|
||||
"router logical port:%s") % str(e)
|
||||
"router logical port:%s") % e
|
||||
LOG.exception(msg)
|
||||
raise nsx_exc.NsxPluginException(err_msg=msg)
|
||||
|
||||
|
@ -20,8 +20,8 @@ from neutron.common import constants
|
||||
from neutron.common import exceptions
|
||||
from neutron.i18n import _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
|
@ -23,8 +23,8 @@ from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import log
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import utils
|
||||
from neutron.plugins.vmware import nsxlib
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils
|
||||
from vmware_nsx.neutron.plugins.vmware import nsxlib
|
||||
|
||||
HTTP_GET = "GET"
|
||||
HTTP_POST = "POST"
|
||||
@ -185,8 +185,8 @@ def delete_port(cluster, switch, port):
|
||||
uri = "/ws.v1/lswitch/" + switch + "/lport/" + port
|
||||
try:
|
||||
nsxlib.do_request(HTTP_DELETE, uri, cluster=cluster)
|
||||
except exception.NotFound:
|
||||
LOG.exception(_LE("Port or Network not found"))
|
||||
except exception.NotFound as e:
|
||||
LOG.error(_LE("Port or Network not found, Error: %s"), str(e))
|
||||
raise exception.PortNotFoundOnNetwork(
|
||||
net_id=switch, port_id=port)
|
||||
except api_exc.NsxApiException:
|
||||
|
@ -15,6 +15,8 @@
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.plugins.vmware.plugins import base
|
||||
from vmware_nsx.neutron.plugins.vmware.plugins import base
|
||||
from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v
|
||||
|
||||
NsxPlugin = base.NsxPluginV2
|
||||
NsxVPlugin = nsx_v.NsxVPluginV2
|
||||
|
@ -55,27 +55,27 @@ from neutron.i18n import _LE, _LI, _LW
|
||||
from neutron.openstack.common import lockutils
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.common import constants as plugin_const
|
||||
from neutron.plugins import vmware
|
||||
from neutron.plugins.vmware.api_client import exception as api_exc
|
||||
from neutron.plugins.vmware.common import config # noqa
|
||||
from neutron.plugins.vmware.common import exceptions as nsx_exc
|
||||
from neutron.plugins.vmware.common import nsx_utils
|
||||
from neutron.plugins.vmware.common import securitygroups as sg_utils
|
||||
from neutron.plugins.vmware.common import sync
|
||||
from neutron.plugins.vmware.common import utils as c_utils
|
||||
from neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from neutron.plugins.vmware.dbexts import maclearning as mac_db
|
||||
from neutron.plugins.vmware.dbexts import networkgw_db
|
||||
from neutron.plugins.vmware.dbexts import qos_db
|
||||
from neutron.plugins.vmware import dhcpmeta_modes
|
||||
from neutron.plugins.vmware.extensions import maclearning as mac_ext
|
||||
from neutron.plugins.vmware.extensions import networkgw
|
||||
from neutron.plugins.vmware.extensions import qos
|
||||
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
|
||||
from neutron.plugins.vmware.nsxlib import queue as queuelib
|
||||
from neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
|
||||
from neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
from vmware_nsx.neutron.plugins import vmware
|
||||
from vmware_nsx.neutron.plugins.vmware.common import config # noqa
|
||||
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.common import securitygroups as sg_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.common import sync
|
||||
from vmware_nsx.neutron.plugins.vmware.common import utils as c_utils
|
||||
from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
|
||||
from vmware_nsx.neutron.plugins.vmware import dhcpmeta_modes
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import networkgw
|
||||
from vmware_nsx.neutron.plugins.vmware.extensions import qos
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import queue as queuelib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
|
||||
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
1855  vmware_nsx/neutron/plugins/vmware/plugins/nsx_v.py  (new file; diff suppressed because it is too large)
367   vmware_nsx/neutron/plugins/vmware/plugins/nsx_v_md_proxy.py  (new file)
@ -0,0 +1,367 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import netaddr
from oslo.config import cfg
from oslo.db import exception as db_exc

from neutron.api.v2 import attributes as attr
from neutron.common import constants
from neutron import context as neutron_context
from neutron.openstack.common import log as logging
from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
from vmware_nsx.neutron.plugins.vmware.vshield import (
    nsxv_loadbalancer as nsxv_lb)
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
    constants as vcns_const)
from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils


METADATA_IP_ADDR = '169.254.169.254'
METADATA_TCP_PORT = 80
INTERNAL_SUBNET = '169.254.0.0/16'

LOG = logging.getLogger(__name__)


class NsxVMetadataProxyHandler:

    def __init__(self, nsxv_plugin):
        self.nsxv_plugin = nsxv_plugin
        self.context = neutron_context.get_admin_context()

        self.internal_net, self.internal_subnet = self._get_internal_network()

        if not self.internal_net or not self.internal_subnet:
            self.internal_net, self.internal_subnet = (
                self._create_internal_network())

        self.proxy_edge_ids, self.proxy_edge_ips = self._get_proxy_edges()
        if not self.proxy_edge_ids or not self.proxy_edge_ips:
            self.proxy_edge_ids, self.proxy_edge_ips = (
                self._create_proxy_edges())

    def _create_metadata_internal_network(self, cidr):
        net_data = {'network': {'name': 'inter-edge-net',
                                'admin_state_up': True,
                                'port_security_enabled': False,
                                'shared': False,
                                'tenant_id': None}}
        net = self.nsxv_plugin.create_network(self.context, net_data)

        subnet_data = {'subnet':
                       {'cidr': cidr,
                        'name': 'inter-edge-subnet',
                        'gateway_ip': attr.ATTR_NOT_SPECIFIED,
                        'allocation_pools': attr.ATTR_NOT_SPECIFIED,
                        'ip_version': 4,
                        'dns_nameservers': attr.ATTR_NOT_SPECIFIED,
                        'host_routes': attr.ATTR_NOT_SPECIFIED,
                        'enable_dhcp': False,
                        'network_id': net['id'],
                        'tenant_id': None}}

        subnet = self.nsxv_plugin.create_subnet(
            self.context,
            subnet_data)

        return net['id'], subnet['id']

    def _get_internal_network(self):
        internal_net = None
        internal_subnet = None

        net_list = nsxv_db.get_nsxv_internal_network(
            self.context.session,
            nsxv_constants.INTER_EDGE_PURPOSE)

        if net_list:
            internal_net = net_list[0]['network_id']
            internal_subnet = self.nsxv_plugin.get_subnets(
                self.context,
                fields=['id'],
                filters={'network_id': [internal_net]})[0]['id']

        return internal_net, internal_subnet

    def _create_internal_network(self):
        internal_net, internal_subnet = (
            self._create_metadata_internal_network(INTERNAL_SUBNET))

        try:
            nsxv_db.create_nsxv_internal_network(
                self.context.session,
                nsxv_constants.INTER_EDGE_PURPOSE,
                internal_net)
        except db_exc.DBDuplicateEntry:
            # We may have a race condition, where another Neutron instance
            # initialized these elements. Delete and use existing elements
            self.nsxv_plugin.delete_network(self.context, internal_net)
            internal_net, internal_subnet = self._get_internal_network()

        return internal_net, internal_subnet

    def _get_proxy_edges(self):
        proxy_edge_ids = []
        proxy_edge_ips = []

        rtr_list = nsxv_db.get_nsxv_internal_edges_by_purpose(
            self.context.session,
            nsxv_constants.INTER_EDGE_PURPOSE)

        for rtr in rtr_list:
            rtr_id = rtr['router_id']
            proxy_edge_ids.append(rtr_id)
            proxy_edge_ips.append(self._get_edge_internal_ip(rtr_id))

        return proxy_edge_ids, proxy_edge_ips

    def _get_edge_internal_ip(self, rtr_id):
        filters = {
            'network_id': [self.internal_net],
            'device_id': [rtr_id]}
        ports = self.nsxv_plugin.get_ports(self.context, filters=filters)
        return ports[0]['fixed_ips'][0]['ip_address']

    def _create_proxy_edges(self):
        proxy_edge_ids = []
        proxy_edge_ips = []

        for rtr_ip in cfg.CONF.nsxv.mgt_net_proxy_ips:
            router_data = {
                'router': {
                    'name': 'metadata_proxy_router',
                    'admin_state_up': True,
                    'tenant_id': None}}

            rtr = self.nsxv_plugin.create_router(
                self.context,
                router_data,
                allow_metadata=False)

            rtr_id = rtr['id']
            binding = nsxv_db.get_nsxv_router_binding(
                self.context.session,
                rtr_id)

            self.nsxv_plugin.nsx_v.update_interface(
                rtr['id'],
                binding['edge_id'],
                vcns_const.EXTERNAL_VNIC_INDEX,
                cfg.CONF.nsxv.mgt_net_moid,
                address=rtr_ip,
                netmask=cfg.CONF.nsxv.mgt_net_proxy_netmask,
                secondary=[])

            port_data = {
                'port': {
                    'network_id': self.internal_net,
                    'name': None,
                    'admin_state_up': True,
                    'device_id': rtr_id,
                    'device_owner': constants.DEVICE_OWNER_ROUTER_INTF,
                    'fixed_ips': attr.ATTR_NOT_SPECIFIED,
                    'mac_address': attr.ATTR_NOT_SPECIFIED,
                    'port_security_enabled': False,
                    'tenant_id': None}}

            port = self.nsxv_plugin.create_port(self.context, port_data)

            address_groups = self._get_address_groups(
                self.context, self.internal_net, rtr_id, is_proxy=True)

            edge_ip = port['fixed_ips'][0]['ip_address']
            edge_utils.update_internal_interface(
                self.nsxv_plugin.nsx_v, self.context, rtr_id,
                self.internal_net, address_groups)

            self._setup_metadata_lb(rtr_id,
                                    port['fixed_ips'][0]['ip_address'],
                                    cfg.CONF.nsxv.nova_metadata_port,
                                    cfg.CONF.nsxv.nova_metadata_port,
                                    cfg.CONF.nsxv.nova_metadata_ips,
                                    proxy_lb=True)

            firewall_rule = {
                'action': 'allow',
                'enabled': True,
                'source_ip_address': [INTERNAL_SUBNET]}

            edge_utils.update_firewall(
                self.nsxv_plugin.nsx_v,
                self.context,
                rtr_id,
                {'firewall_rule_list': [firewall_rule]},
                allow_external=False)

            # If DB Entry already defined by another Neutron instance, remove
            # and resume
            try:
                nsxv_db.create_nsxv_internal_edge(
                    self.context.session,
                    rtr_ip,
                    nsxv_constants.INTER_EDGE_PURPOSE,
                    rtr_id)
            except db_exc.DBDuplicateEntry:
                self.nsxv_plugin.delete_router(self.context, rtr_id)
                rtr_id = nsxv_db.get_nsxv_internal_edge(self.context, rtr_ip)
                edge_ip = self._get_edge_internal_ip(rtr_id)

            proxy_edge_ids.append(rtr_id)
            proxy_edge_ips.append(edge_ip)
        return proxy_edge_ids, proxy_edge_ips

    def _get_address_groups(self, context, network_id, device_id, is_proxy):

        filters = {'network_id': [network_id],
                   'device_id': [device_id]}
        ports = self.nsxv_plugin.get_ports(context, filters=filters)

        subnets = self.nsxv_plugin.get_subnets(context, filters=filters)

        address_groups = []
        for subnet in subnets:
            address_group = {}
            net = netaddr.IPNetwork(subnet['cidr'])
            address_group['subnetMask'] = str(net.netmask)
            address_group['subnetPrefixLength'] = str(net.prefixlen)
            for port in ports:
                fixed_ips = port['fixed_ips']
                for fip in fixed_ips:
                    s_id = fip['subnet_id']
                    ip_addr = fip['ip_address']
                    if s_id == subnet['id'] and netaddr.valid_ipv4(ip_addr):
                        address_group['primaryAddress'] = ip_addr
                        break

            # For Edge appliances which aren't the metadata proxy Edge
            # we add the metadata IP address
            if not is_proxy and network_id == self.internal_net:
                address_group['secondaryAddresses'] = {
                    'type': 'secondary_addresses',
                    'ipAddress': [METADATA_IP_ADDR]}

            address_groups.append(address_group)
        return address_groups

    def _setup_metadata_lb(
            self, rtr_id, vip, v_port, s_port, member_ips, proxy_lb=False):

        binding = nsxv_db.get_nsxv_router_binding(self.context.session, rtr_id)
        edge_id = binding['edge_id']
        LOG.debug('Setting up Edge device %s', edge_id)

        lb_obj = nsxv_lb.NsxvLoadbalancer()

        # Create virtual server
        virt_srvr = nsxv_lb.NsxvLBVirtualServer(
            name='MdSrv',
            ip_address=vip,
            port=v_port)

        # For router Edge, we add X-LB-Proxy-ID header
        if not proxy_lb:
            app_rule = nsxv_lb.NsxvLBAppRule(
                'insert-reqadd',
                'reqadd X-Metadata-Provider:' + edge_id)
            virt_srvr.add_app_rule(app_rule)

        # Create app profile
        # XFF is inserted in router LBs
        app_profile = nsxv_lb.NsxvLBAppProfile(
            name='MDSrvProxy',
            template='HTTP',
            insert_xff=not proxy_lb)

        virt_srvr.set_app_profile(app_profile)

        # Create pool, members and monitor
        pool = nsxv_lb.NsxvLBPool(
            name='MDSrvPool')

        monitor = nsxv_lb.NsxvLBMonitor(
            name='MDSrvMon')
        pool.add_monitor(monitor)

        i = 0
        for member_ip in member_ips:
            i += 1
            member = nsxv_lb.NsxvLBPoolMember(
                name='Member-%d' % i,
                ip_address=member_ip,
                port=s_port,
                monitor_port=s_port)
            pool.add_member(member)

        virt_srvr.set_default_pool(pool)
        lb_obj.add_virtual_server(virt_srvr)

        lb_obj.submit_to_backend(
            self.nsxv_plugin.nsx_v.vcns,
            edge_id)

    def configure_router_edge(self, rtr_id):
        # Connect router interface to inter-edge network
        port_data = {
            'port': {
                'network_id': self.internal_net,
                'name': None,
                'admin_state_up': True,
                'device_id': rtr_id,
                'device_owner': constants.DEVICE_OWNER_ROUTER_GW,
                'fixed_ips': attr.ATTR_NOT_SPECIFIED,
                'mac_address': attr.ATTR_NOT_SPECIFIED,
                'port_security_enabled': False,
                'tenant_id': None}}

        self.nsxv_plugin.create_port(self.context, port_data)

        address_groups = self._get_address_groups(
            self.context,
            self.internal_net,
            rtr_id,
            is_proxy=False)

        edge_utils.update_internal_interface(
            self.nsxv_plugin.nsx_v,
            self.context,
            rtr_id,
            self.internal_net,
            address_groups=address_groups)

        self._setup_metadata_lb(rtr_id,
                                METADATA_IP_ADDR,
                                METADATA_TCP_PORT,
                                cfg.CONF.nsxv.nova_metadata_port,
                                self.proxy_edge_ips,
                                proxy_lb=False)

    def get_router_fw_rules(self):
        fw_rules = [
            {
                'name': 'MDServiceIP',
                'enabled': True,
                'action': 'allow',
                'destination_ip_address': [METADATA_IP_ADDR]
            },
            {
                'name': 'MDInterEdgeNet',
                'enabled': True,
                'action': 'deny',
                'destination_ip_address': [INTERNAL_SUBNET]
            }]

        return fw_rules
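For orientation, a minimal sketch of how a core plugin might wire this handler in; the class and method names below are illustrative only, since the actual NsxVPluginV2 integration lives in nsx_v.py, whose diff is suppressed above:

    from vmware_nsx.neutron.plugins.vmware.plugins import nsx_v_md_proxy

    class SomeNsxVPlugin(object):  # hypothetical stand-in for the real plugin

        def _init_metadata(self):
            # Creates or reuses the 169.254.0.0/16 inter-edge network and the
            # proxy edges listed in cfg.CONF.nsxv.mgt_net_proxy_ips.
            self.metadata_proxy_handler = (
                nsx_v_md_proxy.NsxVMetadataProxyHandler(self))

        def _attach_router_to_metadata(self, rtr_id):
            # Plumb a tenant router edge into the metadata path and collect
            # the extra firewall rules it needs.
            self.metadata_proxy_handler.configure_router_edge(rtr_id)
            return self.metadata_proxy_handler.get_router_fw_rules()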
@ -16,8 +16,8 @@

import sys

from neutron.plugins.vmware.shell import commands as cmd
from neutronclient import shell
from vmware_nsx.neutron.plugins.vmware.shell import commands as cmd


class NsxManage(shell.NeutronShell):
@ -17,24 +17,48 @@ import base64
import eventlet
from oslo.serialization import jsonutils

from neutron.plugins.vmware.vshield.common import exceptions
from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions

httplib2 = eventlet.import_patched('httplib2')


def xmldumps(obj):
def _xmldump(obj):
    """Sort of improved xml creation method.

    This converts the dict to xml with the following assumptions:
    keys starting with _ (underscore) are to be used as attributes and not
    elements; keys starting with @ are only there so that the dict can be
    made. The keys are not part of any xml schema.
    """

    config = ""
    attr = ""
    if isinstance(obj, dict):
        for key, value in obj.iteritems():
            cfg = "<%s>%s</%s>" % (key, xmldumps(value), key)
            if (key.startswith('_')):
                attr += ' %s="%s"' % (key[1:], value)
            else:
                a, x = _xmldump(value)
                if (key.startswith('@')):
                    cfg = "%s" % (x)
                else:
                    cfg = "<%s%s>%s</%s>" % (key, a, x, key)

                config += cfg
    elif isinstance(obj, list):
        for value in obj:
            config += xmldumps(value)
            a, x = _xmldump(value)
            attr += a
            config += x
    else:
        config = obj

    return config
    return attr, config


def xmldumps(obj):
    attr, xml = _xmldump(obj)
    return xml


class VcnsApiHelper(object):
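For reference, a short trace of what the reworked serializer emits under the docstring's conventions; single-key dicts are used so the result does not depend on dict iteration order, and the sample data is made up:

    # Underscore keys become XML attributes of the enclosing element:
    xmldumps({'rule': {'_id': '5'}})
    # -> '<rule id="5"></rule>'

    # '@' keys are unwrapped, so only their children appear in the output:
    xmldumps({'vnic': {'@group': {'primaryAddress': '10.0.0.1'}}})
    # -> '<vnic><primaryAddress>10.0.0.1</primaryAddress></vnic>'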
@ -43,6 +67,7 @@ class VcnsApiHelper(object):
        400: exceptions.RequestBad,
        403: exceptions.Forbidden,
        404: exceptions.ResourceNotFound,
        409: exceptions.ServiceConflict,
        415: exceptions.MediaTypeUnsupport,
        503: exceptions.ServiceUnavailable
    }
@ -58,16 +83,22 @@ class VcnsApiHelper(object):
        else:
            self.encode = xmldumps

    def request(self, method, uri, params=None):
    def request(self, method, uri, params=None, headers=None,
                encodeparams=True):
        uri = self.address + uri
        http = httplib2.Http()
        http.disable_ssl_certificate_validation = True
        headers = {
            'Content-Type': 'application/' + self.format,
            'Accept': 'application/' + 'json',
            'Authorization': 'Basic ' + self.authToken
        }
        if headers is None:
            headers = {}

        headers['Content-Type'] = 'application/' + self.format
        headers['Accept'] = 'application/' + self.format,
        headers['Authorization'] = 'Basic ' + self.authToken

        if encodeparams is True:
            body = self.encode(params) if params else None
        else:
            body = params if params else None
        header, response = http.request(uri, method,
                                        body=body, headers=headers)
        status = int(header['status'])
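A minimal sketch of how a caller might use the extended request() signature; the helper instance, URI and header below are placeholders for illustration, not values taken from this change:

    # Assumes an already-constructed VcnsApiHelper ("helper").
    extra_headers = {'If-Match': 'etag-from-a-previous-get'}
    body = '<edge><name>edge-1</name></edge>'  # caller-serialized payload
    # encodeparams=False skips self.encode(), so the body is sent as-is;
    # the extra header is merged with the defaults set inside request().
    helper.request('PUT', '/api/4.0/edges/edge-1', params=body,
                   headers=extra_headers, encodeparams=False)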
@ -13,14 +13,29 @@
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg

from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants


EDGE_ID = 'edge_id'
ROUTER_ID = 'router_id'
DHCP_EDGE_PREFIX = 'dhcp-'
ROUTER_EDGE_PREFIX = 'router-'
PLR_EDGE_PREFIX = 'plr-'
BACKUP_ROUTER_PREFIX = 'backup-'
EDGE_NAME_LEN = 20

# Interface
EXTERNAL_VNIC_INDEX = 0
INTERNAL_VNIC_INDEX = 1
EXTERNAL_VNIC_NAME = "external"
INTERNAL_VNIC_NAME = "internal"
MAX_VNIC_NUM = 10
MAX_TUNNEL_NUM = (cfg.CONF.nsxv.maximum_tunnels_per_vnic if
                  (cfg.CONF.nsxv.maximum_tunnels_per_vnic < 110 and
                   cfg.CONF.nsxv.maximum_tunnels_per_vnic > 0)
                  else 10)

INTEGRATION_LR_IPADDRESS = "169.254.2.1/28"
INTEGRATION_EDGE_IPADDRESS = "169.254.2.3"
@ -35,6 +50,20 @@ VCNS_ERROR_CODE_EDGE_NOT_RUNNING = 10013

SUFFIX_LENGTH = 8

#Edge size
SERVICE_SIZE_MAPPING = {
    'router': nsxv_constants.LARGE,
    'dhcp': nsxv_constants.COMPACT
}
ALLOWED_EDGE_SIZES = (nsxv_constants.COMPACT,
                      nsxv_constants.LARGE,
                      nsxv_constants.XLARGE,
                      nsxv_constants.QUADLARGE)

#Edge type
ALLOWED_EDGE_TYPES = (nsxv_constants.SERVICE_EDGE,
                      nsxv_constants.VDR_EDGE)


# router status by number
class RouterStatus(object):
@ -43,3 +72,7 @@ class RouterStatus(object):
    ROUTER_STATUS_PENDING_CREATE = 2
    ROUTER_STATUS_PENDING_DELETE = 3
    ROUTER_STATUS_ERROR = 4


class InternalEdgePurposes(object):
    INTER_EDGE_PURPOSE = 'inter_edge_net'
@ -66,3 +66,7 @@ class MediaTypeUnsupport(VcnsApiException):

class ServiceUnavailable(VcnsApiException):
    message = _("Service Unavailable: %(uri)s")


class ServiceConflict(VcnsApiException):
    message = _("Concurrent object access error: %(uri)s")
@ -1,3 +1,5 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -12,16 +14,21 @@
# License for the specific language governing permissions and limitations
# under the License.

import time

from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import excutils

from neutron.i18n import _LE
from neutron.i18n import _LE, _LI, _LW
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.vshield.common import constants as vcns_const
from neutron.plugins.vmware.vshield.common import exceptions
from neutron.plugins.vmware.vshield.tasks import constants
from neutron.plugins.vmware.vshield.tasks import tasks
from vmware_nsx.neutron.plugins.vmware.common import nsxv_constants
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware.vshield.common import constants
from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
    constants as task_constants)
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks

LOG = logging.getLogger(__name__)

@ -36,13 +43,11 @@ class EdgeApplianceDriver(object):
|
||||
|
||||
def _assemble_edge(self, name, appliance_size="compact",
|
||||
deployment_container_id=None, datacenter_moid=None,
|
||||
enable_aesni=True, hypervisor_assist=False,
|
||||
enable_aesni=True, dist=False,
|
||||
enable_fips=False, remote_access=False):
|
||||
edge = {
|
||||
'name': name,
|
||||
'fqdn': name,
|
||||
'hypervisorAssist': hypervisor_assist,
|
||||
'type': 'gatewayServices',
|
||||
'enableAesni': enable_aesni,
|
||||
'enableFips': enable_fips,
|
||||
'cliSettings': {
|
||||
@ -51,10 +56,14 @@ class EdgeApplianceDriver(object):
|
||||
'appliances': {
|
||||
'applianceSize': appliance_size
|
||||
},
|
||||
'vnics': {
|
||||
'vnics': []
|
||||
}
|
||||
}
|
||||
if not dist:
|
||||
edge['type'] = "gatewayServices"
|
||||
edge['vnics'] = {'vnics': []}
|
||||
else:
|
||||
edge['type'] = "distributedRouter"
|
||||
edge['interfaces'] = {'interfaces': []}
|
||||
|
||||
if deployment_container_id:
|
||||
edge['appliances']['deploymentContainerId'] = (
|
||||
deployment_container_id)
|
||||
@ -71,14 +80,15 @@ class EdgeApplianceDriver(object):
|
||||
appliance['datastoreId'] = datastore_id
|
||||
return appliance
|
||||
|
||||
def _assemble_edge_vnic(self, name, index, portgroup_id,
|
||||
def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1,
|
||||
primary_address=None, subnet_mask=None,
|
||||
secondary=None,
|
||||
type="internal",
|
||||
enable_proxy_arp=False,
|
||||
enable_send_redirects=True,
|
||||
is_connected=True,
|
||||
mtu=1500):
|
||||
mtu=1500,
|
||||
address_groups=None):
|
||||
vnic = {
|
||||
'index': index,
|
||||
'name': name,
|
||||
@ -89,6 +99,9 @@ class EdgeApplianceDriver(object):
|
||||
'enableSendRedirects': enable_send_redirects,
|
||||
'isConnected': is_connected
|
||||
}
|
||||
if address_groups is None:
|
||||
address_groups = []
|
||||
if not address_groups:
|
||||
if primary_address and subnet_mask:
|
||||
address_group = {
|
||||
'primaryAddress': primary_address,
|
||||
@ -97,22 +110,65 @@ class EdgeApplianceDriver(object):
|
||||
if secondary:
|
||||
address_group['secondaryAddresses'] = {
|
||||
'ipAddress': secondary,
|
||||
'type': 'IpAddressesDto'
|
||||
'type': 'secondary_addresses'
|
||||
}
|
||||
|
||||
vnic['addressGroups'] = {
|
||||
'addressGroups': [address_group]
|
||||
}
|
||||
else:
|
||||
vnic['subInterfaces'] = {'subInterfaces': address_groups}
|
||||
else:
|
||||
if tunnel_index < 0:
|
||||
vnic['addressGroups'] = {'addressGroups': address_groups}
|
||||
else:
|
||||
vnic['subInterfaces'] = {'subInterfaces': address_groups}
|
||||
|
||||
return vnic
|
||||
|
||||
def _assemble_vdr_interface(self, portgroup_id,
|
||||
primary_address=None, subnet_mask=None,
|
||||
secondary=None,
|
||||
type="internal",
|
||||
is_connected=True,
|
||||
mtu=1500,
|
||||
address_groups=None):
|
||||
interface = {
|
||||
'type': type,
|
||||
'connectedToId': portgroup_id,
|
||||
'mtu': mtu,
|
||||
'isConnected': is_connected
|
||||
}
|
||||
if address_groups is None:
|
||||
address_groups = []
|
||||
if not address_groups:
|
||||
if primary_address and subnet_mask:
|
||||
address_group = {
|
||||
'primaryAddress': primary_address,
|
||||
'subnetMask': subnet_mask
|
||||
}
|
||||
if secondary:
|
||||
address_group['secondaryAddresses'] = {
|
||||
'ipAddress': secondary,
|
||||
'type': 'secondary_addresses'
|
||||
}
|
||||
|
||||
interface['addressGroups'] = {
|
||||
'addressGroups': [address_group]
|
||||
}
|
||||
else:
|
||||
interface['addressGroups'] = {'addressGroups': address_groups}
|
||||
interfaces = {'interfaces': [interface]}
|
||||
|
||||
return interfaces
|
||||
|
||||
def _edge_status_to_level(self, status):
|
||||
if status == 'GREEN':
|
||||
status_level = vcns_const.RouterStatus.ROUTER_STATUS_ACTIVE
|
||||
status_level = constants.RouterStatus.ROUTER_STATUS_ACTIVE
|
||||
elif status in ('GREY', 'YELLOW'):
|
||||
status_level = vcns_const.RouterStatus.ROUTER_STATUS_DOWN
|
||||
status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
|
||||
else:
|
||||
status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
|
||||
status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
|
||||
return status_level
|
||||
|
||||
def _enable_loadbalancer(self, edge):
|
||||
@ -131,13 +187,12 @@ class EdgeApplianceDriver(object):
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Failed to get edge status:\n%s"),
|
||||
e.response)
|
||||
status_level = vcns_const.RouterStatus.ROUTER_STATUS_ERROR
|
||||
status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
|
||||
try:
|
||||
desc = jsonutils.loads(e.response)
|
||||
if desc.get('errorCode') == (
|
||||
vcns_const.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
|
||||
status_level = (
|
||||
vcns_const.RouterStatus.ROUTER_STATUS_DOWN)
|
||||
constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
|
||||
status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
|
||||
except ValueError:
|
||||
LOG.exception(e.response)
|
||||
|
||||
@ -153,59 +208,131 @@ class EdgeApplianceDriver(object):
|
||||
|
||||
return edges_status_level
|
||||
|
||||
def _update_interface(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
config = task.userdata['config']
|
||||
LOG.debug("VCNS: start updating vnic %s", config)
|
||||
def get_interface(self, edge_id, vnic_index):
|
||||
self.check_edge_jobs(edge_id)
|
||||
# get vnic interface address groups
|
||||
try:
|
||||
self.vcns.update_interface(edge_id, config)
|
||||
except exceptions.VcnsApiException as e:
|
||||
return self.vcns.query_interface(edge_id, vnic_index)
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: Failed to update vnic %(config)s:\n"
|
||||
"%(response)s"), {
|
||||
'config': config,
|
||||
'response': e.response})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: Failed to update vnic %d"),
|
||||
config['index'])
|
||||
LOG.exception(_LE("NSXv: Failed to query vnic %s"), vnic_index)
|
||||
|
||||
return constants.TaskStatus.COMPLETED
|
||||
def check_edge_jobs(self, edge_id):
|
||||
retries = max(cfg.CONF.nsxv.retries, 1)
|
||||
delay = 0.5
|
||||
for attempt in range(1, retries + 1):
|
||||
if attempt != 1:
|
||||
time.sleep(delay)
|
||||
delay = min(2 * delay, 60)
|
||||
h, jobs = self.vcns.get_edge_jobs(edge_id)
|
||||
if jobs['edgeJob'] == []:
|
||||
return
|
||||
LOG.warning(_LW('NSXv: jobs still running.'))
|
||||
LOG.error(_LE('NSXv: jobs are still runnings!'))
|
||||
|
||||
def update_interface(self, router_id, edge_id, index, network,
|
||||
address=None, netmask=None, secondary=None,
|
||||
jobdata=None):
|
||||
tunnel_index=-1, address=None, netmask=None,
|
||||
secondary=None, jobdata=None,
|
||||
address_groups=None):
|
||||
LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
|
||||
'index': index, 'addr': address, 'netmask': netmask})
|
||||
if index == vcns_const.EXTERNAL_VNIC_INDEX:
|
||||
name = vcns_const.EXTERNAL_VNIC_NAME
|
||||
if index == constants.EXTERNAL_VNIC_INDEX:
|
||||
name = constants.EXTERNAL_VNIC_NAME
|
||||
intf_type = 'uplink'
|
||||
elif index == vcns_const.INTERNAL_VNIC_INDEX:
|
||||
name = vcns_const.INTERNAL_VNIC_NAME
|
||||
else:
|
||||
name = constants.INTERNAL_VNIC_NAME + str(index)
|
||||
if tunnel_index < 0:
|
||||
intf_type = 'internal'
|
||||
else:
|
||||
msg = _("Vnic %d currently not supported") % index
|
||||
raise exceptions.VcnsGeneralException(msg)
|
||||
intf_type = 'trunk'
|
||||
|
||||
config = self._assemble_edge_vnic(
|
||||
name, index, network, address, netmask, secondary, type=intf_type)
|
||||
name, index, network, tunnel_index,
|
||||
address, netmask, secondary, type=intf_type,
|
||||
address_groups=address_groups)
|
||||
|
||||
self.vcns.update_interface(edge_id, config)
|
||||
|
||||
def add_vdr_internal_interface(self, edge_id,
|
||||
network, address=None, netmask=None,
|
||||
secondary=None, address_groups=None,
|
||||
type="internal"):
|
||||
LOG.debug("Add VDR interface on edge: %s", edge_id)
|
||||
if address_groups is None:
|
||||
address_groups = []
|
||||
interface_req = self._assemble_vdr_interface(
|
||||
network, address, netmask, secondary,
|
||||
address_groups=address_groups,
|
||||
type=type)
|
||||
self.vcns.add_vdr_internal_interface(edge_id, interface_req)
|
||||
header, response = self.vcns.get_edge_interfaces(edge_id)
|
||||
for interface in response['interfaces']:
|
||||
if interface['connectedToId'] == network:
|
||||
vnic_index = int(interface['index'])
|
||||
return vnic_index
|
||||
|
||||
def update_vdr_internal_interface(self, edge_id, index, network,
|
||||
address=None, netmask=None,
|
||||
secondary=None, address_groups=None):
|
||||
if not address_groups:
|
||||
address_groups = []
|
||||
interface_req = self._assemble_vdr_interface(
|
||||
network, address, netmask, secondary,
|
||||
address_groups=address_groups)
|
||||
try:
|
||||
header, response = self.vcns.update_vdr_internal_interface(
|
||||
edge_id, index, interface_req)
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to update vdr interface on edge: "
|
||||
"%s"), edge_id)
|
||||
|
||||
def delete_vdr_internal_interface(self, edge_id, interface_index):
|
||||
LOG.debug("Delete VDR interface on edge: %s", edge_id)
|
||||
try:
|
||||
header, response = self.vcns.delete_vdr_internal_interface(
|
||||
edge_id, interface_index)
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete vdr interface on edge: "
|
||||
"%s"),
|
||||
edge_id)
|
||||
|
||||
def _delete_interface(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
vnic_index = task.userdata['vnic_index']
|
||||
LOG.debug("start deleting vnic %s", vnic_index)
|
||||
try:
|
||||
self.vcns.delete_interface(edge_id, vnic_index)
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete vnic %(vnic_index)s: "
|
||||
"on edge %(edge_id)s"),
|
||||
{'vnic_index': vnic_index,
|
||||
'edge_id': edge_id})
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete vnic %d"), vnic_index)
|
||||
|
||||
return task_constants.TaskStatus.COMPLETED
|
||||
|
||||
def delete_interface(self, router_id, edge_id, index, jobdata=None):
|
||||
task_name = "delete-interface-%s-%d" % (edge_id, index)
|
||||
userdata = {
|
||||
'router_id': router_id,
|
||||
'edge_id': edge_id,
|
||||
'config': config,
|
||||
'vnic_index': index,
|
||||
'jobdata': jobdata
|
||||
}
|
||||
task_name = "update-interface-%s-%d" % (edge_id, index)
|
||||
task = tasks.Task(task_name, router_id,
|
||||
self._update_interface, userdata=userdata)
|
||||
task.add_result_monitor(self.callbacks.interface_update_result)
|
||||
task = tasks.Task(task_name, router_id, self._delete_interface,
|
||||
userdata=userdata)
|
||||
task.add_result_monitor(self.callbacks.interface_delete_result)
|
||||
self.task_manager.add(task)
|
||||
return task
|
||||
|
||||
def _deploy_edge(self, task):
|
||||
userdata = task.userdata
|
||||
name = userdata['router_name']
|
||||
LOG.debug("VCNS: start deploying edge %s", name)
|
||||
LOG.debug("NSXv: start deploying edge")
|
||||
request = userdata['request']
|
||||
try:
|
||||
header = self.vcns.deploy_edge(request)[0]
|
||||
@ -215,11 +342,10 @@ class EdgeApplianceDriver(object):
|
||||
edge_id = response['edgeId']
|
||||
LOG.debug("VCNS: deploying edge %s", edge_id)
|
||||
userdata['edge_id'] = edge_id
|
||||
status = constants.TaskStatus.PENDING
|
||||
status = task_constants.TaskStatus.PENDING
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: deploy edge failed for router %s."),
|
||||
name)
|
||||
LOG.exception(_LE("NSXv: deploy edge failed."))
|
||||
|
||||
return status
|
||||
|
||||
@ -230,16 +356,15 @@ class EdgeApplianceDriver(object):
|
||||
task.userdata['retries'] = 0
|
||||
system_status = response.get('systemStatus', None)
|
||||
if system_status is None:
|
||||
status = constants.TaskStatus.PENDING
|
||||
status = task_constants.TaskStatus.PENDING
|
||||
elif system_status == 'good':
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
else:
|
||||
status = constants.TaskStatus.ERROR
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: Edge %s status query failed."),
|
||||
edge_id)
|
||||
except Exception:
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Edge %s status query failed."), edge_id)
|
||||
raise e
|
||||
except Exception as e:
|
||||
retries = task.userdata.get('retries', 0) + 1
|
||||
if retries < 3:
|
||||
task.userdata['retries'] = retries
|
||||
@ -247,34 +372,42 @@ class EdgeApplianceDriver(object):
|
||||
"status. Retry %(retries)d."),
|
||||
{'edge_id': edge_id,
|
||||
'retries': retries})
|
||||
status = constants.TaskStatus.PENDING
|
||||
status = task_constants.TaskStatus.PENDING
|
||||
else:
|
||||
LOG.exception(_LE("VCNS: Unable to retrieve edge %s status. "
|
||||
"Abort."), edge_id)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
LOG.debug("VCNS: Edge %s status", edge_id)
|
||||
return status
|
||||
|
||||
def _result_edge(self, task):
|
||||
router_name = task.userdata['router_name']
|
||||
edge_id = task.userdata.get('edge_id')
|
||||
if task.status != constants.TaskStatus.COMPLETED:
|
||||
LOG.error(_LE("VCNS: Failed to deploy edge %(edge_id)s "
|
||||
"for %(name)s, status %(status)d"), {
|
||||
'edge_id': edge_id,
|
||||
'name': router_name,
|
||||
'status': task.status
|
||||
})
|
||||
if task.status != task_constants.TaskStatus.COMPLETED:
|
||||
LOG.error(_LE("NSXv: Failed to deploy edge %(edge_id)s "
|
||||
"status %(status)d"),
|
||||
{'edge_id': edge_id,
|
||||
'status': task.status})
|
||||
else:
|
||||
LOG.debug("VCNS: Edge %(edge_id)s deployed for "
|
||||
"router %(name)s", {
|
||||
'edge_id': edge_id, 'name': router_name
|
||||
})
|
||||
LOG.debug("NSXv: Edge %s is deployed", edge_id)
|
||||
|
||||
def _update_edge(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
LOG.debug("start update edge %s", edge_id)
|
||||
request = task.userdata['request']
|
||||
try:
|
||||
self.vcns.update_edge(edge_id, request)
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.error(_LE("Failed to update edge: %s"),
|
||||
e.response)
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
def _delete_edge(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
LOG.debug("VCNS: start destroying edge %s", edge_id)
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
if edge_id:
|
||||
try:
|
||||
self.vcns.delete_edge(edge_id)
|
||||
@ -284,10 +417,10 @@ class EdgeApplianceDriver(object):
|
||||
LOG.exception(_LE("VCNS: Failed to delete %(edge_id)s:\n"
|
||||
"%(response)s"),
|
||||
{'edge_id': edge_id, 'response': e.response})
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
except Exception:
|
||||
LOG.exception(_LE("VCNS: Failed to delete %s"), edge_id)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
@ -295,42 +428,49 @@ class EdgeApplianceDriver(object):
|
||||
try:
|
||||
return self.vcns.get_edges()[1]
|
||||
except exceptions.VcnsApiException as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: Failed to get edges:\n%s"),
|
||||
e.response)
|
||||
LOG.exception(_LE("VCNS: Failed to get edges:\n%s"), e.response)
|
||||
raise e
|
||||
|
||||
def deploy_edge(self, router_id, name, internal_network, jobdata=None,
|
||||
wait_for_exec=False, loadbalancer_enable=True):
|
||||
def deploy_edge(self, resource_id, name, internal_network, jobdata=None,
|
||||
dist=False, wait_for_exec=False, loadbalancer_enable=True,
|
||||
appliance_size=nsxv_constants.LARGE):
|
||||
task_name = 'deploying-%s' % name
|
||||
edge_name = name
|
||||
edge = self._assemble_edge(
|
||||
edge_name, datacenter_moid=self.datacenter_moid,
|
||||
deployment_container_id=self.deployment_container_id,
|
||||
appliance_size='large', remote_access=True)
|
||||
appliance_size=appliance_size, remote_access=True, dist=dist)
|
||||
appliance = self._assemble_edge_appliance(self.resource_pool_id,
|
||||
self.datastore_id)
|
||||
if appliance:
|
||||
edge['appliances']['appliances'] = [appliance]
|
||||
|
||||
if not dist:
|
||||
vnic_external = self._assemble_edge_vnic(
|
||||
vcns_const.EXTERNAL_VNIC_NAME, vcns_const.EXTERNAL_VNIC_INDEX,
|
||||
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
|
||||
self.external_network, type="uplink")
|
||||
edge['vnics']['vnics'].append(vnic_external)
|
||||
else:
|
||||
edge['mgmtInterface'] = {
|
||||
'connectedToId': self.external_network,
|
||||
'name': "mgmtInterface"}
|
||||
if internal_network:
|
||||
vnic_inside = self._assemble_edge_vnic(
|
||||
vcns_const.INTERNAL_VNIC_NAME, vcns_const.INTERNAL_VNIC_INDEX,
|
||||
constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
|
||||
internal_network,
|
||||
vcns_const.INTEGRATION_EDGE_IPADDRESS,
|
||||
vcns_const.INTEGRATION_SUBNET_NETMASK,
|
||||
constants.INTEGRATION_EDGE_IPADDRESS,
|
||||
constants.INTEGRATION_SUBNET_NETMASK,
|
||||
type="internal")
|
||||
edge['vnics']['vnics'].append(vnic_inside)
|
||||
if loadbalancer_enable:
|
||||
if not dist and loadbalancer_enable:
|
||||
self._enable_loadbalancer(edge)
|
||||
userdata = {
|
||||
'dist': dist,
|
||||
'request': edge,
|
||||
'router_name': name,
|
||||
'jobdata': jobdata
|
||||
}
|
||||
task = tasks.Task(task_name, router_id,
|
||||
task = tasks.Task(task_name, resource_id,
|
||||
self._deploy_edge,
|
||||
status_callback=self._status_edge,
|
||||
result_callback=self._result_edge,
|
||||
@ -340,19 +480,68 @@ class EdgeApplianceDriver(object):
|
||||
self.task_manager.add(task)
|
||||
|
||||
if wait_for_exec:
|
||||
# wait until the deploy task is executed so edge_id is available
|
||||
task.wait(constants.TaskState.EXECUTED)
|
||||
# wait until the deploy task is executed so edge_id is available
|
||||
task.wait(task_constants.TaskState.EXECUTED)
|
||||
|
||||
return task
|
||||
|
||||
def delete_edge(self, router_id, edge_id, jobdata=None):
|
||||
def update_edge(self, router_id, edge_id, name, internal_network,
|
||||
jobdata=None, dist=False, loadbalancer_enable=True,
|
||||
appliance_size=nsxv_constants.LARGE):
|
||||
"""Update edge name."""
|
||||
task_name = 'update-%s' % name
|
||||
edge_name = name
|
||||
edge = self._assemble_edge(
|
||||
edge_name, datacenter_moid=self.datacenter_moid,
|
||||
deployment_container_id=self.deployment_container_id,
|
||||
appliance_size=appliance_size, remote_access=True, dist=dist)
|
||||
edge['id'] = edge_id
|
||||
appliance = self._assemble_edge_appliance(self.resource_pool_id,
|
||||
self.datastore_id)
|
||||
if appliance:
|
||||
edge['appliances']['appliances'] = [appliance]
|
||||
|
||||
if not dist:
|
||||
vnic_external = self._assemble_edge_vnic(
|
||||
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
|
||||
self.external_network, type="uplink")
|
||||
edge['vnics']['vnics'].append(vnic_external)
|
||||
else:
|
||||
edge['mgmtInterface'] = {
|
||||
'connectedToId': self.external_network,
|
||||
'name': "mgmtInterface"}
|
||||
|
||||
if internal_network:
|
||||
internal_vnic = self._assemble_edge_vnic(
|
||||
constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
|
||||
internal_network,
|
||||
constants.INTEGRATION_EDGE_IPADDRESS,
|
||||
constants.INTEGRATION_SUBNET_NETMASK,
|
||||
type="internal")
|
||||
edge['vnics']['vnics'].append(internal_vnic)
|
||||
if not dist and loadbalancer_enable:
|
||||
self._enable_loadbalancer(edge)
|
||||
userdata = {
|
||||
'edge_id': edge_id,
|
||||
'request': edge,
|
||||
'jobdata': jobdata
|
||||
}
|
||||
task = tasks.Task(task_name, router_id,
|
||||
self._update_edge,
|
||||
userdata=userdata)
|
||||
task.add_result_monitor(self.callbacks.edge_update_result)
|
||||
self.task_manager.add(task)
|
||||
return task
|
||||
|
||||
def delete_edge(self, resource_id, edge_id, jobdata=None, dist=False):
|
||||
task_name = 'delete-%s' % edge_id
|
||||
userdata = {
|
||||
'router_id': router_id,
|
||||
'router_id': resource_id,
|
||||
'dist': dist,
|
||||
'edge_id': edge_id,
|
||||
'jobdata': jobdata
|
||||
}
|
||||
task = tasks.Task(task_name, router_id, self._delete_edge,
|
||||
task = tasks.Task(task_name, resource_id, self._delete_edge,
|
||||
userdata=userdata)
|
||||
task.add_result_monitor(self.callbacks.edge_delete_result)
|
||||
self.task_manager.add(task)
|
||||
@ -360,23 +549,30 @@ class EdgeApplianceDriver(object):
|
||||
|
||||
def _assemble_nat_rule(self, action, original_address,
|
||||
translated_address,
|
||||
vnic_index=vcns_const.EXTERNAL_VNIC_INDEX,
|
||||
enabled=True):
|
||||
vnic_index=constants.EXTERNAL_VNIC_INDEX,
|
||||
enabled=True,
|
||||
protocol='any',
|
||||
original_port='any',
|
||||
translated_port='any'):
|
||||
nat_rule = {}
|
||||
nat_rule['action'] = action
|
||||
nat_rule['vnic'] = vnic_index
|
||||
nat_rule['originalAddress'] = original_address
|
||||
nat_rule['translatedAddress'] = translated_address
|
||||
nat_rule['enabled'] = enabled
|
||||
nat_rule['protocol'] = protocol
|
||||
nat_rule['originalPort'] = original_port
|
||||
nat_rule['translatedPort'] = translated_port
|
||||
|
||||
return nat_rule
|
||||
|
||||
def get_nat_config(self, edge_id):
|
||||
try:
|
||||
return self.vcns.get_nat_config(edge_id)[1]
|
||||
except exceptions.VcnsApiException as e:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("VCNS: Failed to get nat config:\n%s"),
|
||||
e.response)
|
||||
raise e
|
||||
|
||||
def _create_nat_rule(self, task):
|
||||
# TODO(fank): use POST for optimization
|
||||
@ -389,18 +585,18 @@ class EdgeApplianceDriver(object):
|
||||
|
||||
del nat['version']
|
||||
|
||||
if location is None or location == vcns_const.APPEND:
|
||||
if location is None or location == constants.APPEND:
|
||||
nat['rules']['natRulesDtos'].append(rule)
|
||||
else:
|
||||
nat['rules']['natRulesDtos'].insert(location, rule)
|
||||
|
||||
try:
|
||||
self.vcns.update_nat_config(edge_id, nat)
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
|
||||
e.response)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
@ -433,7 +629,7 @@ class EdgeApplianceDriver(object):
|
||||
'type': addrtype, 'addr': address})
|
||||
nat = self.get_nat_config(edge_id)
|
||||
del nat['version']
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
for nat_rule in nat['rules']['natRulesDtos']:
|
||||
if nat_rule[addrtype] == address:
|
||||
rule_id = nat_rule['ruleId']
|
||||
@ -442,7 +638,7 @@ class EdgeApplianceDriver(object):
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Failed to delete snat rule:\n"
|
||||
"%s"), e.response)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
@ -507,7 +703,7 @@ class EdgeApplianceDriver(object):
|
||||
if task != self.updated_task['nat'][edge_id]:
|
||||
# this task does not have the latest config, abort now
|
||||
# for speedup
|
||||
return constants.TaskStatus.ABORT
|
||||
return task_constants.TaskStatus.ABORT
|
||||
|
||||
rules = task.userdata['rules']
|
||||
LOG.debug("VCNS: start updating nat rules: %s", rules)
|
||||
@ -521,11 +717,11 @@ class EdgeApplianceDriver(object):
|
||||
|
||||
try:
|
||||
self.vcns.update_nat_config(edge_id, nat)
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Failed to create snat rule:\n%s"),
|
||||
e.response)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
@ -560,21 +756,53 @@ class EdgeApplianceDriver(object):
|
||||
self.task_manager.add(task)
|
||||
return task
|
||||
|
||||
def update_dnat_rules(self, edge_id, dnat_rules):
|
||||
edge_nat_rules = []
|
||||
for rule in dnat_rules:
|
||||
edge_nat_rules.append(
|
||||
self._assemble_nat_rule(
|
||||
'dnat',
|
||||
rule['dst'],
|
||||
rule['translated'],
|
||||
vnic_index=rule['vnic_index'],
|
||||
enabled=True,
|
||||
protocol=rule['protocol'],
|
||||
original_port=rule['original_port'],
|
||||
translated_port=rule['translated_port']))
|
||||
|
||||
nat = {
|
||||
'featureType': 'nat',
|
||||
'rules': {
|
||||
'natRulesDtos': edge_nat_rules
|
||||
}
|
||||
}
|
||||
|
||||
self.vcns.update_nat_config(edge_id, nat)
|
||||
|
||||
def _update_routes(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
if (task != self.updated_task['route'][edge_id] and
|
||||
task.userdata.get('skippable', True)):
|
||||
# this task does not have the latest config, abort now
|
||||
# for speedup
|
||||
return constants.TaskStatus.ABORT
|
||||
return task_constants.TaskStatus.ABORT
|
||||
gateway = task.userdata['gateway']
|
||||
gateway_vnic_index = task.userdata['gateway_vnic_index']
|
||||
routes = task.userdata['routes']
|
||||
LOG.debug("VCNS: start updating routes for %s", edge_id)
|
||||
static_routes = []
|
||||
for route in routes:
|
||||
if route.get('vnic_index') is None:
|
||||
static_routes.append({
|
||||
"description": "",
|
||||
"vnic": vcns_const.INTERNAL_VNIC_INDEX,
|
||||
"vnic": constants.INTERNAL_VNIC_INDEX,
|
||||
"network": route['cidr'],
|
||||
"nextHop": route['nexthop']
|
||||
})
|
||||
else:
|
||||
static_routes.append({
|
||||
"description": "",
|
||||
"vnic": route['vnic_index'],
|
||||
"network": route['cidr'],
|
||||
"nextHop": route['nexthop']
|
||||
})
|
||||
@ -587,26 +815,28 @@ class EdgeApplianceDriver(object):
|
||||
request["defaultRoute"] = {
|
||||
"description": "default-gateway",
|
||||
"gatewayAddress": gateway,
|
||||
"vnic": vcns_const.EXTERNAL_VNIC_INDEX
|
||||
"vnic": gateway_vnic_index
|
||||
}
|
||||
try:
|
||||
self.vcns.update_routes(edge_id, request)
|
||||
status = constants.TaskStatus.COMPLETED
|
||||
status = task_constants.TaskStatus.COMPLETED
|
||||
except exceptions.VcnsApiException as e:
|
||||
LOG.exception(_LE("VCNS: Failed to update routes:\n%s"),
|
||||
e.response)
|
||||
status = constants.TaskStatus.ERROR
|
||||
status = task_constants.TaskStatus.ERROR
|
||||
|
||||
return status
|
||||
|
||||
def update_routes(self, router_id, edge_id, gateway, routes,
|
||||
skippable=True, jobdata=None):
|
||||
skippable=True, jobdata=None,
|
||||
gateway_vnic_index=constants.EXTERNAL_VNIC_INDEX):
|
||||
if gateway:
|
||||
gateway = gateway.split('/')[0]
|
||||
|
||||
userdata = {
|
||||
'edge_id': edge_id,
|
||||
'gateway': gateway,
|
||||
'gateway_vnic_index': gateway_vnic_index,
|
||||
'routes': routes,
|
||||
'skippable': skippable,
|
||||
'jobdata': jobdata
|
||||
@ -659,3 +889,90 @@ class EdgeApplianceDriver(object):
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to enable loadbalancer "
|
||||
"service config"))
|
||||
|
||||
def _delete_port_group(self, task):
|
||||
try:
|
||||
header, response = self.vcns.get_edge_id(task.userdata['job_id'])
|
||||
except exceptions.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_LE("NSXv: Failed to get job for %s"),
|
||||
task.userdata)
|
||||
status = response['status']
|
||||
if status != 'COMPLETED':
|
||||
if (status == 'QUEUED' or status == 'RUNNING' or
|
||||
status == 'ROLLBACK'):
|
||||
LOG.debug("NSXv: job is still pending for %s", task.userdata)
|
||||
return task_constants.TaskStatus.PENDING
|
||||
try:
|
||||
self.vcns.delete_port_group(
|
||||
task.userdata['dvs_id'],
|
||||
task.userdata['port_group_id'])
|
||||
except Exception as e:
|
||||
LOG.error(_LE('Unable to delete %(pg)s (job status %(state)s) '
|
||||
'exception %(ex)s'),
|
||||
{'pg': task.userdata['port_group_id'],
|
||||
'state': status,
|
||||
'ex': e})
|
||||
if status == 'FAILED':
|
||||
return task_constants.TaskStatus.ERROR
|
||||
return task_constants.TaskStatus.COMPLETED
|
||||
|
||||
def delete_portgroup(self, dvs_id, port_group_id, job_id):
|
||||
task_name = "delete-port-group-%s" % port_group_id
|
||||
userdata = {'dvs_id': dvs_id,
|
||||
'port_group_id': port_group_id,
|
||||
'job_id': job_id}
|
||||
task = tasks.Task(task_name, port_group_id,
|
||||
self._delete_port_group,
|
||||
status_callback=self._delete_port_group,
|
||||
userdata=userdata)
|
||||
self.task_manager.add(task)
|
||||
|
||||
    def _retry_task(self, task):
        delay = 0.5
        max_retries = max(cfg.CONF.nsxv.retries, 1)
        args = task.userdata.get('args', [])
        kwargs = task.userdata.get('kwargs', {})
        retry_number = task.userdata['retry_number']
        retry_command = task.userdata['retry_command']
        try:
            retry_command(*args, **kwargs)
        except Exception as exc:
            LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s",
                      {'name': task.name,
                       'exc': exc,
                       'retry': retry_number})
            retry_number += 1
            if retry_number > max_retries:
                with excutils.save_and_reraise_exception():
                    LOG.exception(_LE("Failed to %s"), task.name)
            else:
                task.userdata['retry_number'] = retry_number
                # Sleep twice as long as the previous retry
                tts = (2 ** (retry_number - 1)) * delay
                time.sleep(min(tts, 60))
                return task_constants.TaskStatus.PENDING
        LOG.info(_LI("Task %(name)s completed."), {'name': task.name})
        return task_constants.TaskStatus.COMPLETED

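The back-off in _retry_task doubles on each attempt and is capped at 60 seconds; a quick illustration (retry_number starts at 1 and is incremented before the first sleep):

    # Sleep sequence for delay = 0.5, shown for the first eight failed retries.
    delay = 0.5
    sleeps = [min((2 ** (n - 1)) * delay, 60) for n in range(2, 10)]
    # -> [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 60, 60]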
|
||||
def delete_port_group(self, dvs_id, port_group_id):
|
||||
task_name = 'delete-port-group-%s-%s' % (port_group_id, dvs_id)
|
||||
userdata = {'retry_number': 1,
|
||||
'retry_command': self.vcns.delete_port_group,
|
||||
'args': [dvs_id, port_group_id]}
|
||||
task = tasks.Task(task_name, port_group_id,
|
||||
self._retry_task,
|
||||
status_callback=self._retry_task,
|
||||
userdata=userdata)
|
||||
self.task_manager.add(task)
|
||||
|
||||
def delete_virtual_wire(self, vw_id):
|
||||
task_name = 'delete-virtualwire-%s' % vw_id
|
||||
userdata = {'retry_number': 1,
|
||||
'retry_command': self.vcns.delete_virtual_wire,
|
||||
'args': [vw_id]}
|
||||
task = tasks.Task(task_name, vw_id,
|
||||
self._retry_task,
|
||||
status_callback=self._retry_task,
|
||||
userdata=userdata)
|
||||
self.task_manager.add(task)
|
||||
|
@ -15,12 +15,15 @@
from oslo.utils import excutils

from neutron.db import db_base_plugin_v2
from neutron.i18n import _LE
from neutron.i18n import _, _LE
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.plugins.vmware.dbexts import vcns_db
from neutron.plugins.vmware.vshield.common import (
from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
    exceptions as vcns_exc)
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
    constants as task_const)
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks

LOG = logging.getLogger(__name__)

@ -69,17 +72,19 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):

    def _convert_firewall_rule(self, context, rule, index=None):
        vcns_rule = {
            "name": rule['name'],
            "description": rule['description'],
            "action": self._convert_firewall_action(rule['action']),
            "enabled": rule['enabled']}
            "enabled": rule.get('enabled', True)}
        if rule.get('name'):
            vcns_rule['name'] = rule['name']
        if rule.get('description'):
            vcns_rule['description'] = rule['description']
        if rule.get('source_ip_address'):
            vcns_rule['source'] = {
                "ipAddress": [rule['source_ip_address']]
                "ipAddress": rule['source_ip_address']
            }
        if rule.get('destination_ip_address'):
            vcns_rule['destination'] = {
                "ipAddress": [rule['destination_ip_address']]
                "ipAddress": rule['destination_ip_address']
            }
        service = {}
        if rule.get('source_port'):
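To make the behaviour change concrete, a made-up rule (not taken from this change) run through the new _convert_firewall_rule would come out roughly as below; note that source_ip_address is now passed through as the list the caller supplies instead of being wrapped in another list:

    rule = {'action': 'allow',
            'enabled': True,
            'name': 'md-allow',
            'source_ip_address': ['169.254.0.0/16']}
    # Roughly, with the action string mapped by _convert_firewall_action:
    # {'action': <mapped action>, 'enabled': True, 'name': 'md-allow',
    #  'source': {'ipAddress': ['169.254.0.0/16']}}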
@ -102,27 +107,30 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
|
||||
def _restore_firewall_rule(self, context, edge_id, response):
|
||||
rule = response
|
||||
rule_binding = vcns_db.get_vcns_edge_firewallrule_binding_by_vseid(
|
||||
rule_binding = nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid(
|
||||
context.session, edge_id, rule['ruleId'])
|
||||
service = rule['application']['service'][0]
|
||||
src_port_range = self._get_port_range_from_min_max_ports(
|
||||
service['sourcePort'][0], service['sourcePort'][-1])
|
||||
dst_port_range = self._get_port_range_from_min_max_ports(
|
||||
service['port'][0], service['port'][-1])
|
||||
return {
|
||||
fw_rule = {
|
||||
'firewall_rule': {
|
||||
'name': rule['name'],
|
||||
'id': rule_binding['rule_id'],
|
||||
'description': rule['description'],
|
||||
'source_ip_address': rule['source']['ipAddress'][0],
|
||||
'destination_ip_address': rule['destination']['ipAddress'][0],
|
||||
'source_ip_address': rule['source']['ipAddress'],
|
||||
'destination_ip_address': rule['destination']['ipAddress'],
|
||||
'protocol': service['protocol'],
|
||||
'destination_port': dst_port_range,
|
||||
'source_port': src_port_range,
|
||||
'action': self._restore_firewall_action(rule['action']),
|
||||
'enabled': rule['enabled']}}
|
||||
if rule.get('name'):
|
||||
fw_rule['firewall_rule']['name'] = rule['name']
|
||||
if rule.get('description'):
|
||||
fw_rule['firewall_rule']['description'] = rule['description']
|
||||
return fw_rule
|
||||
|
||||
def _convert_firewall(self, context, firewall):
|
||||
def _convert_firewall(self, context, firewall, allow_external=False):
|
||||
#bulk configuration on firewall and rescheduling the rule binding
|
||||
ruleTag = 1
|
||||
vcns_rules = []
|
||||
@ -130,6 +138,11 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
vcns_rule = self._convert_firewall_rule(context, rule, ruleTag)
|
||||
vcns_rules.append(vcns_rule)
|
||||
ruleTag += 1
|
||||
if allow_external:
|
||||
vcns_rules.append(
|
||||
{'action': "accept",
|
||||
'enabled': True,
|
||||
'destination': {'vnicGroupId': ["external"]}})
|
||||
return {
|
||||
'featureType': "firewall_4.0",
|
||||
'firewallRules': {
|
||||
@ -140,7 +153,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
res['firewall_rule_list'] = []
|
||||
for rule in response['firewallRules']['firewallRules']:
|
||||
rule_binding = (
|
||||
vcns_db.get_vcns_edge_firewallrule_binding_by_vseid(
|
||||
nsxv_db.get_nsxv_edge_firewallrule_binding_by_vseid(
|
||||
context.session, edge_id, rule['ruleId']))
|
||||
if rule_binding is None:
|
||||
continue
|
||||
@ -151,17 +164,19 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
service['port'][0], service['port'][-1])
|
||||
item = {
|
||||
'firewall_rule': {
|
||||
'name': rule['name'],
|
||||
'id': rule_binding['rule_id'],
|
||||
'description': rule['description'],
|
||||
'source_ip_address': rule['source']['ipAddress'][0],
|
||||
'source_ip_address': rule['source']['ipAddress'],
|
||||
'destination_ip_address': rule[
|
||||
'destination']['ipAddress'][0],
|
||||
'destination']['ipAddress'],
|
||||
'protocol': service['protocol'],
|
||||
'destination_port': dst_port_range,
|
||||
'source_port': src_port_range,
|
||||
'action': self._restore_firewall_action(rule['action']),
|
||||
'enabled': rule['enabled']}}
|
||||
if rule.get('name'):
|
||||
item['firewall_rule']['name'] = rule['name']
|
||||
if rule.get('description'):
|
||||
item['firewall_rule']['description'] = rule['description']
|
||||
res['firewall_rule_list'].append(item)
|
||||
return res
|
||||
|
||||
@ -179,16 +194,16 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
'rule_vseid': rule_vseid,
|
||||
'edge_id': edge_id
|
||||
}
|
||||
vcns_db.add_vcns_edge_firewallrule_binding(
|
||||
nsxv_db.add_nsxv_edge_firewallrule_binding(
|
||||
context.session, map_info)
|
||||
|
||||
def _get_firewall(self, context, edge_id):
|
||||
try:
|
||||
return self.vcns.get_firewall(edge_id)[1]
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
except vcns_exc.VcnsApiException as e:
|
||||
LOG.exception(_LE("Failed to get firewall with edge "
|
||||
"id: %s"), edge_id)
|
||||
raise e
|
||||
|
||||
def _get_firewall_rule_next(self, context, edge_id, rule_vseid):
|
||||
# Return the firewall rule below 'rule_vseid'
|
||||
@ -202,7 +217,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
return fw_cfg['firewallRules']['firewallRules'][i + 1]
|
||||
|
||||
def get_firewall_rule(self, context, id, edge_id):
|
||||
rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
|
||||
rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
|
||||
context.session, id, edge_id)
|
||||
if rule_map is None:
|
||||
msg = _("No rule id:%s found in the edge_firewall_binding") % id
|
||||
@ -213,12 +228,12 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
try:
|
||||
response = self.vcns.get_firewall_rule(
|
||||
edge_id, vcns_rule_id)[1]
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
except vcns_exc.VcnsApiException as e:
|
||||
LOG.exception(_LE("Failed to get firewall rule: %(rule_id)s "
|
||||
"with edge_id: %(edge_id)s"), {
|
||||
'rule_id': id,
|
||||
'edge_id': edge_id})
|
||||
raise e
|
||||
return self._restore_firewall_rule(context, edge_id, response)
|
||||
|
||||
def get_firewall(self, context, edge_id):
|
||||
@ -229,27 +244,27 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
fw_req = self._convert_firewall(context, firewall)
|
||||
try:
|
||||
self.vcns.update_firewall(edge_id, fw_req)
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
except vcns_exc.VcnsApiException as e:
|
||||
LOG.exception(_LE("Failed to update firewall "
|
||||
"with edge_id: %s"), edge_id)
|
||||
raise e
|
||||
fw_res = self._get_firewall(context, edge_id)
|
||||
vcns_db.cleanup_vcns_edge_firewallrule_binding(
|
||||
nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
|
||||
context.session, edge_id)
|
||||
self._create_rule_id_mapping(context, edge_id, firewall, fw_res)
|
||||
|
||||
def delete_firewall(self, context, edge_id):
|
||||
try:
|
||||
self.vcns.delete_firewall(edge_id)
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
except vcns_exc.VcnsApiException as e:
|
||||
LOG.exception(_LE("Failed to delete firewall "
|
||||
"with edge_id:%s"), edge_id)
|
||||
vcns_db.cleanup_vcns_edge_firewallrule_binding(
|
||||
raise e
|
||||
nsxv_db.cleanup_nsxv_edge_firewallrule_binding(
|
||||
context.session, edge_id)
|
||||
|
||||
def update_firewall_rule(self, context, id, edge_id, firewall_rule):
|
||||
rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
|
||||
rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
|
||||
context.session, id, edge_id)
|
||||
vcns_rule_id = rule_map.rule_vseid
|
||||
fwr_req = self._convert_firewall_rule(context, firewall_rule)
|
||||
@ -258,12 +273,13 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to update firewall rule: "
|
||||
"%(rule_id)s with edge_id: %(edge_id)s"),
|
||||
"%(rule_id)s "
|
||||
"with edge_id: %(edge_id)s"),
|
||||
{'rule_id': id,
|
||||
'edge_id': edge_id})
|
||||
|
||||
def delete_firewall_rule(self, context, id, edge_id):
|
||||
rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
|
||||
rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
|
||||
context.session, id, edge_id)
|
||||
vcns_rule_id = rule_map.rule_vseid
|
||||
try:
|
||||
@ -271,14 +287,15 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete firewall rule: "
|
||||
"%(rule_id)s with edge_id: %(edge_id)s"),
|
||||
"%(rule_id)s "
|
||||
"with edge_id: %(edge_id)s"),
|
||||
{'rule_id': id,
|
||||
'edge_id': edge_id})
|
||||
vcns_db.delete_vcns_edge_firewallrule_binding(
|
||||
context.session, id, edge_id)
|
||||
nsxv_db.delete_nsxv_edge_firewallrule_binding(
|
||||
context.session, id)
|
||||
|
||||
def _add_rule_above(self, context, ref_rule_id, edge_id, firewall_rule):
|
||||
rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
|
||||
rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
|
||||
context.session, ref_rule_id, edge_id)
|
||||
ref_vcns_rule_id = rule_map.rule_vseid
|
||||
fwr_req = self._convert_firewall_rule(context, firewall_rule)
|
||||
@ -298,11 +315,11 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
'rule_id': firewall_rule['id'],
|
||||
'rule_vseid': fwr_vseid,
|
||||
'edge_id': edge_id}
|
||||
vcns_db.add_vcns_edge_firewallrule_binding(
|
||||
nsxv_db.add_nsxv_edge_firewallrule_binding(
|
||||
context.session, map_info)
|
||||
|
||||
def _add_rule_below(self, context, ref_rule_id, edge_id, firewall_rule):
|
||||
rule_map = vcns_db.get_vcns_edge_firewallrule_binding(
|
||||
rule_map = nsxv_db.get_nsxv_edge_firewallrule_binding(
|
||||
context.session, ref_rule_id, edge_id)
|
||||
ref_vcns_rule_id = rule_map.rule_vseid
|
||||
fwr_vse_next = self._get_firewall_rule_next(
|
||||
@ -336,7 +353,7 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
'rule_vseid': fwr_vseid,
|
||||
'edge_id': edge_id
|
||||
}
|
||||
vcns_db.add_vcns_edge_firewallrule_binding(
|
||||
nsxv_db.add_nsxv_edge_firewallrule_binding(
|
||||
context.session, map_info)
|
||||
|
||||
def insert_rule(self, context, rule_info, edge_id, fwr):
|
||||
@ -350,3 +367,34 @@ class EdgeFirewallDriver(db_base_plugin_v2.NeutronDbPluginV2):
|
||||
msg = _("Can't execute insert rule operation "
|
||||
"without reference rule_id")
|
||||
raise vcns_exc.VcnsBadRequest(resource='firewall_rule', msg=msg)
|
||||
|
||||
def _asyn_update_firewall(self, task):
|
||||
edge_id = task.userdata['edge_id']
|
||||
config = task.userdata['config']
|
||||
context = task.userdata['jobdata']['context']
|
||||
try:
|
||||
self.vcns.update_firewall(edge_id, config)
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to update firewall "
|
||||
"with edge_id: %s"), edge_id)
|
||||
vcns_fw_config = self._get_firewall(context, edge_id)
|
||||
task.userdata['vcns_fw_config'] = vcns_fw_config
|
||||
return task_const.TaskStatus.COMPLETED
|
||||
|
||||
def asyn_update_firewall(self, router_id, edge_id, firewall,
|
||||
jobdata=None, allow_external=True):
|
||||
# TODO(berlin): Remove unnecessary context input parameter.
|
||||
config = self._convert_firewall(None, firewall,
|
||||
allow_external=allow_external)
|
||||
userdata = {
|
||||
'edge_id': edge_id,
|
||||
'config': config,
|
||||
'fw_config': firewall,
|
||||
'jobdata': jobdata}
|
||||
task_name = "update-firewall-%s" % edge_id
|
||||
task = tasks.Task(task_name, router_id,
|
||||
self._asyn_update_firewall, userdata=userdata)
|
||||
task.add_result_monitor(self.callbacks.firewall_update_result)
|
||||
self.task_manager.add(task)
|
||||
return task
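
For review convenience, a rough sketch of the neutron-side input that asyn_update_firewall() above consumes. The 'firewall_rule_list' key and the per-rule field names are inferred from the keys read by _convert_firewall_rule() and rebuilt by _restore_firewall_rule(); the ids, addresses, port format and action value are invented for illustration and are not part of this change.

# Illustrative only: a firewall dict shaped like the one the driver above
# converts into a vcns 'firewall_4.0' body. Ids and values are made up.
example_firewall = {
    'firewall_rule_list': [
        {'id': 'rule-uuid-1',
         'name': 'allow-web',
         'description': 'allow HTTP to the web tier',
         'enabled': True,
         'action': 'allow',
         'protocol': 'tcp',
         'source_ip_address': '10.0.0.0/24',
         'destination_ip_address': '192.168.1.10',
         'destination_port': '80:80'},
    ],
}

# driver.asyn_update_firewall('router-1', 'edge-1', example_firewall,
#                             allow_external=True)
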
|
||||
|
@ -16,7 +16,7 @@ from oslo.utils import excutils
|
||||
|
||||
from neutron.i18n import _LE, _LW
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.vshield.common import (
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
|
||||
exceptions as vcns_exc)
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
@ -16,10 +16,10 @@ from oslo.utils import excutils
|
||||
|
||||
from neutron.i18n import _LE
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.dbexts import vcns_db
|
||||
from neutron.plugins.vmware.vshield.common import (
|
||||
from vmware_nsx.neutron.plugins.vmware.dbexts import nsxv_db
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
|
||||
constants as vcns_const)
|
||||
from neutron.plugins.vmware.vshield.common import (
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
|
||||
exceptions as vcns_exc)
|
||||
try:
|
||||
from neutron_lbaas.services.loadbalancer import constants as lb_constants
|
||||
@ -54,7 +54,7 @@ class EdgeLbDriver():
|
||||
|
||||
def _convert_lb_vip(self, context, edge_id, vip, app_profileid):
|
||||
pool_id = vip.get('pool_id')
|
||||
poolid_map = vcns_db.get_vcns_edge_pool_binding(
|
||||
poolid_map = nsxv_db.get_vcns_edge_pool_binding(
|
||||
context.session, pool_id, edge_id)
|
||||
pool_vseid = poolid_map['pool_vseid']
|
||||
return {
|
||||
@ -70,7 +70,7 @@ class EdgeLbDriver():
|
||||
}
|
||||
|
||||
def _restore_lb_vip(self, context, edge_id, vip_vse):
|
||||
pool_binding = vcns_db.get_vcns_edge_pool_binding_by_vseid(
|
||||
pool_binding = nsxv_db.get_vcns_edge_pool_binding_by_vseid(
|
||||
context.session,
|
||||
edge_id,
|
||||
vip_vse['defaultPoolId'])
|
||||
@ -105,7 +105,7 @@ class EdgeLbDriver():
|
||||
monitors = pool.get('health_monitors')
|
||||
if not monitors:
|
||||
return vsepool
|
||||
monitorid_map = vcns_db.get_vcns_edge_monitor_binding(
|
||||
monitorid_map = nsxv_db.get_vcns_edge_monitor_binding(
|
||||
context.session,
|
||||
monitors[0],
|
||||
edge_id)
|
||||
@ -204,10 +204,10 @@ class EdgeLbDriver():
|
||||
"edge_id": edge_id,
|
||||
"app_profileid": app_profileid
|
||||
}
|
||||
vcns_db.add_vcns_edge_vip_binding(context.session, map_info)
|
||||
nsxv_db.add_nsxv_edge_vip_binding(context.session, map_info)
|
||||
|
||||
def _get_vip_binding(self, session, id):
|
||||
vip_binding = vcns_db.get_vcns_edge_vip_binding(session, id)
|
||||
vip_binding = nsxv_db.get_nsxv_edge_vip_binding(session, id)
|
||||
if not vip_binding:
|
||||
msg = (_("vip_binding not found with id: %(id)s "
|
||||
"edge_id: %(edge_id)s") % {
|
||||
@ -219,7 +219,7 @@ class EdgeLbDriver():
|
||||
return vip_binding
|
||||
|
||||
def get_vip(self, context, id):
|
||||
vip_binding = vcns_db.get_vcns_edge_vip_binding(context.session, id)
|
||||
vip_binding = nsxv_db.get_nsxv_edge_vip_binding(context.session, id)
|
||||
edge_id = vip_binding[vcns_const.EDGE_ID]
|
||||
vip_vseid = vip_binding['vip_vseid']
|
||||
try:
|
||||
@ -276,7 +276,7 @@ class EdgeLbDriver():
|
||||
LOG.exception(_LE("Failed to delete app profile on edge: %s"),
|
||||
edge_id)
|
||||
|
||||
vcns_db.delete_vcns_edge_vip_binding(context.session, id)
|
||||
nsxv_db.delete_nsxv_edge_vip_binding(context.session, id)
|
||||
|
||||
def create_pool(self, context, edge_id, pool, members):
|
||||
pool_new = self._convert_lb_pool(context, edge_id, pool, members)
|
||||
@ -295,10 +295,10 @@ class EdgeLbDriver():
|
||||
"pool_vseid": pool_vseid,
|
||||
"edge_id": edge_id
|
||||
}
|
||||
vcns_db.add_vcns_edge_pool_binding(context.session, map_info)
|
||||
nsxv_db.add_vcns_edge_pool_binding(context.session, map_info)
|
||||
|
||||
def get_pool(self, context, id, edge_id):
|
||||
pool_binding = vcns_db.get_vcns_edge_pool_binding(
|
||||
pool_binding = nsxv_db.get_vcns_edge_pool_binding(
|
||||
context.session, id, edge_id)
|
||||
if not pool_binding:
|
||||
msg = (_("pool_binding not found with id: %(id)s "
|
||||
@ -315,7 +315,7 @@ class EdgeLbDriver():
|
||||
return self._restore_lb_pool(context, edge_id, response)
|
||||
|
||||
def update_pool(self, context, edge_id, pool, members):
|
||||
pool_binding = vcns_db.get_vcns_edge_pool_binding(
|
||||
pool_binding = nsxv_db.get_vcns_edge_pool_binding(
|
||||
context.session, pool['id'], edge_id)
|
||||
pool_vseid = pool_binding['pool_vseid']
|
||||
pool_new = self._convert_lb_pool(context, edge_id, pool, members)
|
||||
@ -326,7 +326,7 @@ class EdgeLbDriver():
|
||||
LOG.exception(_LE("Failed to update pool"))
|
||||
|
||||
def delete_pool(self, context, id, edge_id):
|
||||
pool_binding = vcns_db.get_vcns_edge_pool_binding(
|
||||
pool_binding = nsxv_db.get_vcns_edge_pool_binding(
|
||||
context.session, id, edge_id)
|
||||
pool_vseid = pool_binding['pool_vseid']
|
||||
try:
|
||||
@ -334,7 +334,7 @@ class EdgeLbDriver():
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete pool"))
|
||||
vcns_db.delete_vcns_edge_pool_binding(
|
||||
nsxv_db.delete_vcns_edge_pool_binding(
|
||||
context.session, id, edge_id)
|
||||
|
||||
def create_health_monitor(self, context, edge_id, health_monitor):
|
||||
@ -355,10 +355,10 @@ class EdgeLbDriver():
|
||||
"monitor_vseid": monitor_vseid,
|
||||
"edge_id": edge_id
|
||||
}
|
||||
vcns_db.add_vcns_edge_monitor_binding(context.session, map_info)
|
||||
nsxv_db.add_vcns_edge_monitor_binding(context.session, map_info)
|
||||
|
||||
def get_health_monitor(self, context, id, edge_id):
|
||||
monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
|
||||
monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
|
||||
context.session, id, edge_id)
|
||||
if not monitor_binding:
|
||||
msg = (_("monitor_binding not found with id: %(id)s "
|
||||
@ -377,7 +377,7 @@ class EdgeLbDriver():
|
||||
|
||||
def update_health_monitor(self, context, edge_id,
|
||||
old_health_monitor, health_monitor):
|
||||
monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
|
||||
monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
|
||||
context.session,
|
||||
old_health_monitor['id'], edge_id)
|
||||
monitor_vseid = monitor_binding['monitor_vseid']
|
||||
@ -392,7 +392,7 @@ class EdgeLbDriver():
|
||||
edge_id)
|
||||
|
||||
def delete_health_monitor(self, context, id, edge_id):
|
||||
monitor_binding = vcns_db.get_vcns_edge_monitor_binding(
|
||||
monitor_binding = nsxv_db.get_vcns_edge_monitor_binding(
|
||||
context.session, id, edge_id)
|
||||
monitor_vseid = monitor_binding['monitor_vseid']
|
||||
try:
|
||||
@ -400,5 +400,5 @@ class EdgeLbDriver():
|
||||
except vcns_exc.VcnsApiException:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.exception(_LE("Failed to delete monitor"))
|
||||
vcns_db.delete_vcns_edge_monitor_binding(
|
||||
nsxv_db.delete_vcns_edge_monitor_binding(
|
||||
context.session, id, edge_id)
|
||||
|
1349 vmware_nsx/neutron/plugins/vmware/vshield/edge_utils.py
File diff suppressed because it is too large
@ -0,0 +1,67 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import abc
|
||||
|
||||
from oslo.serialization import jsonutils
|
||||
import six
|
||||
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield import vcns
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
|
||||
class NsxvEdgeCfgObj(object):
|
||||
|
||||
def __init__(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def get_service_name(self):
|
||||
return
|
||||
|
||||
@abc.abstractmethod
|
||||
def serializable_payload(self):
|
||||
return
|
||||
|
||||
@staticmethod
|
||||
def get_object(vcns_obj, edge_id, service_name):
|
||||
uri = "%s/%s/%s" % (vcns.URI_PREFIX,
|
||||
edge_id,
|
||||
service_name)
|
||||
|
||||
h, v = vcns_obj.do_request(
|
||||
vcns.HTTP_GET,
|
||||
uri,
|
||||
decode=True)
|
||||
|
||||
return v
|
||||
|
||||
def submit_to_backend(self, vcns_obj, edge_id, async=True):
|
||||
uri = "%s/%s/%s/config" % (vcns.URI_PREFIX,
|
||||
edge_id,
|
||||
self.get_service_name())
|
||||
|
||||
if async:
|
||||
uri += '?async=true'
|
||||
|
||||
payload = jsonutils.dumps(self.serializable_payload(), sort_keys=True)
|
||||
|
||||
if payload:
|
||||
return vcns_obj.do_request(
|
||||
vcns.HTTP_PUT,
|
||||
uri,
|
||||
payload,
|
||||
format='json',
|
||||
encode=False)
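
A minimal sketch of how the NsxvEdgeCfgObj base class above is meant to be used: a concrete config object only has to name its edge service endpoint and return a serializable dict. The 'dhcp' service name, the 'edge-1' id and the payload below are illustrative assumptions, not part of this change, and the snippet presumes the vmware_nsx tree is importable and an NSXv manager is reachable.

# Illustrative only: a hypothetical concrete subclass of NsxvEdgeCfgObj.
from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_edge_cfg_obj
from vmware_nsx.neutron.plugins.vmware.vshield import vcns


class ExampleDhcpCfg(nsxv_edge_cfg_obj.NsxvEdgeCfgObj):
    """Wraps an assumed 'dhcp' service endpoint on the edge."""

    def __init__(self, enabled=True):
        super(ExampleDhcpCfg, self).__init__()
        self.payload = {'enabled': enabled}

    def get_service_name(self):
        # Lands in /api/4.0/edges/<edge_id>/<service_name>/config on PUT.
        return 'dhcp'

    def serializable_payload(self):
        return self.payload


def push_example_config(manager_uri, user, password):
    client = vcns.Vcns(manager_uri, user, password)
    # get_object() GETs the current service config; submit_to_backend()
    # PUTs the new payload, by default with ?async=true appended.
    current = nsxv_edge_cfg_obj.NsxvEdgeCfgObj.get_object(
        client, 'edge-1', 'dhcp')
    ExampleDhcpCfg(enabled=True).submit_to_backend(client, 'edge-1')
    return current
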
|
391 vmware_nsx/neutron/plugins/vmware/vshield/nsxv_loadbalancer.py
@ -0,0 +1,391 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_edge_cfg_obj
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxvLoadbalancer(nsxv_edge_cfg_obj.NsxvEdgeCfgObj):
|
||||
|
||||
SERVICE_NAME = 'loadbalancer'
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
enabled=True,
|
||||
enable_service_insertion=False,
|
||||
acceleration_enabled=False):
|
||||
super(NsxvLoadbalancer, self).__init__()
|
||||
self.payload = {
|
||||
'enabled': enabled,
|
||||
'enableServiceInsertion': enable_service_insertion,
|
||||
'accelerationEnabled': acceleration_enabled}
|
||||
self.virtual_servers = {}
|
||||
|
||||
def get_service_name(self):
|
||||
return self.SERVICE_NAME
|
||||
|
||||
def add_virtual_server(self, virtual_server):
|
||||
self.virtual_servers[virtual_server.payload['name']] = virtual_server
|
||||
|
||||
def del_virtual_server(self, name):
|
||||
self.virtual_servers.pop(name, None)
|
||||
|
||||
def serializable_payload(self):
|
||||
virt_servers = []
|
||||
app_profiles = []
|
||||
app_rules = []
|
||||
pools = []
|
||||
monitors = []
|
||||
|
||||
virt_id = 1
|
||||
app_prof_id = 1
|
||||
app_rule_id = 1
|
||||
pool_id = 1
|
||||
monitor_id = 1
|
||||
member_id = 1
|
||||
|
||||
for virtual_server in self.virtual_servers.values():
|
||||
s_virt = virtual_server.payload.copy()
|
||||
s_virt['virtualServerId'] = 'virtualServer-%d' % virt_id
|
||||
virt_id += 1
|
||||
|
||||
# Setup app profile
|
||||
s_app_prof = virtual_server.app_profile.payload.copy()
|
||||
s_app_prof['applicationProfileId'] = ('applicationProfile-%d' %
|
||||
app_prof_id)
|
||||
app_profiles.append(s_app_prof)
|
||||
app_prof_id += 1
|
||||
|
||||
# Bind virtual server to app profile
|
||||
s_virt['applicationProfileId'] = s_app_prof['applicationProfileId']
|
||||
|
||||
# Setup app rules
|
||||
if virtual_server.app_rules.values():
|
||||
s_virt['applicationRuleId'] = []
|
||||
for app_rule in virtual_server.app_rules.values():
|
||||
s_app_rule = app_rule.payload.copy()
|
||||
s_app_rule['applicationRuleId'] = ('applicationRule-%d' %
|
||||
app_rule_id)
|
||||
app_rule_id += 1
|
||||
|
||||
# Add to LB object, bind to virtual server
|
||||
app_rules.append(s_app_rule)
|
||||
s_virt['applicationRuleId'].append(
|
||||
s_app_rule['applicationRuleId'])
|
||||
|
||||
# Setup pools
|
||||
s_pool = virtual_server.default_pool.payload.copy()
|
||||
s_pool['poolId'] = 'pool-%d' % pool_id
|
||||
pool_id += 1
|
||||
pools.append(s_pool)
|
||||
|
||||
# Add pool members
|
||||
s_pool['member'] = []
|
||||
for member in virtual_server.default_pool.members.values():
|
||||
s_m = member.payload.copy()
|
||||
s_m['memberId'] = 'member-%d' % member_id
|
||||
member_id += 1
|
||||
s_pool['member'].append(s_m)
|
||||
|
||||
# Bind pool to virtual server
|
||||
s_virt['defaultPoolId'] = s_pool['poolId']
|
||||
|
||||
s_pool['monitorId'] = []
|
||||
# Add monitors
|
||||
for monitor in virtual_server.default_pool.monitors.values():
|
||||
s_mon = monitor.payload.copy()
|
||||
s_mon['monitorId'] = 'monitor-%d' % monitor_id
|
||||
monitor_id += 1
|
||||
|
||||
s_pool['monitorId'].append(s_mon['monitorId'])
|
||||
|
||||
monitors.append(s_mon)
|
||||
|
||||
virt_servers.append(s_virt)
|
||||
|
||||
payload = self.payload.copy()
|
||||
payload['applicationProfile'] = app_profiles
|
||||
if app_rules:
|
||||
payload['applicationRule'] = app_rules
|
||||
payload['monitor'] = monitors
|
||||
payload['pool'] = pools
|
||||
payload['virtualServer'] = virt_servers
|
||||
payload['featureType'] = 'loadbalancer_4.0'
|
||||
|
||||
return payload
|
||||
|
||||
@staticmethod
|
||||
def get_loadbalancer(vcns_obj, edge_id):
|
||||
edge_lb = nsxv_edge_cfg_obj.NsxvEdgeCfgObj.get_object(
|
||||
vcns_obj,
|
||||
edge_id,
|
||||
NsxvLoadbalancer.SERVICE_NAME)
|
||||
|
||||
lb_obj = NsxvLoadbalancer(
|
||||
edge_lb['enabled'],
|
||||
edge_lb['enableServiceInsertion'],
|
||||
edge_lb['accelerationEnabled'])
|
||||
|
||||
# Construct loadbalancer objects
|
||||
for virt_srvr in edge_lb['virtualServer']:
|
||||
v_s = NsxvLBVirtualServer(
|
||||
virt_srvr['name'],
|
||||
virt_srvr['ipAddress'],
|
||||
virt_srvr['port'],
|
||||
virt_srvr['protocol'],
|
||||
virt_srvr['enabled'],
|
||||
virt_srvr['accelerationEnabled'],
|
||||
virt_srvr['connectionLimit'])
|
||||
|
||||
# Find application profile objects, attach to virtual server
|
||||
for app_prof in edge_lb['applicationProfile']:
|
||||
if (virt_srvr['applicationProfileId']
|
||||
== app_prof['applicationProfileId']):
|
||||
a_p = NsxvLBAppProfile(
|
||||
app_prof['name'],
|
||||
app_prof['serverSslEnabled'],
|
||||
app_prof['sslPassthrough'],
|
||||
app_prof['template'],
|
||||
app_prof['insertXForwardedFor'])
|
||||
|
||||
if app_prof['persistence']:
|
||||
a_p.set_persistence(
|
||||
True,
|
||||
app_prof['persistence']['method'],
|
||||
app_prof['persistence'].get('cookieName'),
|
||||
app_prof['persistence'].get('cookieMode'),
|
||||
app_prof['persistence'].get('expire'))
|
||||
|
||||
v_s.set_app_profile(a_p)
|
||||
|
||||
# Find default pool, attach to virtual server
|
||||
for pool in edge_lb['pool']:
|
||||
if virt_srvr['defaultPoolId'] == pool['poolId']:
|
||||
p = NsxvLBPool(
|
||||
pool['name'],
|
||||
pool['algorithm'],
|
||||
pool['transparent'])
|
||||
|
||||
# Add pool members to pool
|
||||
for member in pool['member']:
|
||||
m = NsxvLBPoolMember(
|
||||
member['name'],
|
||||
member['ipAddress'],
|
||||
member['port'],
|
||||
member['monitorPort'],
|
||||
member['condition'],
|
||||
member['weight'],
|
||||
member['minConn'],
|
||||
member['maxConn'])
|
||||
|
||||
p.add_member(m)
|
||||
|
||||
# Add monitors to pool
|
||||
for mon in edge_lb['monitor']:
|
||||
if mon['monitorId'] in pool['monitorId']:
|
||||
m = NsxvLBMonitor(
|
||||
mon['name'],
|
||||
mon['interval'],
|
||||
mon['maxRetries'],
|
||||
mon['method'],
|
||||
mon['timeout'],
|
||||
mon['type'],
|
||||
mon['url'])
|
||||
|
||||
p.add_monitor(m)
|
||||
|
||||
v_s.set_default_pool(p)
|
||||
|
||||
# Add application rules to virtual server
|
||||
for rule in edge_lb['applicationRule']:
|
||||
if rule['applicationRuleId'] in virt_srvr['applicationRuleId']:
|
||||
r = NsxvLBAppRule(
|
||||
rule['name'],
|
||||
rule['script'])
|
||||
|
||||
v_s.add_app_rule(r)
|
||||
|
||||
lb_obj.add_virtual_server(v_s)
|
||||
|
||||
return lb_obj
|
||||
|
||||
|
||||
class NsxvLBAppProfile():
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
server_ssl_enabled=False,
|
||||
ssl_pass_through=False,
|
||||
template='TCP',
|
||||
insert_xff=False,
|
||||
persist=False,
|
||||
persist_method='cookie',
|
||||
persist_cookie_name='JSESSIONID',
|
||||
persist_cookie_mode='insert',
|
||||
persist_expire=30):
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'serverSslEnabled': server_ssl_enabled,
|
||||
'sslPassthrough': ssl_pass_through,
|
||||
'template': template,
|
||||
'insertXForwardedFor': insert_xff}
|
||||
|
||||
if persist:
|
||||
self.payload['persistence'] = {
|
||||
'method': persist_method,
|
||||
'expire': persist_expire
|
||||
}
|
||||
if persist_cookie_mode == 'cookie':
|
||||
self.payload['persistence']['cookieMode'] = persist_cookie_mode
|
||||
self.payload['persistence']['cookieName'] = persist_cookie_name
|
||||
|
||||
def set_persistence(
|
||||
self,
|
||||
persist=False,
|
||||
persist_method='cookie',
|
||||
persist_cookie_name='JSESSIONID',
|
||||
persist_cookie_mode='insert',
|
||||
persist_expire=30):
|
||||
|
||||
if persist:
|
||||
self.payload['persistence'] = {
|
||||
'method': persist_method,
|
||||
'expire': persist_expire
|
||||
}
|
||||
if persist_cookie_mode == 'cookie':
|
||||
self.payload['persistence']['cookieMode'] = persist_cookie_mode
|
||||
self.payload['persistence']['cookieName'] = persist_cookie_name
|
||||
|
||||
else:
|
||||
self.payload.pop('persistence', None)
|
||||
|
||||
|
||||
class NsxvLBAppRule(object):
|
||||
def __init__(self, name, script):
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'script': script}
|
||||
|
||||
|
||||
class NsxvLBVirtualServer(object):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
ip_address,
|
||||
port=80,
|
||||
protocol='HTTP',
|
||||
enabled=True,
|
||||
acceleration_enabled=False,
|
||||
connection_limit=0,
|
||||
enable_service_insertion=False):
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'ipAddress': ip_address,
|
||||
'port': port,
|
||||
'protocol': protocol,
|
||||
'enabled': enabled,
|
||||
'accelerationEnabled': acceleration_enabled,
|
||||
'connectionLimit': connection_limit,
|
||||
'enableServiceInsertion': enable_service_insertion}
|
||||
|
||||
self.app_rules = {}
|
||||
self.app_profile = None
|
||||
self.default_pool = None
|
||||
|
||||
def add_app_rule(self, app_rule):
|
||||
self.app_rules[app_rule.payload['name']] = app_rule
|
||||
|
||||
def del_app_rule(self, name):
|
||||
self.app_rules.pop(name, None)
|
||||
|
||||
def set_default_pool(self, pool):
|
||||
self.default_pool = pool
|
||||
|
||||
def set_app_profile(self, app_profile):
|
||||
self.app_profile = app_profile
|
||||
|
||||
|
||||
class NsxvLBMonitor(object):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
interval=10,
|
||||
max_retries=3,
|
||||
method='GET',
|
||||
timeout=15,
|
||||
mon_type='http',
|
||||
url='/'):
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'interval': interval,
|
||||
'maxRetries': max_retries,
|
||||
'method': method,
|
||||
'timeout': timeout,
|
||||
'type': mon_type,
|
||||
'url': url}
|
||||
|
||||
|
||||
class NsxvLBPoolMember(object):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
ip_address,
|
||||
port,
|
||||
monitor_port=None,
|
||||
condition='enabled',
|
||||
weight=1,
|
||||
min_conn=0,
|
||||
max_conn=0):
|
||||
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'ipAddress': ip_address,
|
||||
'port': port,
|
||||
'monitorPort': monitor_port,
|
||||
'condition': condition,
|
||||
'weight': weight,
|
||||
'minConn': min_conn,
|
||||
'maxConn': max_conn}
|
||||
|
||||
|
||||
class NsxvLBPool(object):
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
algorithm='round-robin',
|
||||
transparent=False):
|
||||
self.payload = {
|
||||
'name': name,
|
||||
'algorithm': algorithm,
|
||||
'transparent': transparent}
|
||||
|
||||
self.members = {}
|
||||
self.monitors = {}
|
||||
|
||||
def add_member(self, member):
|
||||
self.members[member.payload['name']] = member
|
||||
|
||||
def del_member(self, name):
|
||||
self.members.pop(name, None)
|
||||
|
||||
def add_monitor(self, monitor):
|
||||
self.monitors[monitor.payload['name']] = monitor
|
||||
|
||||
def del_monitor(self, name):
|
||||
self.monitors.pop(name, None)
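
A short usage sketch of the load balancer object model above, assembling one virtual server with a pool, a member, a monitor and an HTTP application profile. The names, addresses and the 'edge-1' id are placeholders; submit_to_backend() is inherited from NsxvEdgeCfgObj.

# Illustrative only: composing an Edge load balancer config in memory and
# pushing it, using the classes introduced in this file.
from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_loadbalancer
from vmware_nsx.neutron.plugins.vmware.vshield import vcns


def build_example_lb(manager_uri, user, password):
    pool = nsxv_loadbalancer.NsxvLBPool('web-pool')
    pool.add_member(nsxv_loadbalancer.NsxvLBPoolMember(
        'web-1', '10.0.0.11', 80))
    pool.add_monitor(nsxv_loadbalancer.NsxvLBMonitor('http-check'))

    vip = nsxv_loadbalancer.NsxvLBVirtualServer('web-vip', '172.16.0.10')
    vip.set_app_profile(
        nsxv_loadbalancer.NsxvLBAppProfile('http-profile', template='HTTP'))
    vip.set_default_pool(pool)

    lb = nsxv_loadbalancer.NsxvLoadbalancer()
    lb.add_virtual_server(vip)

    # serializable_payload() generates the numbered *Id cross references the
    # backend expects; submit_to_backend() PUTs .../loadbalancer/config.
    lb.submit_to_backend(vcns.Vcns(manager_uri, user, password), 'edge-1')
    return lb.serializable_payload()
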
|
183 vmware_nsx/neutron/plugins/vmware/vshield/securitygroup_utils.py
@ -0,0 +1,183 @@
|
||||
# Copyright 2014 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import xml.etree.ElementTree as et
|
||||
|
||||
from neutron.i18n import _LE, _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import loopingcall
|
||||
|
||||
WAIT_INTERVAL = 2000
|
||||
MAX_ATTEMPTS = 5
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxSecurityGroupUtils(object):
|
||||
|
||||
def __init__(self, nsxv_manager):
|
||||
LOG.debug("Start Security Group Utils initialization")
|
||||
self.nsxv_manager = nsxv_manager
|
||||
|
||||
def to_xml_string(self, element):
|
||||
return et.tostring(element)
|
||||
|
||||
def get_section_with_rules(self, name, rules):
|
||||
"""Helper method to create section dict with rules."""
|
||||
|
||||
section = et.Element('section')
|
||||
section.attrib['name'] = name
|
||||
for rule in rules:
|
||||
section.append(rule)
|
||||
return section
|
||||
|
||||
def get_container(self, nsx_sg_id):
|
||||
container = {'type': 'SecurityGroup', 'value': nsx_sg_id}
|
||||
return container
|
||||
|
||||
def get_remote_container(self, remote_group_id, remote_ip_mac):
|
||||
container = None
|
||||
if remote_group_id is not None:
|
||||
return self.get_container(remote_group_id)
|
||||
if remote_ip_mac is not None:
|
||||
container = {'type': 'Ipv4Address', 'value': remote_ip_mac}
|
||||
return container
|
||||
|
||||
def get_rule_config(self, applied_to_id, name, action='allow',
|
||||
applied_to='SecurityGroup',
|
||||
source=None, destination=None, services=None,
|
||||
flags=None):
|
||||
"""Helper method to create a nsx rule dict."""
|
||||
ruleTag = et.Element('rule')
|
||||
nameTag = et.SubElement(ruleTag, 'name')
|
||||
nameTag.text = name
|
||||
actionTag = et.SubElement(ruleTag, 'action')
|
||||
actionTag.text = action
|
||||
|
||||
apList = et.SubElement(ruleTag, 'appliedToList')
|
||||
apTag = et.SubElement(apList, 'appliedTo')
|
||||
apTypeTag = et.SubElement(apTag, 'type')
|
||||
apTypeTag.text = applied_to
|
||||
apValueTag = et.SubElement(apTag, 'value')
|
||||
apValueTag.text = applied_to_id
|
||||
|
||||
if source is not None:
|
||||
sources = et.SubElement(ruleTag, 'sources')
|
||||
sources.attrib['excluded'] = 'false'
|
||||
srcTag = et.SubElement(sources, 'source')
|
||||
srcTypeTag = et.SubElement(srcTag, 'type')
|
||||
srcTypeTag.text = source['type']
|
||||
srcValueTag = et.SubElement(srcTag, 'value')
|
||||
srcValueTag.text = source['value']
|
||||
|
||||
if destination is not None:
|
||||
dests = et.SubElement(ruleTag, 'destinations')
|
||||
dests.attrib['excluded'] = 'false'
|
||||
destTag = et.SubElement(dests, 'destination')
|
||||
destTypeTag = et.SubElement(destTag, 'type')
|
||||
destTypeTag.text = destination['type']
|
||||
destValueTag = et.SubElement(destTag, 'value')
|
||||
destValueTag.text = destination['value']
|
||||
|
||||
if services:
|
||||
s = et.SubElement(ruleTag, 'services')
|
||||
for protocol, port, icmptype, icmpcode in services:
|
||||
svcTag = et.SubElement(s, 'service')
|
||||
try:
|
||||
int(protocol)
|
||||
svcProtocolTag = et.SubElement(svcTag, 'protocol')
|
||||
svcProtocolTag.text = str(protocol)
|
||||
except ValueError:
|
||||
svcProtocolTag = et.SubElement(svcTag, 'protocolName')
|
||||
svcProtocolTag.text = protocol
|
||||
if port is not None:
|
||||
svcPortTag = et.SubElement(svcTag, 'destinationPort')
|
||||
svcPortTag.text = str(port)
|
||||
if icmptype is not None:
|
||||
svcPortTag = et.SubElement(svcTag, 'subProtocol')
|
||||
svcPortTag.text = str(icmptype)
|
||||
if icmpcode is not None:
|
||||
svcPortTag = et.SubElement(svcTag, 'icmpCode')
|
||||
svcPortTag.text = str(icmpcode)
|
||||
|
||||
if flags:
|
||||
if flags.get('ethertype') is not None:
|
||||
pktTag = et.SubElement(ruleTag, 'packetType')
|
||||
pktTag.text = flags.get('ethertype')
|
||||
if flags.get('direction') is not None:
|
||||
dirTag = et.SubElement(ruleTag, 'direction')
|
||||
dirTag.text = flags.get('direction')
|
||||
return ruleTag
|
||||
|
||||
def get_rule_id_pair_from_section(self, resp):
|
||||
root = et.fromstring(resp)
|
||||
pairs = []
|
||||
for rule in root.findall('rule'):
|
||||
pair = {'nsx_id': rule.attrib.get('id'),
|
||||
'neutron_id': rule.find('name').text}
|
||||
pairs.append(pair)
|
||||
return pairs
|
||||
|
||||
def insert_rule_in_section(self, section, nsx_rule):
|
||||
section.insert(0, nsx_rule)
|
||||
|
||||
def parse_section(self, xml_string):
|
||||
return et.fromstring(xml_string)
|
||||
|
||||
def add_port_to_security_group(self, nsx_sg_id, nsx_vnic_id):
|
||||
userdata = {
|
||||
'nsx_sg_id': nsx_sg_id,
|
||||
'nsx_vnic_id': nsx_vnic_id,
|
||||
'attempt': 1
|
||||
}
|
||||
LOG.info(_LI("Add task to add %(nsx_sg_id)s member to NSX security "
|
||||
"group %(nsx_vnic_id)s"), userdata)
|
||||
task = loopingcall.FixedIntervalLoopingCall(
|
||||
self._add_security_groups_port_mapping,
|
||||
userdata=userdata)
|
||||
task.start(WAIT_INTERVAL / 1000)
|
||||
|
||||
def _add_security_groups_port_mapping(self, userdata):
|
||||
nsx_vnic_id = userdata.get('nsx_vnic_id')
|
||||
nsx_sg_id = userdata.get('nsx_sg_id')
|
||||
attempt = userdata.get('attempt')
|
||||
LOG.debug("Trying to execute task to add %s to %s attempt %d",
|
||||
nsx_vnic_id, nsx_sg_id, attempt)
|
||||
if attempt >= MAX_ATTEMPTS:
|
||||
LOG.error(_LE("Stop task to add %(nsx_vnic_id)s to security group "
|
||||
"%(nsx_sg_id)s"), userdata)
|
||||
LOG.error(_LE("Exception %s"), userdata.get('exception'))
|
||||
raise loopingcall.LoopingCallDone()
|
||||
else:
|
||||
attempt = attempt + 1
|
||||
userdata['attempt'] = attempt
|
||||
|
||||
try:
|
||||
h, c = self.nsxv_manager.vcns.add_member_to_security_group(
|
||||
nsx_sg_id, nsx_vnic_id)
|
||||
LOG.info(_LI("Added %s(nsx_sg_id)s member to NSX security "
|
||||
"group %(nsx_vnic_id)s"), userdata)
|
||||
|
||||
except Exception as e:
|
||||
LOG.debug("NSX security group %(nsx_sg_id)s member add "
|
||||
"failed %(nsx_vnic_id)s - attempt %(attempt)d",
|
||||
{'nsx_sg_id': nsx_sg_id,
|
||||
'nsx_vnic_id': nsx_vnic_id,
|
||||
'attempt': attempt})
|
||||
userdata['exception'] = e
|
||||
LOG.debug("Exception %s", e)
|
||||
return
|
||||
|
||||
raise loopingcall.LoopingCallDone()
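
A small sketch of how the section and rule helpers above compose, building the XML for one allow rule and wrapping it in a section. The group ids, port and names are placeholders; a real caller would pass the plugin's NSXv manager instead of None and push the result through the vcns section APIs.

# Illustrative only: composing a distributed-firewall section with
# NsxSecurityGroupUtils. All ids below are invented.
from vmware_nsx.neutron.plugins.vmware.vshield import securitygroup_utils


def build_example_section():
    utils = securitygroup_utils.NsxSecurityGroupUtils(None)
    # Allow TCP/22 from securitygroup-21 into securitygroup-20.
    source = utils.get_remote_container('securitygroup-21', None)
    rule = utils.get_rule_config('securitygroup-20', 'allow-ssh',
                                 source=source,
                                 services=[('tcp', 22, None, None)])
    section = utils.get_section_with_rules('sg-20-section', [rule])
    return utils.to_xml_string(section)
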
|
@ -23,7 +23,7 @@ from neutron.common import exceptions
|
||||
from neutron.i18n import _LE, _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.openstack.common import loopingcall
|
||||
from neutron.plugins.vmware.vshield.tasks import constants
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import constants
|
||||
|
||||
DEFAULT_INTERVAL = 1000
|
||||
|
||||
@ -184,15 +184,15 @@ class TaskManager():
|
||||
try:
|
||||
status = task._execute_callback(task)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
|
||||
LOG.exception(_LE("Task %(task)s encountered exception in "
|
||||
"%(cb)s"),
|
||||
{'task': str(task),
|
||||
'cb': str(task._execute_callback)})
|
||||
status = constants.TaskStatus.ERROR
|
||||
|
||||
LOG.debug("Task %(task)s return %(status)s", {
|
||||
'task': str(task),
|
||||
LOG.debug("Task %(task)s return %(status)s",
|
||||
{'task': str(task),
|
||||
'status': status})
|
||||
|
||||
task._update_status(status)
|
||||
task._executed()
|
||||
|
||||
@ -203,10 +203,10 @@ class TaskManager():
|
||||
try:
|
||||
task._result_callback(task)
|
||||
except Exception:
|
||||
LOG.exception(_LE("Task %(task)s encountered exception in %(cb)s"),
|
||||
LOG.exception(_LE("Task %(task)s encountered exception in "
|
||||
"%(cb)s"),
|
||||
{'task': str(task),
|
||||
'cb': str(task._result_callback)})
|
||||
|
||||
LOG.debug("Task %(task)s return %(status)s",
|
||||
{'task': str(task), 'status': task.status})
|
||||
|
||||
|
@ -1,3 +1,5 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
@ -12,10 +14,16 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.serialization import jsonutils
|
||||
import time
|
||||
|
||||
from oslo.config import cfg
|
||||
from oslo.serialization import jsonutils
|
||||
import xml.etree.ElementTree as et
|
||||
|
||||
from neutron.i18n import _LI
|
||||
from neutron.openstack.common import log as logging
|
||||
from neutron.plugins.vmware.vshield.common import VcnsApiClient
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.common import VcnsApiClient
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
@ -29,16 +37,27 @@ URI_PREFIX = "/api/4.0/edges"
|
||||
FIREWALL_SERVICE = "firewall/config"
|
||||
FIREWALL_RULE_RESOURCE = "rules"
|
||||
|
||||
# NSXv Constants
|
||||
FIREWALL_PREFIX = '/api/4.0/firewall/globalroot-0/config'
|
||||
SECURITYGROUP_PREFIX = '/api/2.0/services/securitygroup'
|
||||
VDN_PREFIX = '/api/2.0/vdn'
|
||||
SERVICES_PREFIX = '/api/2.0/services'
|
||||
|
||||
# LbaaS Constants
|
||||
LOADBALANCER_SERVICE = "loadbalancer/config"
|
||||
VIP_RESOURCE = "virtualservers"
|
||||
POOL_RESOURCE = "pools"
|
||||
MONITOR_RESOURCE = "monitors"
|
||||
APP_PROFILE_RESOURCE = "applicationprofiles"
|
||||
APP_RULE_RESOURCE = "applicationrules"
|
||||
|
||||
# IPsec VPNaaS Constants
|
||||
IPSEC_VPN_SERVICE = 'ipsec/config'
|
||||
|
||||
# Dhcp constants
|
||||
DHCP_SERVICE = "dhcp/config"
|
||||
DHCP_BINDING_RESOURCE = "bindings"
|
||||
|
||||
|
||||
class Vcns(object):
|
||||
|
||||
@ -48,16 +67,37 @@ class Vcns(object):
|
||||
self.password = password
|
||||
self.jsonapi_client = VcnsApiClient.VcnsApiHelper(address, user,
|
||||
password, 'json')
|
||||
self.xmlapi_client = VcnsApiClient.VcnsApiHelper(address, user,
|
||||
password, 'xml')
|
||||
|
||||
def _client_request(self, client, method, uri, params, headers,
|
||||
encodeParams):
|
||||
retries = max(cfg.CONF.nsxv.retries, 1)
|
||||
delay = 0.5
|
||||
for attempt in range(1, retries + 1):
|
||||
if attempt != 1:
|
||||
time.sleep(delay)
|
||||
delay = min(2 * delay, 60)
|
||||
try:
|
||||
return client(method, uri, params, headers, encodeParams)
|
||||
except exceptions.ServiceConflict as e:
|
||||
if attempt == retries:
|
||||
raise e
|
||||
LOG.info(_LI('NSXv: conflict on request. Trying again.'))
|
||||
|
||||
def do_request(self, method, uri, params=None, format='json', **kwargs):
|
||||
LOG.debug("VcnsApiHelper('%(method)s', '%(uri)s', '%(body)s')", {
|
||||
'method': method,
|
||||
'uri': uri,
|
||||
'body': jsonutils.dumps(params)})
|
||||
headers = kwargs.get('headers')
|
||||
encodeParams = kwargs.get('encode', True)
|
||||
if format == 'json':
|
||||
header, content = self.jsonapi_client.request(method, uri, params)
|
||||
_client = self.jsonapi_client.request
|
||||
else:
|
||||
header, content = self.xmlapi_client.request(method, uri, params)
|
||||
_client = self.xmlapi_client.request
|
||||
header, content = self._client_request(_client, method, uri, params,
|
||||
headers, encodeParams)
|
||||
LOG.debug("Header: '%s'", header)
|
||||
LOG.debug("Content: '%s'", content)
|
||||
if content == '':
|
||||
@ -70,10 +110,18 @@ class Vcns(object):
|
||||
uri = URI_PREFIX + "?async=true"
|
||||
return self.do_request(HTTP_POST, uri, request, decode=False)
|
||||
|
||||
def update_edge(self, edge_id, request):
|
||||
uri = "%s/%s?async=true" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_PUT, uri, request, decode=False)
|
||||
|
||||
def get_edge_id(self, job_id):
|
||||
uri = URI_PREFIX + "/jobs/%s" % job_id
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def get_edge_jobs(self, edge_id):
|
||||
uri = URI_PREFIX + "/%s/jobs" % edge_id
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def get_edge_deploy_status(self, edge_id):
|
||||
uri = URI_PREFIX + "/%s/status?getlatest=false" % edge_id
|
||||
return self.do_request(HTTP_GET, uri, decode="True")
|
||||
@ -82,16 +130,37 @@ class Vcns(object):
|
||||
uri = "%s/%s" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_DELETE, uri)
|
||||
|
||||
def add_vdr_internal_interface(self, edge_id, interface):
|
||||
uri = "%s/%s/interfaces?action=patch&async=true" % (URI_PREFIX,
|
||||
edge_id)
|
||||
return self.do_request(HTTP_POST, uri, interface, decode=True)
|
||||
|
||||
def update_vdr_internal_interface(
|
||||
self, edge_id, interface_index, interface):
|
||||
uri = "%s/%s/interfaces/%s?async=true" % (URI_PREFIX, edge_id,
|
||||
interface_index)
|
||||
return self.do_request(HTTP_PUT, uri, interface, decode=True)
|
||||
|
||||
def delete_vdr_internal_interface(self, edge_id, interface_index):
|
||||
uri = "%s/%s/interfaces/%d?async=true" % (URI_PREFIX, edge_id,
|
||||
interface_index)
|
||||
return self.do_request(HTTP_DELETE, uri, decode=True)
|
||||
|
||||
def update_interface(self, edge_id, vnic):
|
||||
uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic['index'])
|
||||
uri = "%s/%s/vnics/%d?async=true" % (URI_PREFIX, edge_id,
|
||||
vnic['index'])
|
||||
return self.do_request(HTTP_PUT, uri, vnic, decode=True)
|
||||
|
||||
def delete_interface(self, edge_id, vnic_index):
|
||||
uri = "%s/%s/vnics/%d?async=true" % (URI_PREFIX, edge_id, vnic_index)
|
||||
return self.do_request(HTTP_DELETE, uri, decode=True)
|
||||
|
||||
def get_nat_config(self, edge_id):
|
||||
uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def update_nat_config(self, edge_id, nat):
|
||||
uri = "%s/%s/nat/config" % (URI_PREFIX, edge_id)
|
||||
uri = "%s/%s/nat/config?async=true" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_PUT, uri, nat, decode=True)
|
||||
|
||||
def delete_nat_rule(self, edge_id, rule_id):
|
||||
@ -106,8 +175,12 @@ class Vcns(object):
|
||||
uri = URI_PREFIX
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def get_edge_interfaces(self, edge_id):
|
||||
uri = "%s/%s/interfaces" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def update_routes(self, edge_id, routes):
|
||||
uri = "%s/%s/routing/config/static" % (URI_PREFIX, edge_id)
|
||||
uri = "%s/%s/routing/config/static?async=true" % (URI_PREFIX, edge_id)
|
||||
return self.do_request(HTTP_PUT, uri, routes)
|
||||
|
||||
def create_lswitch(self, lsconfig):
|
||||
@ -129,11 +202,13 @@ class Vcns(object):
|
||||
def update_firewall(self, edge_id, fw_req):
|
||||
uri = self._build_uri_path(
|
||||
edge_id, FIREWALL_SERVICE)
|
||||
uri += '?async=true'
|
||||
return self.do_request(HTTP_PUT, uri, fw_req)
|
||||
|
||||
def delete_firewall(self, edge_id):
|
||||
uri = self._build_uri_path(
|
||||
edge_id, FIREWALL_SERVICE, None)
|
||||
uri += '?async=true'
|
||||
return self.do_request(HTTP_DELETE, uri)
|
||||
|
||||
def update_firewall_rule(self, edge_id, vcns_rule_id, fwr_req):
|
||||
@ -270,6 +345,25 @@ class Vcns(object):
|
||||
app_profileid)
|
||||
return self.do_request(HTTP_DELETE, uri)
|
||||
|
||||
def create_app_rule(self, edge_id, app_rule):
|
||||
uri = self._build_uri_path(
|
||||
edge_id, LOADBALANCER_SERVICE,
|
||||
APP_RULE_RESOURCE)
|
||||
return self.do_request(HTTP_POST, uri, app_rule)
|
||||
|
||||
def update_app_rule(self, edge_id, app_ruleid, app_rule):
|
||||
uri = self._build_uri_path(
|
||||
edge_id, LOADBALANCER_SERVICE,
|
||||
APP_RULE_RESOURCE, app_ruleid)
|
||||
return self.do_request(HTTP_PUT, uri, app_rule)
|
||||
|
||||
def delete_app_rule(self, edge_id, app_ruleid):
|
||||
uri = self._build_uri_path(
|
||||
edge_id, LOADBALANCER_SERVICE,
|
||||
APP_RULE_RESOURCE,
|
||||
app_ruleid)
|
||||
return self.do_request(HTTP_DELETE, uri)
|
||||
|
||||
def update_ipsec_config(self, edge_id, ipsec_config):
|
||||
uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
|
||||
return self.do_request(HTTP_PUT, uri, ipsec_config)
|
||||
@ -282,6 +376,156 @@ class Vcns(object):
|
||||
uri = self._build_uri_path(edge_id, IPSEC_VPN_SERVICE)
|
||||
return self.do_request(HTTP_GET, uri)
|
||||
|
||||
def create_virtual_wire(self, vdn_scope_id, request):
|
||||
"""Creates a VXLAN virtual wire
|
||||
|
||||
The method will return the virtual wire ID.
|
||||
"""
|
||||
uri = '/api/2.0/vdn/scopes/%s/virtualwires' % vdn_scope_id
|
||||
return self.do_request(HTTP_POST, uri, request, format='xml',
|
||||
decode=False)
|
||||
|
||||
def delete_virtual_wire(self, virtualwire_id):
|
||||
"""Deletes a virtual wire."""
|
||||
uri = '/api/2.0/vdn/virtualwires/%s' % virtualwire_id
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml')
|
||||
|
||||
def create_port_group(self, dvs_id, request):
|
||||
"""Creates a port group on a DVS
|
||||
|
||||
The method will return the port group ID.
|
||||
"""
|
||||
uri = '/api/2.0/xvs/switches/%s/networks' % dvs_id
|
||||
return self.do_request(HTTP_POST, uri, request, format='xml',
|
||||
decode=False)
|
||||
|
||||
def delete_port_group(self, dvs_id, portgroup_id):
|
||||
"""Deletes a portgroup."""
|
||||
uri = '/api/2.0/xvs/switches/%s/networks/%s' % (dvs_id,
|
||||
portgroup_id)
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
|
||||
|
||||
def query_interface(self, edge_id, vnic_index):
|
||||
uri = "%s/%s/vnics/%d" % (URI_PREFIX, edge_id, vnic_index)
|
||||
return self.do_request(HTTP_GET, uri, decode=True)
|
||||
|
||||
def reconfigure_dhcp_service(self, edge_id, request_config):
|
||||
"""Reconfigure dhcp static bindings in the created Edge."""
|
||||
uri = "/api/4.0/edges/%s/dhcp/config?async=true" % edge_id
|
||||
|
||||
return self.do_request(HTTP_PUT, uri, request_config)
|
||||
|
||||
def query_dhcp_configuration(self, edge_id):
|
||||
"""Query DHCP configuration from the specific edge."""
|
||||
uri = "/api/4.0/edges/%s/dhcp/config" % edge_id
|
||||
return self.do_request(HTTP_GET, uri)
|
||||
|
||||
def create_dhcp_binding(self, edge_id, request_config):
|
||||
"""Append one dhcp static binding on the edge."""
|
||||
uri = self._build_uri_path(edge_id,
|
||||
DHCP_SERVICE, DHCP_BINDING_RESOURCE,
|
||||
is_async=True)
|
||||
return self.do_request(HTTP_POST, uri, request_config, decode=False)
|
||||
|
||||
def delete_dhcp_binding(self, edge_id, binding_id):
|
||||
"""Delete one dhcp static binding on the edge."""
|
||||
uri = self._build_uri_path(edge_id,
|
||||
DHCP_SERVICE, DHCP_BINDING_RESOURCE,
|
||||
binding_id, is_async=True)
|
||||
return self.do_request(HTTP_DELETE, uri, decode=False)
|
||||
|
||||
def create_security_group(self, request):
|
||||
"""Creates a security group container in nsx.
|
||||
|
||||
The method will return the security group ID.
|
||||
"""
|
||||
uri = '%s/globalroot-0' % (SECURITYGROUP_PREFIX)
|
||||
return self.do_request(HTTP_POST, uri, request, format='xml',
|
||||
decode=False)
|
||||
|
||||
def delete_security_group(self, securitygroup_id):
|
||||
"""Deletes a security group container."""
|
||||
uri = '%s/%s?force=true' % (SECURITYGROUP_PREFIX, securitygroup_id)
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
|
||||
|
||||
def create_section(self, type, request):
|
||||
"""Creates a layer 3 or layer 2 section in nsx rule table.
|
||||
|
||||
The method will return the uri to newly created section.
|
||||
"""
|
||||
if type == 'ip':
|
||||
sec_type = 'layer3sections'
|
||||
else:
|
||||
sec_type = 'layer2sections'
|
||||
uri = '%s/%s?autoSaveDraft=false' % (FIREWALL_PREFIX, sec_type)
|
||||
return self.do_request(HTTP_POST, uri, request, format='xml',
|
||||
decode=False, encode=False)
|
||||
|
||||
def update_section(self, section_uri, request, h):
|
||||
"""Replaces a section in nsx rule table."""
|
||||
uri = '%s?autoSaveDraft=false' % section_uri
|
||||
headers = self._get_section_header(section_uri, h)
|
||||
return self.do_request(HTTP_PUT, uri, request, format='xml',
|
||||
decode=False, encode=False, headers=headers)
|
||||
|
||||
def delete_section(self, section_uri):
|
||||
"""Deletes a section in nsx rule table."""
|
||||
uri = '%s?autoSaveDraft=false' % section_uri
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
|
||||
|
||||
def get_section(self, section_uri):
|
||||
return self.do_request(HTTP_GET, section_uri, format='xml',
|
||||
decode=False)
|
||||
|
||||
def get_section_id(self, section_name):
|
||||
"""Retrieve the id of a section from nsx."""
|
||||
uri = FIREWALL_PREFIX
|
||||
h, section_list = self.do_request(HTTP_GET, uri, decode=False,
|
||||
format='xml')
|
||||
|
||||
root = et.fromstring(section_list)
|
||||
|
||||
for sec in root.iter('section'):
|
||||
if sec.attrib['name'] == section_name:
|
||||
return sec.attrib['id']
|
||||
|
||||
def update_section_by_id(self, id, type, request):
|
||||
"""Update a section while building its uri from the id."""
|
||||
if type == 'ip':
|
||||
sec_type = 'layer3sections'
|
||||
else:
|
||||
sec_type = 'layer2sections'
|
||||
section_uri = '%s/%s/%s' % (FIREWALL_PREFIX, sec_type, id)
|
||||
self.update_section(section_uri, request, h=None)
|
||||
|
||||
def _get_section_header(self, section_uri, h=None):
|
||||
if h is None:
|
||||
h, c = self.do_request(HTTP_GET, section_uri, format='xml',
|
||||
decode=False)
|
||||
etag = h['etag']
|
||||
headers = {'If-Match': etag}
|
||||
return headers
|
||||
|
||||
def remove_rule_from_section(self, section_uri, rule_id):
|
||||
"""Deletes a rule from nsx section table."""
|
||||
uri = '%s/rules/%s?autoSaveDraft=false' % (section_uri, rule_id)
|
||||
headers = self._get_section_header(section_uri)
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml',
|
||||
headers=headers)
|
||||
|
||||
def add_member_to_security_group(self, security_group_id, member_id):
|
||||
"""Adds a vnic member to nsx security group."""
|
||||
uri = '%s/%s/members/%s' % (SECURITYGROUP_PREFIX,
|
||||
security_group_id, member_id)
|
||||
return self.do_request(HTTP_PUT, uri, format='xml', decode=False)
|
||||
|
||||
def remove_member_from_security_group(self, security_group_id,
|
||||
member_id):
|
||||
"""Removes a vnic member from nsx security group."""
|
||||
uri = '%s/%s/members/%s' % (SECURITYGROUP_PREFIX,
|
||||
security_group_id, member_id)
|
||||
return self.do_request(HTTP_DELETE, uri, format='xml', decode=False)
|
||||
|
||||
def _build_uri_path(self, edge_id,
|
||||
service,
|
||||
resource=None,
|
||||
@ -291,13 +535,61 @@ class Vcns(object):
|
||||
relations=None,
|
||||
filters=None,
|
||||
types=None,
|
||||
is_attachment=False):
|
||||
is_attachment=False,
|
||||
is_async=False):
|
||||
uri_prefix = "%s/%s/%s" % (URI_PREFIX, edge_id, service)
|
||||
if resource:
|
||||
res_path = resource
|
||||
if resource_id:
|
||||
res_path += "/%s" % resource_id
|
||||
res_path = resource + (resource_id and "/%s" % resource_id or '')
|
||||
uri_path = "%s/%s" % (uri_prefix, res_path)
|
||||
else:
|
||||
uri_path = uri_prefix
|
||||
if is_async:
|
||||
return (uri_path + "?async=true")
|
||||
else:
|
||||
return uri_path
|
||||
|
||||
def _scopingobjects_lookup(self, type_name, object_id):
|
||||
uri = '%s/usermgmt/scopingobjects' % SERVICES_PREFIX
|
||||
h, so_list = self.do_request(HTTP_GET, uri, decode=False,
|
||||
format='xml')
|
||||
|
||||
root = et.fromstring(so_list)
|
||||
for obj in root.iter('object'):
|
||||
if (obj.find('objectTypeName').text == type_name and
|
||||
obj.find('objectId').text == object_id):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def validate_datacenter_moid(self, object_id):
|
||||
return self._scopingobjects_lookup('Datacenter', object_id)
|
||||
|
||||
def validate_network(self, object_id):
|
||||
return (self._scopingobjects_lookup('Network', object_id) or
|
||||
self._scopingobjects_lookup('DistributedVirtualPortgroup',
|
||||
object_id) or
|
||||
self._scopingobjects_lookup('VirtualWire', object_id))
|
||||
|
||||
def validate_vdn_scope(self, object_id):
|
||||
uri = '%s/scopes' % VDN_PREFIX
|
||||
h, scope_list = self.do_request(HTTP_GET, uri, decode=False,
|
||||
format='xml')
|
||||
|
||||
root = et.fromstring(scope_list)
|
||||
for obj_id in root.iter('objectId'):
|
||||
if obj_id.text == object_id:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def validate_dvs(self, object_id):
|
||||
uri = '%s/switches' % VDN_PREFIX
|
||||
h, dvs_list = self.do_request(HTTP_GET, uri, decode=False,
|
||||
format='xml')
|
||||
|
||||
root = et.fromstring(dvs_list)
|
||||
for obj_id in root.iter('objectId'):
|
||||
if obj_id.text == object_id:
|
||||
return True
|
||||
|
||||
return False
|
||||
|
47 vmware_nsx/neutron/plugins/vmware/vshield/vcns_driver.py
@ -0,0 +1,47 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2013 VMware, Inc
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config import cfg
|
||||
|
||||
from neutron.openstack.common import log as logging
|
||||
from vmware_nsx.neutron.plugins.vmware.common import config # noqa
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield import edge_appliance_driver
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield import edge_firewall_driver
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks
|
||||
from vmware_nsx.neutron.plugins.vmware.vshield import vcns
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VcnsDriver(edge_appliance_driver.EdgeApplianceDriver,
|
||||
edge_firewall_driver.EdgeFirewallDriver):
|
||||
|
||||
def __init__(self, callbacks):
|
||||
super(VcnsDriver, self).__init__()
|
||||
|
||||
self.callbacks = callbacks
|
||||
self.vcns_uri = cfg.CONF.nsxv.manager_uri
|
||||
self.vcns_user = cfg.CONF.nsxv.user
|
||||
self.vcns_passwd = cfg.CONF.nsxv.password
|
||||
self.datacenter_moid = cfg.CONF.nsxv.datacenter_moid
|
||||
self.deployment_container_id = cfg.CONF.nsxv.deployment_container_id
|
||||
self.resource_pool_id = cfg.CONF.nsxv.resource_pool_id
|
||||
self.datastore_id = cfg.CONF.nsxv.datastore_id
|
||||
self.external_network = cfg.CONF.nsxv.external_network
|
||||
interval = cfg.CONF.nsxv.task_status_check_interval
|
||||
self.task_manager = tasks.TaskManager(interval)
|
||||
self.task_manager.start()
|
||||
self.vcns = vcns.Vcns(self.vcns_uri, self.vcns_user, self.vcns_passwd)
|
@ -16,13 +16,15 @@

import os

from neutron.plugins.vmware.api_client import client as nsx_client
from neutron.plugins.vmware.api_client import eventlet_client
from neutron.plugins.vmware import extensions
import neutron.plugins.vmware.plugin as neutron_plugin
from neutron.plugins.vmware.vshield.common import VcnsApiClient as vcnsapi
from neutron.plugins.vmware.vshield import vcns
import neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver
from vmware_nsx.neutron.plugins.vmware.api_client import client as nsx_client
from vmware_nsx.neutron.plugins.vmware.api_client import eventlet_client
from vmware_nsx.neutron.plugins.vmware import extensions
import vmware_nsx.neutron.plugins.vmware.plugin as neutron_plugin
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
    VcnsApiClient as vcnsapi)
from vmware_nsx.neutron.plugins.vmware.vshield import edge_utils
from vmware_nsx.neutron.plugins.vmware.vshield import vcns
import vmware_nsx.neutron.plugins.vmware.vshield.vcns_driver as vcnsdriver


plugin = neutron_plugin.NsxPlugin
@ -31,6 +33,7 @@ evt_client = eventlet_client.EventletApiClient
vcns_class = vcns.Vcns
vcns_driver = vcnsdriver.VcnsDriver
vcns_api_helper = vcnsapi.VcnsApiHelper
edge_manage_class = edge_utils.EdgeManager

STUBS_PATH = os.path.join(os.path.dirname(__file__), 'etc')
NSXEXT_PATH = os.path.dirname(extensions.__file__)
@ -40,6 +43,8 @@ CLIENT_NAME = '%s.%s' % (evt_client.__module__, evt_client.__name__)
VCNS_NAME = '%s.%s' % (vcns_class.__module__, vcns_class.__name__)
VCNS_DRIVER_NAME = '%s.%s' % (vcns_driver.__module__, vcns_driver.__name__)
VCNSAPI_NAME = '%s.%s' % (vcns_api_helper.__module__, vcns_api_helper.__name__)
EDGE_MANAGE_NAME = '%s.%s' % (edge_manage_class.__module__,
                              edge_manage_class.__name__)


def get_fake_conf(filename):
@ -47,4 +52,5 @@ def get_fake_conf(filename):


def nsx_method(method_name, module_name='nsxlib'):
    return '%s.%s.%s' % ('neutron.plugins.vmware', module_name, method_name)
    return '%s.%s.%s' % ('vmware_nsx.neutron.plugins.vmware', module_name,
                         method_name)
@ -16,8 +16,8 @@

import httplib

from neutron.plugins.vmware import api_client
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware import api_client


class ApiCommonTest(base.BaseTestCase):
@ -21,9 +21,11 @@ import mock

from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import eventlet_client as client
from neutron.plugins.vmware.api_client import eventlet_request as request
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.api_client import (
    eventlet_client as client)
from vmware_nsx.neutron.plugins.vmware.api_client import (
    eventlet_request as request)
from vmware_nsx.neutron.tests.unit import vmware
@ -17,9 +17,9 @@ from oslo.db import exception as d_exc

from neutron import context
from neutron.db import models_v2
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.dbexts import models
from neutron.tests.unit import testlib_api
from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db


class NsxDBTestCase(testlib_api.SqlTestCase):
@ -5,3 +5,10 @@ nsx_user=foo
nsx_password=bar
default_l3_gw_service_uuid = whatever
default_l2_gw_service_uuid = whatever

[nsxv]
manager_uri = https://fake_manager
user = fake_user
password = fake_password
vdn_scope_id = fake_vdn_scope_id
dvs_id = fake_dvs_id
@ -22,9 +22,9 @@ from neutron.api.v2 import attributes
from neutron.common import test_lib
from neutron import context
from neutron.extensions import agent
from neutron.plugins.vmware.api_client import version
from neutron.plugins.vmware.common import sync
from neutron.tests.unit import test_db_plugin
from vmware_nsx.neutron.plugins.vmware.api_client import version
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -28,15 +28,15 @@ from neutron import manager
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.dbexts import networkgw_db
from neutron.plugins.vmware.extensions import networkgw
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron import quota
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extensions
from neutron.tests.unit import testlib_plugin
from vmware_nsx.neutron.plugins.vmware.extensions import networkgw
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin
@ -16,8 +16,8 @@
import mock

from neutron.common import test_lib
from neutron.plugins.vmware.common import sync
from neutron.tests.unit import test_extension_portsecurity as psec
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -21,9 +21,9 @@ import webob.exc

from neutron import context
from neutron.plugins.vmware.dbexts import qos_db
from neutron.plugins.vmware.extensions import qos as ext_qos
from neutron.plugins.vmware import nsxlib
from neutron.tests.unit import test_extensions
from vmware_nsx.neutron.plugins.vmware.extensions import qos as ext_qos
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware import test_nsx_plugin
@ -0,0 +1,108 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo.config import cfg
from oslo.db import exception as d_exc

from neutron.api.v2 import attributes as attr
from neutron import context as neutron_context
from neutron.db import db_base_plugin_v2
from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.tests.unit import test_db_plugin
from vmware_nsx.neutron.plugins.vmware.dbexts import vnic_index_db
from vmware_nsx.neutron.plugins.vmware.extensions import vnic_index as vnicidx
from vmware_nsx.neutron.tests.unit import vmware


DB_PLUGIN_KLASS = ('vmware_nsx.neutron.tests.unit.vmware.extensions.'
                   'test_vnic_index.VnicIndexTestPlugin')

_uuid = uuidutils.generate_uuid


class VnicIndexTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
                          vnic_index_db.VnicIndexDbMixin):

    supported_extension_aliases = ["vnic-index"]

    def update_port(self, context, id, port):
        p = port['port']
        current_port = super(VnicIndexTestPlugin, self).get_port(context, id)
        vnic_idx = p.get(vnicidx.VNIC_INDEX)
        device_id = current_port['device_id']
        if attr.is_attr_set(vnic_idx) and device_id != '':
            self._set_port_vnic_index_mapping(
                context, id, device_id, vnic_idx)

        with context.session.begin(subtransactions=True):
            p = port['port']
            ret_port = super(VnicIndexTestPlugin, self).update_port(
                context, id, port)
            vnic_idx = current_port.get(vnicidx.VNIC_INDEX)
            if (attr.is_attr_set(vnic_idx) and
                    device_id != ret_port['device_id']):
                self._delete_port_vnic_index_mapping(
                    context, id)
        return ret_port

    def delete_port(self, context, id):
        port_db = self.get_port(context, id)
        vnic_idx = port_db.get(vnicidx.VNIC_INDEX)
        if attr.is_attr_set(vnic_idx):
            self._delete_port_vnic_index_mapping(context, id)
        with context.session.begin(subtransactions=True):
            super(VnicIndexTestPlugin, self).delete_port(context, id)


class VnicIndexDbTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    def setUp(self, plugin=None, ext_mgr=None):
        plugin = plugin or DB_PLUGIN_KLASS
        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
        super(VnicIndexDbTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)

    def _port_index_update(self, port_id, index):
        data = {'port': {'vnic_index': index}}
        req = self.new_update_request('ports', data, port_id)
        res = self.deserialize('json', req.get_response(self.api))
        return res

    def test_vnic_index_db(self):
        plugin = manager.NeutronManager.get_plugin()
        vnic_index = 2
        device_id = _uuid()
        context = neutron_context.get_admin_context()
        with self.port(device_id=device_id,
                       device_owner='compute:None') as port:
            port_id = port['port']['id']
            res = self._port_index_update(port_id, vnic_index)
            self.assertEqual(res['port'][vnicidx.VNIC_INDEX], vnic_index)
            # Port should be associated with at most one vnic index
            self.assertRaises(d_exc.DBDuplicateEntry,
                              plugin._set_port_vnic_index_mapping,
                              context, port_id, device_id, 1)
            # Only one Port can be associated with a specific index on a device
            self.assertRaises(d_exc.DBDuplicateEntry,
                              plugin._set_port_vnic_index_mapping,
                              context, _uuid(), device_id, vnic_index)
            # Check that the call for _delete_port_vnic_index remove the row
            # from the table

            # TODO(kobis): deletion was removed from port - fix this assert
            # self.assertIsNone(plugin._get_port_vnic_index(context, port_id))


class TestVnicIndex(VnicIndexDbTestCase):
    pass
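The test above asserts two uniqueness rules for the vnic-index mapping: a port carries at most one vnic index, and a given index can be used only once per device. A toy, in-memory illustration of those constraints follows; this is not the plugin code, which enforces them through the DB schema and surfaces violations as DBDuplicateEntry.

# Illustrative only: the same uniqueness rules expressed with plain dicts.
mapping = {}   # (device_id, vnic_index) -> port_id
by_port = {}   # port_id -> vnic_index

def set_port_vnic_index(device_id, port_id, index):
    # Mirrors what _set_port_vnic_index_mapping guarantees via unique
    # constraints; here a plain exception stands in for DBDuplicateEntry.
    if port_id in by_port or (device_id, index) in mapping:
        raise ValueError('duplicate vnic index mapping')
    mapping[(device_id, index)] = port_id
    by_port[port_id] = index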
@ -0,0 +1,95 @@
# Copyright 2014 VMware, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json

import mock

from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.vshield import nsxv_loadbalancer
from vmware_nsx.neutron.plugins.vmware.vshield import vcns


class NsxvLoadbalancerTestCase(base.BaseTestCase):

    EDGE_OBJ_JSON = (
        '{"accelerationEnabled":false,"applicationProfile":[{'
        '"applicationProfileId":"applicationProfile-1","insertXForwardedFor":'
        'false,"name":"MDSrvProxy","persistence":{"cookieMode":"insert",'
        '"cookieName":"JSESSIONID","expire":"30","method":"cookie"},'
        '"serverSslEnabled":false,"sslPassthrough":false,"template":"HTTP"}],'
        '"applicationRule":[],"enableServiceInsertion":false,"enabled":true,'
        '"featureType":"loadbalancer_4.0","logging":{"enable":false,'
        '"logLevel":"info"},"monitor":[{"interval":10,"maxRetries":3,"method":'
        '"GET","monitorId":"monitor-1","name":"MDSrvMon","timeout":15,"type":'
        '"http","url":"/"}],"pool":[{"algorithm":"round-robin",'
        '"applicationRuleId":[],"member":[{"condition":"enabled","ipAddress":'
        '"192.168.0.39","maxConn":0,"memberId":"member-1","minConn":0,'
        '"monitorPort":8775,"name":"Member-1","port":8775,"weight":1}],'
        '"monitorId":["monitor-1"],"name":"MDSrvPool","poolId":"pool-1",'
        '"transparent":false}],"version":6,"virtualServer":[{'
        '"accelerationEnabled":false,"applicationProfileId":'
        '"applicationProfile-1","applicationRuleId":[],"connectionLimit":0,'
        '"defaultPoolId":"pool-1","enableServiceInsertion":false,'
        '"enabled":true,"ipAddress":"169.254.0.3","name":"MdSrv",'
        '"port":"8775","protocol":"http","virtualServerId":'
        '"virtualServer-1"}]}')

    OUT_OBJ_JSON = (
        '{"accelerationEnabled": false, "applicationProfile": [{'
        '"applicationProfileId": "applicationProfile-1", '
        '"insertXForwardedFor": false, "name": "MDSrvProxy", "persistence": '
        '{"expire": "30", "method": "cookie"}, "serverSslEnabled": false, '
        '"sslPassthrough": false, "template": "HTTP"}],'
        ' "enableServiceInsertion": false, "enabled": true, "featureType": '
        '"loadbalancer_4.0", "monitor": [{"interval": 10, "maxRetries": 3, '
        '"method": "GET", "monitorId": "monitor-1", "name": "MDSrvMon", '
        '"timeout": 15, "type": "http", "url": "/"}], "pool": [{"algorithm":'
        ' "round-robin", "member": [{"condition": "enabled", "ipAddress": '
        '"192.168.0.39", "maxConn": 0, "memberId": "member-1", "minConn": 0, '
        '"monitorPort": 8775, "name": "Member-1", "port": 8775, "weight": 1}],'
        ' "monitorId": ["monitor-1"], "name": "MDSrvPool", "poolId": "pool-1",'
        ' "transparent": false}], "virtualServer": [{"accelerationEnabled": '
        'false, "applicationProfileId": "applicationProfile-1", '
        '"connectionLimit": 0, "defaultPoolId": "pool-1", '
        '"enableServiceInsertion": false, "enabled": true, "ipAddress": '
        '"169.254.0.3", "name": "MdSrv", "port": "8775", "protocol": '
        '"http", "virtualServerId": "virtualServer-1"}]}')

    LB_URI = '/api/4.0/edges/%s/loadbalancer/config?async=true'
    EDGE_1 = 'edge-x'
    EDGE_2 = 'edge-y'

    def setUp(self):
        super(NsxvLoadbalancerTestCase, self).setUp()
        self._lb = nsxv_loadbalancer.NsxvLoadbalancer()
        self._vcns = vcns.Vcns(None, None, None)

    def test_get_edge_loadbalancer(self):
        h = None
        v = json.loads(self.EDGE_OBJ_JSON)

        with mock.patch.object(self._vcns, 'do_request',
                               return_value=(h, v)) as mock_do_request:
            lb = nsxv_loadbalancer.NsxvLoadbalancer.get_loadbalancer(
                self._vcns, self.EDGE_1)
            lb.submit_to_backend(self._vcns, self.EDGE_2)

            mock_do_request.assert_called_with(
                vcns.HTTP_PUT,
                self.LB_URI % self.EDGE_2,
                self.OUT_OBJ_JSON,
                format='json',
                encode=False)
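EDGE_OBJ_JSON is what the stubbed do_request returns when the load balancer configuration is read from EDGE_1, and OUT_OBJ_JSON is the body the test expects submit_to_backend to PUT to EDGE_2. A quick, purely illustrative way to see what the round trip drops is to parse both constants and compare their top-level keys:

# Illustrative only: compare the JSON read from the backend with the JSON
# re-submitted to it. Backend-managed fields such as "applicationRule",
# "logging" and "version" are present in the edge object but are not sent
# back on submit.
import json

edge = json.loads(NsxvLoadbalancerTestCase.EDGE_OBJ_JSON)
out = json.loads(NsxvLoadbalancerTestCase.OUT_OBJ_JSON)
print(sorted(set(edge) - set(out)))  # ['applicationRule', 'logging', 'version']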
@ -16,13 +16,13 @@

import mock

from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.api_client import version
from neutron.plugins.vmware.common import config  # noqa
from neutron.plugins.vmware import nsx_cluster as cluster
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from vmware_nsx.neutron.plugins.vmware.api_client import client
from vmware_nsx.neutron.plugins.vmware.api_client import version
from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
from vmware_nsx.neutron.plugins.vmware import nsx_cluster as cluster
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -18,11 +18,11 @@ import mock
from oslo.serialization import jsonutils

from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.common import utils as nsx_utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from vmware_nsx.neutron.plugins.vmware.common import utils as nsx_utils
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base

_uuid = test_api_v2._uuid
@ -19,9 +19,9 @@ from oslo.serialization import jsonutils
from neutron.common import exceptions
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import lsn as lsnlib
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsnlib


class LSNTestCase(base.BaseTestCase):
@ -29,7 +29,7 @@ class LSNTestCase(base.BaseTestCase):
    def setUp(self):
        super(LSNTestCase, self).setUp()
        self.mock_request_p = mock.patch(
            'neutron.plugins.vmware.nsxlib.do_request')
            'vmware_nsx.neutron.plugins.vmware.nsxlib.do_request')
        self.mock_request = self.mock_request_p.start()
        self.cluster = mock.Mock()
        self.cluster.default_service_cluster_uuid = 'foo'
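The string handed to mock.patch is resolved as an import path when the patcher starts, which is why every patch target has to move into the vmware_nsx namespace along with the code. A small sketch of the same pattern, using the new target path from the hunk above; the stub return value is made up for illustration:

# Illustrative only: patch the function where it now lives; the old
# 'neutron.plugins.vmware.nsxlib.do_request' path would no longer name
# the code under test.
import mock

patcher = mock.patch(
    'vmware_nsx.neutron.plugins.vmware.nsxlib.do_request',
    return_value={'status': 'ok'})   # stub response, not a real NSX reply
mock_do_request = patcher.start()
try:
    pass  # anything importing vmware_nsx...nsxlib now sees the mock
finally:
    patcher.stop()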
@ -18,8 +18,8 @@ import mock

from neutron.common import exceptions
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import queue as queuelib
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import queue as queuelib
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base
@ -21,13 +21,14 @@ from oslo.config import cfg
from neutron.common import exceptions
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from vmware_nsx.neutron.plugins.vmware.api_client import (
    version as version_module)
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base

_uuid = test_api_v2._uuid
@ -15,9 +15,9 @@
#

from neutron.common import exceptions
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
from neutron.tests.unit import test_api_v2
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import secgroup as secgrouplib
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base

_uuid = test_api_v2._uuid
@ -19,9 +19,9 @@ import mock

from neutron.common import constants
from neutron.common import exceptions
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware.nsxlib import switch as switchlib
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base

_uuid = test_api_v2._uuid
@ -15,10 +15,11 @@
#

from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.nsxlib import router as routerlib
from neutron.plugins.vmware.nsxlib import versioning
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.api_client import (
    version as version_module)
from vmware_nsx.neutron.plugins.vmware.nsxlib import router as routerlib
from vmware_nsx.neutron.plugins.vmware.nsxlib import versioning


class TestVersioning(base.BaseTestCase):
@ -18,9 +18,9 @@ from oslo.config import cfg

from neutron.common import constants
from neutron.common import test_lib
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.dhcp_meta import rpc
from neutron.tests.unit.openvswitch import test_agent_scheduler as test_base
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -23,13 +23,13 @@ from neutron import context
from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dbexts import lsn_db
from neutron.plugins.vmware.dhcp_meta import constants
from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
from neutron.plugins.vmware.dhcp_meta import migration as mig_man
from neutron.plugins.vmware.dhcp_meta import nsx
from neutron.plugins.vmware.dhcp_meta import rpc
from neutron.tests import base
from neutron.tests.unit import testlib_api
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import constants
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import migration as mig_man
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import nsx
from vmware_nsx.neutron.plugins.vmware.dhcp_meta import rpc


class DhcpMetadataBuilderTestCase(base.BaseTestCase):
@ -20,14 +20,14 @@ from oslo.config import cfg

from neutron import manager
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import version
from neutron.plugins.vmware.common import config  # noqa
from neutron.plugins.vmware.common import exceptions
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware import nsx_cluster
from neutron.plugins.vmware.nsxlib import lsn as lsnlib
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.api_client import client
from vmware_nsx.neutron.plugins.vmware.api_client import version
from vmware_nsx.neutron.plugins.vmware.common import config  # noqa
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.plugins.vmware import nsx_cluster
from vmware_nsx.neutron.plugins.vmware.nsxlib import lsn as lsnlib
from vmware_nsx.neutron.tests.unit import vmware

BASE_CONF_PATH = vmware.get_fake_conf('neutron.conf.test')
@ -39,18 +39,19 @@ from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware import nsxlib
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
import neutron.tests.unit.test_extension_security_group as ext_sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from neutron.tests.unit import testlib_api
from vmware_nsx.neutron.plugins.vmware.api_client import (
    version as version_module)
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware.dbexts import db as nsx_db
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -27,17 +27,17 @@ from neutron.common import exceptions as n_exc
from neutron import context
from neutron.extensions import l3
from neutron.openstack.common import log
from neutron.plugins.vmware.api_client import client
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.dbexts import db
from neutron.plugins.vmware import nsx_cluster as cluster
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware import plugin
from neutron.tests import base
from neutron.tests.unit import test_api_v2
from neutron.tests.unit import testlib_api
from vmware_nsx.neutron.plugins.vmware.api_client import client
from vmware_nsx.neutron.plugins.vmware.api_client import version
from vmware_nsx.neutron.plugins.vmware.common import sync
from vmware_nsx.neutron.plugins.vmware.dbexts import db
from vmware_nsx.neutron.plugins.vmware import nsx_cluster as cluster
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.plugins.vmware import plugin
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.apiclient import fake
@ -21,11 +21,11 @@ from neutron.extensions import providernet as pnet
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import nsx_utils
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import models
from neutron.plugins.vmware import nsxlib
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.common import nsx_utils
from vmware_nsx.neutron.plugins.vmware.common import utils
from vmware_nsx.neutron.plugins.vmware import nsxlib
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.nsxlib import base as nsx_base

1614  vmware_nsx/neutron/tests/unit/vmware/test_nsx_v_plugin.py  Normal file
File diff suppressed because it is too large
@ -14,10 +14,13 @@

import copy

from oslo.serialization import jsonutils
from oslo.serialization import jsonutils as json
import xml.etree.ElementTree as ET

from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.vshield.common import exceptions
from vmware_nsx.neutron.plugins.vmware.vshield.common import exceptions

SECTION_LOCATION_HEADER = '/api/4.0/firewall/globalroot-0/config/%s/%s'


class FakeVcns(object):
@ -55,6 +58,13 @@ class FakeVcns(object):
        self._fake_monitors_dict = {}
        self._fake_app_profiles_dict = {}
        self._fake_loadbalancer_config = {}
        self._fake_virtual_wires = {}
        self._virtual_wire_id = 0
        self._fake_portgroups = {}
        self._portgroup_id = 0
        self._securitygroups = {'ids': 0, 'names': set()}
        self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()}
        self._dhcp_bindings = {}

    def set_fake_nsx_api(self, fake_nsx_api):
        self._fake_nsx_api = fake_nsx_api
@ -80,7 +90,7 @@ class FakeVcns(object):
                'moduleName': 'vShield Edge',
                'errorData': None
            }
            return (header, jsonutils.dumps(response))
            return (header, json.dumps(response))

        self._job_idx = self._job_idx + 1
        job_id = "jobdata-%d" % self._job_idx
@ -91,7 +101,8 @@ class FakeVcns(object):
            'name': request['name'],
            'request': request,
            'nat_rules': None,
            'nat_rule_id': 0
            'nat_rule_id': 0,
            'interface_index': 1
        }
        header = {
            'status': 200,
@ -100,6 +111,17 @@ class FakeVcns(object):
        response = ''
        return (header, response)

    def update_edge(self, edge_id, request):
        if edge_id not in self._edges:
            raise Exception(_("Edge %s does not exist") % edge_id)
        edge = self._edges[edge_id]
        edge['name'] = request['name']
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def get_edge_id(self, job_id):
        if job_id not in self._jobs:
            raise Exception(_("Job %s does not nexist") % job_id)
@ -133,6 +155,47 @@ class FakeVcns(object):
        response = ''
        return (header, response)

    def add_vdr_internal_interface(self, edge_id, interface):
        interface = interface['interfaces'][0]
        if not self._edges[edge_id].get('interfaces'):
            self._edges[edge_id]['interfaces'] = []
        index = len(self._edges[edge_id]['interfaces'])
        interface['index'] = str(index)
        self._edges[edge_id]['interfaces'].append(interface)
        header = {
            'status': 200
        }
        response = {"interfaces": [{"index": str(index)}]}
        return (header, response)

    def get_edge_interfaces(self, edge_id):
        if not self._edges[edge_id].get('interfaces'):
            self._edges[edge_id]['interfaces'] = []
        header = {
            'status': 200
        }
        response = {"interfaces": self._edges[edge_id].get('interfaces', [])}
        return (header, response)

    def update_vdr_internal_interface(
        self, edge_id, interface_index, interface):
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def delete_vdr_internal_interface(self, edge_id, interface_index):
        for interface in self._edges[edge_id].get('interfaces', []):
            if int(interface['index']) == int(interface_index):
                header = {
                    'status': 200
                }
                break
        header = {'status': 404}
        response = ''
        return (header, response)

    def update_interface(self, edge_id, vnic):
        header = {
            'status': 200
@ -140,6 +203,73 @@ class FakeVcns(object):
        response = ''
        return (header, response)

    def delete_interface(self, edge_id, vnic_index):
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def query_interface(self, edge_id, vnic_index):
        header = {
            'status': 200
        }
        response = {
            'label': 'vNic_1',
            'name': 'internal1',
            'address_groups': {'address_groups': []},
            'mtu': 1500,
            'type': 'trunk',
            'subInterfaces': {'subInterfaces': []},
            'isConnected': True
        }
        return (header, response)

    def reconfigure_dhcp_service(self, edge_id, request):
        header = {
            'status': 201
        }
        response = ''
        return (header, response)

    def query_dhcp_configuration(self, edge_id):
        header = {
            'status': 200
        }
        response = {
            "featureType": "dhcp_4.0",
            "version": 14,
            "enabled": True,
            "staticBindings": {"staticBindings": [{}]},
            "ipPools": {"ipPools": []}
        }
        return (header, response)

    def create_dhcp_binding(self, edge_id, request):
        if not self._dhcp_bindings.get(edge_id):
            self._dhcp_bindings[edge_id] = {}
            self._dhcp_bindings[edge_id]['idx'] = 0
        binding_idx = self._dhcp_bindings[edge_id]['idx']
        binding_idx_str = "binding-" + str(binding_idx)
        self._dhcp_bindings[edge_id][binding_idx_str] = request
        self._dhcp_bindings[edge_id]['idx'] = binding_idx + 1
        header = {
            'status': 200,
            'location': '/dhcp/config/bindings/%s' % binding_idx_str
        }
        response = ''
        return (header, response)

    def delete_dhcp_binding(self, edge_id, binding_id):
        if binding_id not in self._dhcp_bindings[edge_id]:
            raise Exception(_("binding %s does not exist") % binding_id)
        del self._dhcp_bindings[edge_id][binding_id]
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def get_nat_config(self, edge_id):
        if edge_id not in self._edges:
            raise Exception(_("Edge %s does not exist") % edge_id)
@ -242,8 +372,7 @@ class FakeVcns(object):
        # The lswitch is created via VCNS API so the fake nsx_api will not
        # see it. Added to fake nsx_api here.
        if self._fake_nsx_api:
            lswitch = \
                self._fake_nsx_api._add_lswitch(jsonutils.dumps(lsconfig))
            lswitch = self._fake_nsx_api._add_lswitch(json.dumps(lsconfig))
        else:
            lswitch = lsconfig
            lswitch['uuid'] = uuidutils.generate_uuid()
@ -579,6 +708,40 @@ class FakeVcns(object):
        self._fake_loadbalancer_config[edge_id] = True
        return self.return_helper(header, response)

    def create_virtual_wire(self, vdn_scope_id, request):
        self._virtual_wire_id += 1
        header = {'status': 200}
        virtual_wire = 'virtualwire-%s' % self._virtual_wire_id
        data = {'name': request['virtualWireCreateSpec']['name'],
                'objectId': virtual_wire}
        self._fake_virtual_wires.update({virtual_wire: data})
        return (header, virtual_wire)

    def delete_virtual_wire(self, virtualwire_id):
        del self._fake_virtual_wires[virtualwire_id]
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def create_port_group(self, dvs_id, request):
        self._portgroup_id += 1
        header = {'status': 200}
        portgroup = 'dvportgroup-%s' % self._portgroup_id
        data = {'name': request['networkSpec']['networkName'],
                'objectId': portgroup}
        self._fake_portgroups.update({portgroup: data})
        return (header, portgroup)

    def delete_port_group(self, dvs_id, portgroup_id):
        del self._fake_portgroups[portgroup_id]
        header = {
            'status': 200
        }
        response = ''
        return (header, response)

    def return_helper(self, header, response):
        status = int(header['status'])
        if 200 <= status <= 300:
@ -590,6 +753,194 @@ class FakeVcns(object):
        raise cls(
            status=status, header=header, uri='fake_url', response=response)

    def _get_bad_req_response(self, details, error_code, module_name):
        bad_req_response_format = """
            <error>
            <details>%(details)s</details>
            <errorCode>%(error_code)s</errorCode>
            <moduleName>%(module_name)s</moduleName>
            </error>
            """
        return bad_req_response_format % {
            'details': details,
            'error_code': error_code,
            'module_name': module_name,
        }

    def _get_section_location(self, type, section_id):
        return SECTION_LOCATION_HEADER % (type, section_id)

    def _get_section_id_from_uri(self, section_uri):
        return section_uri.split('/')[-1]

    def _section_not_found(self, section_id):
        msg = "Invalid section id found : %s" % section_id
        response = self._get_bad_req_response(msg, 100089, 'vShield App')
        headers = {'status': 400}
        return (headers, response)

    def _unknown_error(self):
        msg = "Unknown Error Occured.Please look into tech support logs."
        response = self._get_bad_req_response(msg, 100046, 'vShield App')
        headers = {'status': 400}
        return (headers, response)

    def create_security_group(self, request):
        sg = request['securitygroup']
        if sg['name'] in self._securitygroups['names']:
            status = 400
            msg = ("Another object with same name : %s already exists in "
                   "the current scope : globalroot-0." % sg['name'])
            response = self._get_bad_req_response(msg, 210, 'core-services')
        else:
            sg_id = str(self._securitygroups['ids'])
            self._securitygroups['ids'] += 1
            sg['members'] = set()
            self._securitygroups[sg_id] = sg
            self._securitygroups['names'].add(sg['name'])
            status, response = 201, sg_id
        return ({'status': status}, response)

    def delete_security_group(self, securitygroup_id):
        try:
            del self._securitygroups[securitygroup_id]
        except KeyError:
            status = 404
            msg = ("The requested object : %s could "
                   "not be found. Object identifiers are case sensitive."
                   % securitygroup_id)
            response = self._get_bad_req_response(msg, 210, 'core-services')
        else:
            status, response = 200, ''
        return ({'status': status}, response)

    def create_section(self, type, request):
        section = ET.fromstring(request)
        section_name = section.attrib.get('name')
        if section_name in self._sections['names']:
            msg = "Section with name %s already exists." % section_name
            response = self._get_bad_req_response(msg, 100092, 'vShield App')
            headers = {'status': 400}
        else:
            section_id = str(self._sections['section_ids'])
            section.attrib['id'] = section_id
            _section = self._sections[section_id] = {'name': section_name,
                                                     'etag': 'Etag-0',
                                                     'rules': {}}
            self._sections['names'].add(section_name)
            for rule in section.findall('rule'):
                rule_id = str(self._sections['rule_ids'])
                rule.attrib['id'] = rule_id
                _section['rules'][rule_id] = ET.tostring(rule)
                self._sections['rule_ids'] += 1
            response = ET.tostring(section)
            headers = {
                'status': 201,
                'location': self._get_section_location(type, section_id),
                'etag': _section['etag']
            }
            self._sections['section_ids'] += 1
        return (headers, response)

    def update_section(self, section_uri, request, h):
        section = ET.fromstring(request)
        section_id = section.attrib.get('id')
        section_name = section.attrib.get('name')
        if section_id not in self._sections:
            return self._section_not_found(section_id)
        _section = self._sections[section_id]
        if (_section['name'] != section_name and
                section_name in self._sections['names']):
            # Theres a section with this name already
            headers, response = self._unknown_error()
        else:
            # Different Etag every successful update
            _section['etag'] = ('Etag-1' if _section['etag'] == 'Etag-0'
                                else 'Etag-0')
            self._sections['names'].remove(_section['name'])
            _section['name'] = section_name
            self._sections['names'].add(section_name)
            _section['rules'] = {}
            for rule in section.findall('rule'):
                if not rule.attrib.get('id'):
                    rule.attrib['id'] = str(self._sections['rule_ids'])
                    self._sections['rule_ids'] += 1
                rule_id = rule.attrib.get('id')
                _section['rules'][rule_id] = ET.tostring(rule)
            response = ET.tostring(section)
            headers = {
                'status': 200,
                'location': self._get_section_location(type, section_id),
                'etag': _section['etag']
            }
        return (headers, response)

    def delete_section(self, section_uri):
        section_id = self._get_section_id_from_uri(section_uri)
        if section_id not in self._sections:
            headers, response = self._unknown_error()
        else:
            section_name = self._sections[section_id]['name']
            del self._sections[section_id]
            self._sections['names'].remove(section_name)
            response = ''
            headers = {'status': 204}
        return (headers, response)

    def get_section(self, section_uri):
        section_id = self._get_section_id_from_uri(section_uri)
        if section_id not in self._sections:
            headers, response = self._section_not_found(section_id)
        else:
            section_rules = (''.join(self._sections[section_id]['rules'].
                                     values()))
            response = ('<section id="%s"><rules>%s</rules></section>'
                        % (section_id, section_rules))
            headers = {'status': 200,
                       'etag': self._sections[section_id]['etag']}
        return (headers, response)

    def remove_rule_from_section(self, section_uri, rule_id):
        section_id = self._get_section_id_from_uri(section_uri)
        if section_id not in self._sections:
            headers, response = self._section_not_found(section_id)
        else:
            section = self._sections[section_id]
            if rule_id in section['rules']:
                del section['rules'][rule_id]
                response = ''
                headers = {'status': 204}
            else:
                headers, response = self._unknown_error()
        return (headers, response)

    def add_member_to_security_group(self, security_group_id, member_id):
        if security_group_id not in self._securitygroups:
            msg = ("The requested object : %s could not be found."
                   "Object identifiers are case "
                   "sensitive.") % security_group_id
            response = self._get_bad_req_response(msg, 202, 'core-services')
            headers = {'status': 404}
        else:
            self._securitygroups[security_group_id]['members'].add(member_id)
            response = ''
            headers = {'status': 200}
        return (headers, response)

    def remove_member_from_security_group(self, security_group_id, member_id):
        if security_group_id not in self._securitygroups:
            msg = ("The requested object : %s could not be found."
                   "Object identifiers are "
                   "case sensitive.") % security_group_id
            response = self._get_bad_req_response(msg, 202, 'core-services')
            headers = {'status': 404}
        else:
            self._securitygroups[security_group_id]['members'].remove(
                member_id)
            response = ''
            headers = {'status': 200}
        return (headers, response)

    def reset_all(self):
        self._jobs.clear()
        self._edges.clear()
@ -600,3 +951,22 @@ class FakeVcns(object):
        self._fake_monitors_dict = {}
        self._fake_app_profiles_dict = {}
        self._fake_loadbalancer_config = {}
        self._fake_virtual_wires = {}
        self._virtual_wire_id = 0
        self._fake_portgroups = {}
        self._portgroup_id = 0
        self._securitygroups = {'ids': 0, 'names': set()}
        self._sections = {'section_ids': 0, 'rule_ids': 0, 'names': set()}
        self._dhcp_bindings = {}

    def validate_datacenter_moid(self, object_id):
        return True

    def validate_network(self, object_id):
        return True

    def validate_vdn_scope(self, object_id):
        return True

    def validate_dvs(self, object_id):
        return True
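FakeVcns mimics the slice of the NSXv/vShield API that the plugin exercises, returning the same (header, body) tuples as the real vcns.Vcns client. Below is a hedged sketch of how a test might wire it in through the VCNS_NAME dotted path defined in tests/unit/vmware/__init__.py; the particular methods stubbed here are illustrative, not the exact set used by every test.

# Illustrative only: substitute FakeVcns behaviour for the real Vcns client.
import mock

from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.vshield import fake_vcns

fake = fake_vcns.FakeVcns()
mock_vcns = mock.patch(vmware.VCNS_NAME, autospec=True)
instance = mock_vcns.start()
# Route selected client calls to the fake; any method FakeVcns implements
# can be wired the same way.
instance.return_value.get_edge_id.side_effect = fake.get_edge_id
instance.return_value.update_edge.side_effect = fake.update_edge
# ... run the code under test, then mock_vcns.stop()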
@ -16,11 +16,13 @@
from eventlet import greenthread
import mock

from neutron.plugins.vmware.vshield.common import constants as vcns_const
from neutron.plugins.vmware.vshield.tasks import constants as ts_const
from neutron.plugins.vmware.vshield.tasks import tasks as ts
from neutron.plugins.vmware.vshield import vcns_driver
from neutron.tests import base
from vmware_nsx.neutron.plugins.vmware.vshield.common import (
    constants as vcns_const)
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import (
    constants as ts_const)
from vmware_nsx.neutron.plugins.vmware.vshield.tasks import tasks as ts
from vmware_nsx.neutron.plugins.vmware.vshield import vcns_driver
from vmware_nsx.neutron.tests.unit import vmware
from vmware_nsx.neutron.tests.unit.vmware.vshield import fake_vcns

@ -554,13 +556,9 @@ class VcnsDriverTestCase(base.BaseTestCase):

    def test_update_interface(self):
        self._deploy_edge()
        jobdata = {}
        task = self.vcns_driver.update_interface(
        self.vcns_driver.update_interface(
            'router-id', self.edge_id, vcns_const.EXTERNAL_VNIC_INDEX,
            'network-id', address='100.0.0.3', netmask='255.255.255.0',
            jobdata=jobdata)
        task.wait(ts_const.TaskState.RESULT)
        self.assertTrue(jobdata.get('interface_update_result'))
            'network-id', address='100.0.0.3', netmask='255.255.255.0')

    def test_delete_edge(self):
        self._deploy_edge()