Merge "Add NVP port security implementation"

Jenkins 2013-01-29 14:49:58 +00:00 committed by Gerrit Code Review
commit 18c35e8500
4 changed files with 246 additions and 55 deletions

View File

@@ -0,0 +1,73 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""inital port security
Revision ID: 1149d7de0cfa
Revises: 1b693c095aa3
Create Date: 2013-01-22 14:05:20.696502
"""
# revision identifiers, used by Alembic.
revision = '1149d7de0cfa'
down_revision = '1b693c095aa3'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]
from alembic import op
import sqlalchemy as sa
from quantum.db import migration
def upgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
### commands auto generated by Alembic - please adjust! ###
op.create_table('networksecuritybindings',
sa.Column('network_id', sa.String(length=36),
nullable=False),
sa.Column('port_security_enabled', sa.Boolean(),
nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table('portsecuritybindings',
sa.Column('port_id', sa.String(length=36),
nullable=False),
sa.Column('port_security_enabled', sa.Boolean(),
nullable=False),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('port_id'))
### end Alembic commands ###
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
### commands auto generated by Alembic - please adjust! ###
op.drop_table('portsecuritybindings')
op.drop_table('networksecuritybindings')
### end Alembic commands ###
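
For orientation, here is a minimal SQLAlchemy sketch of the two binding tables the migration above creates. The class names are illustrative assumptions; the table names, columns, and constraints mirror the op.create_table() calls.

# Illustrative models only: class names are assumptions, the schema follows
# the migration above.
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class NetworkSecurityBinding(Base):
    """Whether port security is enabled for an entire network."""
    __tablename__ = 'networksecuritybindings'
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete='CASCADE'),
                           primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean, nullable=False)


class PortSecurityBinding(Base):
    """Whether port security is enabled for an individual port."""
    __tablename__ = 'portsecuritybindings'
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete='CASCADE'),
                        primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean, nullable=False)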

View File

@@ -34,8 +34,10 @@ from quantum.common import topics
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import portsecurity_db
# NOTE: quota_db cannot be removed, it is needed for the db models
from quantum.db import quota_db
from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.openstack.common import cfg
from quantum.openstack.common import rpc
@@ -105,16 +107,22 @@ class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
return q_rpc.PluginRpcDispatcher([self])
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
portsecurity_db.PortSecurityDbMixin):
"""
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["provider", "quotas"]
supported_extension_aliases = ["provider", "quotas", "port-security"]
# Default controller cluster
default_cluster = None
provider_network_view = "extension:provider_network:view"
provider_network_set = "extension:provider_network:set"
port_security_enabled_create = "create_port:port_security_enabled"
port_security_enabled_update = "update_port:port_security_enabled"
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
@@ -216,15 +224,11 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
else:
return self.default_cluster
def _check_provider_view_auth(self, context, network):
return policy.check(context,
"extension:provider_network:view",
network)
def _check_view_auth(self, context, resource, action):
return policy.check(context, action, resource)
def _enforce_provider_set_auth(self, context, network):
return policy.enforce(context,
"extension:provider_network:set",
network)
def _enforce_set_auth(self, context, resource, action):
return policy.enforce(context, action, resource)
def _handle_provider_create(self, context, attrs):
# NOTE(salvatore-orlando): This method has been borrowed from
@@ -240,7 +244,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
return
# Authorize before exposing plugin details to client
self._enforce_provider_set_auth(context, attrs)
self._enforce_set_auth(context, attrs, self.provider_network_set)
err_msg = None
if not network_type_set:
err_msg = _("%s required") % pnet.NETWORK_TYPE
@@ -273,7 +277,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
# which should be specified in physical_network
def _extend_network_dict_provider(self, context, network, binding=None):
if self._check_provider_view_auth(context, network):
if self._check_view_auth(context, network, self.provider_network_view):
if not binding:
binding = nicira_db.get_network_binding(context.session,
network['id'])
@@ -365,6 +369,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
with context.session.begin(subtransactions=True):
new_net = super(NvpPluginV2, self).create_network(context,
network)
self._process_network_create_port_security(context,
network['network'])
if net_data.get(pnet.NETWORK_TYPE):
net_binding = nicira_db.add_network_binding(
context.session, new_net['id'],
@@ -373,6 +379,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
net_data.get(pnet.SEGMENTATION_ID))
self._extend_network_dict_provider(context, new_net,
net_binding)
self._extend_network_port_security_dict(context, new_net)
return new_net
def delete_network(self, context, id):
@@ -409,6 +416,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
network = self._get_network(context, id)
net_result = self._make_network_dict(network, None)
self._extend_network_dict_provider(context, net_result)
self._extend_network_port_security_dict(context, net_result)
# verify the fabric status of the corresponding
# logical switch(es) in nvp
try:
@@ -441,6 +450,7 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
super(NvpPluginV2, self).get_networks(context, filters))
for net in quantum_lswitches:
self._extend_network_dict_provider(context, net)
self._extend_network_port_security_dict(context, net)
if context.is_admin and not filters.get("tenant_id"):
tenant_filter = ""
@@ -516,10 +526,23 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
raise q_exc.NotImplementedError(_("admin_state_up=False "
"networks are not "
"supported."))
return super(NvpPluginV2, self).update_network(context, id, network)
with context.session.begin(subtransactions=True):
quantum_db = super(NvpPluginV2, self).update_network(
context, id, network)
if psec.PORTSECURITY in network['network']:
self._update_network_security_binding(
context, id, network['network'][psec.PORTSECURITY])
self._extend_network_port_security_dict(
context, quantum_db)
return quantum_db
def get_ports(self, context, filters=None, fields=None):
quantum_lports = super(NvpPluginV2, self).get_ports(context, filters)
with context.session.begin(subtransactions=True):
quantum_lports = super(NvpPluginV2, self).get_ports(
context, filters)
for quantum_lport in quantum_lports:
self._extend_port_port_security_dict(context, quantum_lport)
vm_filter = ""
tenant_filter = ""
# This is used when calling delete_network. Quantum checks to see if
@@ -607,12 +630,28 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
return lports
def create_port(self, context, port):
# If PORTSECURITY is set to something other than the default
# ATTR_NOT_SPECIFIED, we pass the port to the policy engine. We skip
# the policy check when the attribute is ATTR_NOT_SPECIFIED so that a
# port can still be created on a shared network that is not owned by
# the tenant.
# TODO(arosen) fix policy engine to do this for us automatically.
if attributes.is_attr_set(port['port'].get(psec.PORTSECURITY)):
self._enforce_set_auth(context, port,
self.port_security_enabled_create)
port_data = port['port']
with context.session.begin(subtransactions=True):
# First we allocate port in quantum database
quantum_db = super(NvpPluginV2, self).create_port(context, port)
# Update fields obtained from quantum db (eg: MAC address)
port["port"].update(quantum_db)
port_data = port['port']
# port security extension checks
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, port_data)
port_data[psec.PORTSECURITY] = port_security
self._process_port_security_create(context, port_data)
# provider networking extension checks
# Fetch the network and network binding from Quantum db
network = self._get_network(context, port_data['network_id'])
network_binding = nicira_db.get_network_binding(
@@ -639,7 +678,8 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
port_data['device_id'],
port_data['admin_state_up'],
port_data['mac_address'],
port_data['fixed_ips'])
port_data['fixed_ips'],
port_data[psec.PORTSECURITY])
# Get NVP ls uuid for quantum network
nvplib.plug_interface(cluster, selected_lswitch['uuid'],
lport['uuid'], "VifAttachment",
@@ -660,20 +700,50 @@ class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2):
LOG.debug(_("create_port completed on NVP for tenant "
"%(tenant_id)s: (%(id)s)"), port_data)
return port_data
self._extend_port_port_security_dict(context, port_data)
return port_data
def update_port(self, context, id, port):
params = {}
port_quantum = super(NvpPluginV2, self).get_port(context, id)
port_nvp, cluster = (
nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
port_quantum["network_id"], id))
params["cluster"] = cluster
params["port"] = port_quantum
LOG.debug(_("Update port request: %s"), params)
nvplib.update_port(port_quantum['network_id'],
port_nvp['uuid'], **params)
return super(NvpPluginV2, self).update_port(context, id, port)
self._enforce_set_auth(context, port,
self.port_security_enabled_update)
tenant_id = self._get_tenant_id_for_create(context, port)
with context.session.begin(subtransactions=True):
ret_port = super(NvpPluginV2, self).update_port(
context, id, port)
# copy values over
ret_port.update(port['port'])
# Handle port security
if psec.PORTSECURITY in port['port']:
self._update_port_security_binding(
context, id, ret_port[psec.PORTSECURITY])
# otherwise populate with the value currently stored in the db
else:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
port_nvp, cluster = (
nvplib.get_port_by_quantum_tag(self.clusters.itervalues(),
ret_port["network_id"], id))
LOG.debug(_("Update port request: %s"), port)
nvplib.update_port(cluster, ret_port['network_id'],
port_nvp['uuid'], id, tenant_id,
ret_port['name'], ret_port['device_id'],
ret_port['admin_state_up'],
ret_port['mac_address'],
ret_port['fixed_ips'],
ret_port[psec.PORTSECURITY])
# Update the port status from nvp. If this fails, hide the error:
# the port itself was updated successfully but we could not
# retrieve its status.
try:
ret_port['status'] = nvplib.get_port_status(
cluster, ret_port['network_id'], port_nvp['uuid'])
except Exception:
LOG.warn(_("Unable to retrieve port status for: %s."),
port_nvp['uuid'])
return ret_port
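
As a rough usage sketch (not part of this commit), the dictionaries below show how the new attribute reaches create_port() and update_port(). 'port_security_enabled' is the attribute name exposed by the port-security extension (psec.PORTSECURITY); every other value is a made-up example, and the attribute defaulting normally done by the API layer is omitted.

# Schematic request bodies only; UUIDs and addresses are invented.
create_body = {
    'port': {
        'network_id': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
        'tenant_id': 'demo',
        'admin_state_up': True,
        'fixed_ips': [{'ip_address': '10.0.0.5'}],
        # Explicitly setting the attribute makes create_port() run the
        # "create_port:port_security_enabled" policy check; leaving it unset
        # keeps ports creatable on shared networks not owned by the tenant.
        'port_security_enabled': True,
    }
}

# A body like this makes update_port() run the
# "update_port:port_security_enabled" policy check, update the DB binding,
# and push the new allowed-address-pairs configuration to NVP.
update_body = {'port': {'port_security_enabled': False}}
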
def delete_port(self, context, id):
# TODO(salvatore-orlando): pass only actual cluster

View File

@@ -290,7 +290,6 @@ def get_all_networks(cluster, tenant_id, networks):
raise exception.QuantumException()
if not resp_obj:
return []
lswitches = json.loads(resp_obj)["results"]
networks_result = copy(networks)
return networks_result
@@ -371,7 +370,7 @@ def get_port_by_quantum_tag(clusters, lswitch, quantum_tag):
for c in clusters:
try:
res_obj = do_single_request('GET', query, cluster=c)
except Exception as e:
except Exception:
continue
res = json.loads(res_obj)
if len(res["results"]) == 1:
@@ -417,44 +416,56 @@ def get_port(cluster, network, port, relations=None):
return port
def update_port(network, port_id, **params):
cluster = params["cluster"]
lport_obj = {}
def _configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled):
lport_obj['allowed_address_pairs'] = []
if port_security_enabled:
for fixed_ip in fixed_ips:
ip_address = fixed_ip.get('ip_address')
if ip_address:
lport_obj['allowed_address_pairs'].append(
{'mac_address': mac_address, 'ip_address': ip_address})
# add an address pair allowing src_ip 0.0.0.0 to leave;
# this is required for outgoing dhcp requests
lport_obj["allowed_address_pairs"].append(
{"mac_address": mac_address,
"ip_address": "0.0.0.0"})
admin_state_up = params['port'].get('admin_state_up')
name = params["port"].get("name")
device_id = params["port"].get("device_id")
if admin_state_up:
lport_obj["admin_status_enabled"] = admin_state_up
if name:
lport_obj["display_name"] = name
if device_id:
# device_id can be longer than 40 so we rehash it
device_id = hashlib.sha1(device_id).hexdigest()
lport_obj["tags"] = (
[dict(scope='os_tid', tag=params["port"].get("tenant_id")),
dict(scope='q_port_id', tag=params["port"]["id"]),
dict(scope='vm_id', tag=device_id)])
def update_port(cluster, lswitch_uuid, lport_uuid, quantum_port_id, tenant_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None, port_security_enabled=None):
uri = "/ws.v1/lswitch/" + network + "/lport/" + port_id
# device_id can be longer than 40 characters, so we hash it
hashed_device_id = hashlib.sha1(device_id).hexdigest()
lport_obj = dict(
admin_status_enabled=admin_status_enabled,
display_name=display_name,
tags=[dict(scope='os_tid', tag=tenant_id),
dict(scope='q_port_id', tag=quantum_port_id),
dict(scope='vm_id', tag=hashed_device_id)])
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled)
path = "/ws.v1/lswitch/" + lswitch_uuid + "/lport/" + lport_uuid
try:
resp_obj = do_single_request("PUT", uri, json.dumps(lport_obj),
resp_obj = do_single_request("PUT", path, json.dumps(lport_obj),
cluster=cluster)
except NvpApiClient.ResourceNotFound as e:
LOG.error(_("Port or Network not found, Error: %s"), str(e))
raise exception.PortNotFound(port_id=port_id, net_id=network)
raise exception.PortNotFound(port_id=lport_uuid, net_id=lswitch_uuid)
except NvpApiClient.NvpApiException as e:
raise exception.QuantumException()
obj = json.loads(resp_obj)
obj["port-op-status"] = get_port_status(cluster, network, obj["uuid"])
return obj
result = json.loads(resp_obj)
LOG.debug(_("Updated logical port %(result)s on logical swtich %(uuid)s"),
{'result': result['uuid'], 'uuid': lswitch_uuid})
return result
def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
display_name, device_id, admin_status_enabled,
mac_address=None, fixed_ips=None):
mac_address=None, fixed_ips=None, port_security_enabled=None):
""" Creates a logical port on the assigned logical switch """
# device_id can be longer than 40 characters, so we hash it
hashed_device_id = hashlib.sha1(device_id).hexdigest()
@@ -465,6 +476,10 @@ def create_lport(cluster, lswitch_uuid, tenant_id, quantum_port_id,
dict(scope='q_port_id', tag=quantum_port_id),
dict(scope='vm_id', tag=hashed_device_id)],
)
_configure_extensions(lport_obj, mac_address, fixed_ips,
port_security_enabled)
path = _build_uri_path(LPORT_RESOURCE, parent_resource_id=lswitch_uuid)
try:
resp_obj = do_single_request("POST", path,

View File

@@ -27,6 +27,8 @@ from quantum.openstack.common import cfg
from quantum.plugins.nicira.nicira_nvp_plugin import nvplib
from quantum.tests.unit.nicira import fake_nvpapiclient
import quantum.tests.unit.test_db_plugin as test_plugin
import quantum.tests.unit.test_extension_portsecurity as psec
LOG = logging.getLogger(__name__)
NICIRA_PKG_PATH = 'quantum.plugins.nicira.nicira_nvp_plugin'
@@ -152,3 +154,34 @@ class TestNiciraNetworksV2(test_plugin.TestNetworksV2,
with self.assertRaises(webob.exc.HTTPClientError) as ctx_manager:
self._test_create_bridge_network(vlan_id=5000)
self.assertEquals(ctx_manager.exception.code, 400)
class NiciraPortSecurityTestCase(psec.PortSecurityDBTestCase):
_plugin_name = ('%s.QuantumPlugin.NvpPluginV2' % NICIRA_PKG_PATH)
def setUp(self):
etc_path = os.path.join(os.path.dirname(__file__), 'etc')
test_lib.test_config['config_files'] = [os.path.join(etc_path,
'nvp.ini.test')]
# mock nvp api client
fc = fake_nvpapiclient.FakeClient(etc_path)
self.mock_nvpapi = mock.patch('%s.NvpApiClient.NVPApiHelper'
% NICIRA_PKG_PATH, autospec=True)
instance = self.mock_nvpapi.start()
instance.return_value.login.return_value = "the_cookie"
def _fake_request(*args, **kwargs):
return fc.fake_request(*args, **kwargs)
instance.return_value.request.side_effect = _fake_request
super(NiciraPortSecurityTestCase, self).setUp(self._plugin_name)
def tearDown(self):
super(NiciraPortSecurityTestCase, self).tearDown()
self.mock_nvpapi.stop()
class TestNiciraPortSecurity(psec.TestPortSecurity,
NiciraPortSecurityTestCase):
pass