diff --git a/neutron_taas/db/__init__.py b/neutron_taas/db/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/db/migration/__init__.py b/neutron_taas/db/migration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/neutron_taas/db/migration/taas_init_ops.py b/neutron_taas/db/migration/taas_init_ops.py new file mode 100644 index 0000000..b1addf2 --- /dev/null +++ b/neutron_taas/db/migration/taas_init_ops.py @@ -0,0 +1,52 @@ +# Copyright 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +# Initial schema operations for Tap-as-a-Service service plugin + + +from alembic import op +import sqlalchemy as sa + + +direction_types = sa.Enum('IN', 'OUT', 'BOTH', name='tapflows_direction') + + +def upgrade(): + op.create_table( + 'tap_services', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('port_id', sa.String(36), nullable=False), + sa.Column('network_id', sa.String(36), nullable=True)) + + op.create_table( + 'tap_flows', + sa.Column('id', sa.String(length=36), nullable=False), + sa.Column('tenant_id', sa.String(length=255), nullable=True), + sa.Column('name', sa.String(length=255), nullable=True), + sa.Column('description', sa.String(length=1024), nullable=True), + sa.Column('tap_service_id', sa.String(length=36), + sa.ForeignKey("tap_services.id", + ondelete="CASCADE"), nullable=False), + sa.Column('source_port', sa.String(length=36), nullable=False), + sa.Column('direction', direction_types, nullable=False)) + + op.create_table( + 'tap_id_associations', + sa.Column(sa.String(length=36)), + sa.Column(sa.INTEGER, primary_key=True, autoincrement=True)) diff --git a/neutron_taas/db/taas_db.py b/neutron_taas/db/taas_db.py new file mode 100755 index 0000000..fc9db3f --- /dev/null +++ b/neutron_taas/db/taas_db.py @@ -0,0 +1,231 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +from neutron.db import common_db_mixin as base_db +from neutron.db import model_base +from neutron.db import models_v2 +from neutron import manager +# from neutron.openstack.common import uuidutils +from neutron_taas.extensions import taas +from oslo_log import log as logging +from oslo_utils import uuidutils +import sqlalchemy as sa +from sqlalchemy.orm import exc + + +LOG = logging.getLogger(__name__) + + +class TapService(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + + # Represents a V2 TapService Object + __tablename__ = 'tap_services' + name = sa.Column(sa.String(255), nullable=True) + description = sa.Column(sa.String(1024), nullable=True) + port_id = sa.Column(sa.String(36), nullable=False) + network_id = sa.Column(sa.String(36), nullable=True) +''' + def __init__(self, id, tenant_id, name, description, port_id, network_id): + self.id = id + self.tenant_id = tenant_id + self.name = name + self.description = description + self.port_id = port_id + self.network_id = network_id + + def __repr__(self): + return " %s" % self.id +''' + + +class TapFlow(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant): + + # Represents a V2 TapFlow Object + __tablename__ = 'tap_flows' + name = sa.Column(sa.String(255), nullable=True) + description = sa.Column(sa.String(1024), nullable=True) + tap_service_id = sa.Column(sa.String(36), + sa.ForeignKey("tap_services.id", + ondelete="CASCADE"), + nullable=False) + source_port = sa.Column(sa.String(36), nullable=False) + direction = sa.Column(sa.Enum('IN', 'OUT', 'BOTH', + name='tapflows_direction')) + + +class TapIdAssociation(model_base.BASEV2): + + # Internal mapping between a TAP Service and + # id to be used by the Agents + __tablename__ = 'tap_id_associations' + tap_service_id = sa.Column(sa.String(36)) + taas_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) + + +class 
Tass_db_Mixin(taas.TaasPluginBase, base_db.CommonDbMixin): + + def _core_plugin(self): + return manager.NeutronManager.get_plugin() + + def _get_tap_service(self, context, id): + try: + return self._get_by_id(context, TapService, id) + except exc.NoResultFound: + raise taas.TapServiceNotFound(tap_id=id) + + def _get_tap_id_association(self, context, tap_service_id): + try: + query = self._model_query(context, TapIdAssociation) + return query.filter(TapIdAssociation.tap_service_id == + tap_service_id).one() + except exc.NoResultFound: + raise taas.TapServiceNotFound(tap_id=tap_service_id) + + def _get_tap_flow(self, context, id): + try: + return self._get_by_id(context, TapFlow, id) + except Exception: + raise taas.TapFlowNotFound(flow_id=id) + + def _make_tap_service_dict(self, tap_service, fields=None): + res = {'id': tap_service['id'], + 'tenant_id': tap_service['tenant_id'], + 'name': tap_service['name'], + 'description': tap_service['description'], + 'port_id': tap_service['port_id'], + 'network_id': tap_service['network_id']} + + return self._fields(res, fields) + + def _make_tap_id_association_dict(self, tap_id_association): + res = {'tap_service_id': tap_id_association['tap_service_id'], + 'taas_id': tap_id_association['taas_id']} + + return res + + def _make_tap_flow_dict(self, tap_flow, fields=None): + res = {'id': tap_flow['id'], + 'tenant_id': tap_flow['tenant_id'], + 'tap_service_id': tap_flow['tap_service_id'], + 'name': tap_flow['name'], + 'description': tap_flow['description'], + 'source_port': tap_flow['source_port'], + 'direction': tap_flow['direction']} + + return self._fields(res, fields) + + def create_tap_service(self, context, tap_service): + LOG.debug("create_tap_service() called") + t_s = tap_service['tap_service'] + tenant_id = self._get_tenant_id_for_create(context, t_s) + with context.session.begin(subtransactions=True): + tap_service_db = TapService( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=t_s['name'], + 
description=t_s['description'], + port_id=t_s['port_id'], + network_id=t_s['network_id'] + ) + context.session.add(tap_service_db) + + # create the TapIdAssociation object + with context.session.begin(subtransactions=True): + tap_id_association_db = TapIdAssociation( + tap_service_id=tap_service_db['id'] + ) + context.session.add(tap_id_association_db) + + return self._make_tap_service_dict(tap_service_db) + + def create_tap_flow(self, context, tap_flow): + LOG.debug("create_tap_flow() called") + t_f = tap_flow['tap_flow'] + tenant_id = self._get_tenant_id_for_create(context, t_f) + # TODO(Vinay): Check for the tenant_id validation + # TODO(Vinay): Check for the source port validation + with context.session.begin(subtransactions=True): + tap_flow_db = TapFlow( + id=uuidutils.generate_uuid(), + tenant_id=tenant_id, + name=t_f['name'], + description=t_f['description'], + tap_service_id=t_f['tap_service_id'], + source_port=t_f['source_port'], + direction=t_f['direction'] + ) + context.session.add(tap_flow_db) + + return self._make_tap_flow_dict(tap_flow_db) + + def delete_tap_service(self, context, id): + LOG.debug("delete_tap_service() called") + + count = context.session.query(TapService).filter_by(id=id).delete() + + if not count: + raise taas.TapServiceNotFound(tap_id=id) + + context.session.query(TapIdAssociation).filter_by( + tap_service_id=id).delete() + + def delete_tap_flow(self, context, id): + LOG.debug("delete_tap_flow() called") + + count = context.session.query(TapFlow).filter_by(id=id).delete() + + if not count: + raise taas.TapFlowNotFound(flow_id=id) + + def get_tap_service(self, context, id, fields=None): + LOG.debug("get_tap_service() called") + + t_s = self._get_tap_service(context, id) + return self._make_tap_service_dict(t_s, fields) + + def get_tap_id_association(self, context, tap_service_id): + LOG.debug("get_tap_id_association() called") + + t_a = self._get_tap_id_association(context, tap_service_id) + return 
self._make_tap_id_association_dict(t_a) + + def get_tap_flow(self, context, id, fields=None): + LOG.debug("get_tap_flow() called") + + t_f = self._get_tap_flow(context, id) + return self._make_tap_flow_dict(t_f, fields) + + def get_tap_services(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + LOG.debug("get_tap_services() called") + return self._get_collection(context, TapService, + self._make_tap_service_dict, + filters=filters, fields=fields) + + def get_tap_flows(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + LOG.debug("get_tap_flows() called") + return self._get_collection(context, TapFlow, + self._make_tap_flow_dict, + filters=filters, fields=fields) + + def _get_port_details(self, context, port_id): + with context.session.begin(subtransactions=True): + port = self._core_plugin().get_port(context, port_id) + + return port diff --git a/neutron_taas/extensions/__init__.py b/neutron_taas/extensions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/neutron_taas/extensions/taas.py b/neutron_taas/extensions/taas.py new file mode 100644 index 0000000..ec11426 --- /dev/null +++ b/neutron_taas/extensions/taas.py @@ -0,0 +1,260 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc + +from neutron.api import extensions +from neutron.api.v2 import attributes as attr +from neutron.api.v2 import resource_helper +from neutron.common import exceptions as qexception +from neutron.plugins.common import constants +from neutron.services import service_base + +from oslo.config import cfg +from oslo_log import log as logging + +import six + +LOG = logging.getLogger(__name__) + +# TaaS exception handling classes + + +class TapServiceNotFound(qexception.NotFound): + message = _("Tap Service %(tap_id)s does not exist") + + +class TapFlowNotFound(qexception.NotFound): + message = _("Tap Flow %(flow_id)s does not exist") + + +class InvalidDestinationPort(qexception.NotFound): + message = _("Destination Port %(port)s does not exist") + + +class InvalidSourcePort(qexception.NotFound): + message = _("Source Port %(port)s does not exist") + + +class PortDoesNotBelongToTenant(qexception.NotAuthorized): + message = _("The port specified does not belong to the tenant") + + +class TapServiceNotBelongToTenant(qexception.NotAuthorized): + message = _("Tap Service specified does not belong to the tenant") + + +class TapServiceLimitReached(qexception.OverQuota): + message = _("Reached the upper limit of Tap Services Creatable") + + +direction_enum = [None, 'IN', 'OUT', 'BOTH'] + + +''' +Resource Attribute Map: + +Note: + +'tap_services' data model refers to the Tap Service created. The port_id +can be specified by the tenant to which the mirrored data is sent. If port_id +is specified then the network_id will not be used. If the port_id is not +specified, the TaaS will create a port on the network specified by the +network_id. 
+''' + +RESOURCE_ATTRIBUTE_MAP = { + 'tap_services': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'port_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': True}, + 'network_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'is_visible': False} + }, + 'tap_flows': { + 'id': {'allow_post': False, 'allow_put': False, + 'validate': {'type:uuid': None}, 'is_visible': True, + 'primary_key': True}, + 'tenant_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': None}, + 'required_by_policy': True, 'is_visible': True}, + 'name': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'description': {'allow_post': True, 'allow_put': True, + 'validate': {'type:string': None}, + 'is_visible': True, 'default': ''}, + 'tap_service_id': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'required_by_policy': True, 'is_visible': True}, + 'source_port': {'allow_post': True, 'allow_put': False, + 'validate': {'type:uuid': None}, + 'required_by_policy': True, 'is_visible': True}, + 'direction': {'allow_post': True, 'allow_put': False, + 'validate': {'type:string': direction_enum}, + 'is_visible': True} + } +} + + +taas_quota_opts = [ + cfg.IntOpt('quota_tap_service', + default=1, + help=_('Number of Tap Service instances allowed per tenant')), + cfg.IntOpt('quota_tap_flow', + default=10, + help=_('Number of Tap flows 
allowed per tenant')) +] +cfg.CONF.register_opts(taas_quota_opts, 'QUOTAS') + + +TaasOpts = [ + cfg.StrOpt( + 'driver', + default='', + help=_("Name of the TaaS Driver")), + cfg.BoolOpt( + 'enabled', + default=False, + help=_("Enable TaaS")), + cfg.IntOpt( + 'vlan_range_start', + default=3900, + help=_("Starting range of TAAS VLAN IDs")), + cfg.IntOpt( + 'vlan_range_end', + default=4000, + help=_("End range of TAAS VLAN IDs")), +] +cfg.CONF.register_opts(TaasOpts, 'taas') + + +class Taas(extensions.ExtensionDescriptor): + @classmethod + def get_name(cls): + return "Neutron Tap as a Service" + + @classmethod + def get_alias(cls): + return "taas" + + @classmethod + def get_description(cls): + return "Neutron Tap as a Service Extension." + + @classmethod + def get_namespace(cls): + return "http://wiki.openstack.org/wiki/Neutron/Taas/#API" + + @classmethod + def get_updated(cls): + return "2015-01-14T10:00:00-00:00" + + @classmethod + def get_plugin_interface(cls): + return TaasPluginBase + + @classmethod + def get_resources(cls): + """Returns Ext Resources.""" + plural_mappings = resource_helper.build_plural_mappings( + {}, RESOURCE_ATTRIBUTE_MAP) + + attr.PLURALS.update(plural_mappings) + + return resource_helper.build_resource_info(plural_mappings, + RESOURCE_ATTRIBUTE_MAP, + constants.TAAS, + translate_name=True, + allow_bulk=True) + + def update_attributes_map(self, attributes): + super(Taas, self).update_attributes_map( + attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP) + + def get_extended_resources(self, version): + if version == "2.0": + return RESOURCE_ATTRIBUTE_MAP + else: + return {} + + +@six.add_metaclass(abc.ABCMeta) +class TaasPluginBase(service_base.ServicePluginBase): + def get_plugin_name(self): + return constants.TAAS + + def get_plugin_description(self): + return "Tap Service Plugin" + + def get_plugin_type(self): + return constants.TAAS + + @abc.abstractmethod + def create_tap_service(self, context, tap_service): + """Create a Tap 
Service.""" + pass + + @abc.abstractmethod + def delete_tap_service(self, context, id): + """Delete a Tap Service.""" + pass + + @abc.abstractmethod + def get_tap_service(self, context, id, fields=None): + """Get a Tap Service.""" + pass + + @abc.abstractmethod + def get_tap_services(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + """List all Tap Services.""" + pass + + @abc.abstractmethod + def create_tap_flow(self, context, tap_flow): + """Create a Tap Flow.""" + pass + + @abc.abstractmethod + def get_tap_flow(self, context, id, fields=None): + """Get a Tap Flow.""" + pass + + @abc.abstractmethod + def delete_tap_flow(self, context, id): + """Delete a Tap Flow.""" + pass + + @abc.abstractmethod + def get_tap_flows(self, context, filters=None, fields=None, + sorts=None, limit=None, marker=None, + page_reverse=False): + """List all Tap Flows.""" + pass diff --git a/neutron_taas/neutron_dependencies/constants.py b/neutron_taas/neutron_dependencies/constants.py new file mode 100644 index 0000000..93f440a --- /dev/null +++ b/neutron_taas/neutron_dependencies/constants.py @@ -0,0 +1,98 @@ +# Copyright 2012 OpenStack Foundation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+# Service type constants:
+CORE = "CORE"
+DUMMY = "DUMMY"
+LOADBALANCER = "LOADBALANCER"
+LOADBALANCERV2 = "LOADBALANCERV2"
+FIREWALL = "FIREWALL"
+VPN = "VPN"
+METERING = "METERING"
+L3_ROUTER_NAT = "L3_ROUTER_NAT"
+FLAVORS = "FLAVORS"
+TAAS = "TAAS"
+
+# Maps extension alias to service type
+EXT_TO_SERVICE_MAPPING = {
+    'dummy': DUMMY,
+    'lbaas': LOADBALANCER,
+    'lbaasv2': LOADBALANCERV2,
+    'fwaas': FIREWALL,
+    'vpnaas': VPN,
+    'metering': METERING,
+    'router': L3_ROUTER_NAT,
+    'flavors': FLAVORS,
+    'taas': TAAS
+}
+
+# TODO(salvatore-orlando): Move these (or derive them) from conf file
+ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER, FIREWALL, VPN, METERING,
+                    L3_ROUTER_NAT, LOADBALANCERV2, TAAS]
+
+# URL prefix used for each service's API resources
+COMMON_PREFIXES = {
+    CORE: "",
+    DUMMY: "/dummy_svc",
+    LOADBALANCER: "/lb",
+    LOADBALANCERV2: "/lbaas",
+    FIREWALL: "/fw",
+    VPN: "/vpn",
+    METERING: "/metering",
+    L3_ROUTER_NAT: "",
+    TAAS: "/taas"
+}
+
+# Service operation status constants
+ACTIVE = "ACTIVE"
+DOWN = "DOWN"
+CREATED = "CREATED"
+PENDING_CREATE = "PENDING_CREATE"
+PENDING_UPDATE = "PENDING_UPDATE"
+PENDING_DELETE = "PENDING_DELETE"
+INACTIVE = "INACTIVE"
+ERROR = "ERROR"
+
+ACTIVE_PENDING_STATUSES = (
+    ACTIVE,
+    PENDING_CREATE,
+    PENDING_UPDATE
+)
+
+# Network Type constants
+TYPE_FLAT = 'flat'
+TYPE_GRE = 'gre'
+TYPE_LOCAL = 'local'
+TYPE_VXLAN = 'vxlan'
+TYPE_VLAN = 'vlan'
+TYPE_NONE = 'none'
+
+# Values for network_type
+
+# For VLAN Network
+MIN_VLAN_TAG = 1
+MAX_VLAN_TAG = 4094
+
+# For GRE Tunnel
+MIN_GRE_ID = 1
+MAX_GRE_ID = 2 ** 32 - 1
+
+# For VXLAN Tunnel
+MIN_VXLAN_VNI = 1
+MAX_VXLAN_VNI = 2 ** 24 - 1
+VXLAN_UDP_PORT = 4789
+
+# Network Type MTU overhead
+GRE_ENCAP_OVERHEAD = 42
+VXLAN_ENCAP_OVERHEAD = 50
diff --git a/neutron_taas/neutron_dependencies/neutron.conf b/neutron_taas/neutron_dependencies/neutron.conf
new file mode 100644
index 0000000..8453158
--- /dev/null
+++ b/neutron_taas/neutron_dependencies/neutron.conf
@@ -0,0 +1,1058 @@
+[DEFAULT]
+notify_nova_on_port_data_changes
= True +notify_nova_on_port_status_changes = True +auth_strategy = keystone +allow_overlapping_ips = True +policy_file = /etc/neutron/policy.json +debug = True +verbose = True +service_plugins = neutron.services.l3_router.l3_router_plugin.L3RouterPlugin, neutron_taas.services.taas.taas_plugin.TaasPlugin +core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin +rabbit_userid = stackrabbit +rabbit_password = stackqueue +rabbit_hosts = 10.0.2.15 +rpc_backend = rabbit +logging_exception_prefix = %(color)s%(asctime)s.%(msecs)03d TRACE %(name)s %(instance)s +logging_debug_format_suffix = from (pid=%(process)d) %(funcName)s %(pathname)s:%(lineno)d +logging_default_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [-%(color)s] %(instance)s%(color)s%(message)s +logging_context_format_string = %(asctime)s.%(msecs)03d %(color)s%(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_id)s%(color)s] %(instance)s%(color)s%(message)s +use_syslog = False +state_path = /opt/stack/data/neutron + +[taas] +driver = neutron_taas.services.taas.drivers.linux.ovs_taas.OvsTaasDriver +enabled = True +vlan_range_start = 3000 +vlan_range_end = 3500 + +# Print more verbose output (set logging level to INFO instead of default WARNING level). +# verbose = False + +# =========Start Global Config Option for Distributed L3 Router=============== +# Setting the "router_distributed" flag to "True" will default to the creation +# of distributed tenant routers. The admin can override this flag by specifying +# the type of the router on the create request (admin-only attribute). Default +# value is "False" to support legacy mode (centralized) routers. +# +# router_distributed = False +# +# ===========End Global Config Option for Distributed L3 Router=============== + +# Print debugging output (set logging level to DEBUG instead of default WARNING level). +# debug = False + +# Where to store Neutron state files. This directory must be writable by the +# user executing the agent. 
+# state_path = /var/lib/neutron + +# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s +# log_date_format = %Y-%m-%d %H:%M:%S + +# use_syslog -> syslog +# log_file and log_dir -> log_dir/log_file +# (not log_file) and log_dir -> log_dir/{binary_name}.log +# use_stderr -> stderr +# (not user_stderr) and (not log_file) -> stdout +# publish_errors -> notification system + +# use_syslog = False +# syslog_log_facility = LOG_USER + +# use_stderr = True +# log_file = +# log_dir = + +# publish_errors = False + +# Address to bind the API server to +# bind_host = 0.0.0.0 + +# Port the bind the API server to +# bind_port = 9696 + +# Path to the extensions. Note that this can be a colon-separated list of +# paths. For example: +# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions +# The __path__ of neutron.extensions is appended to this, so if your +# extensions are in there you don't need to specify them here +# api_extensions_path = + +# (StrOpt) Neutron core plugin entrypoint to be loaded from the +# neutron.core_plugins namespace. See setup.cfg for the entrypoint names of the +# plugins included in the neutron source distribution. For compatibility with +# previous versions, the class name of a plugin can be specified instead of its +# entrypoint name. +# +# core_plugin = +# Example: core_plugin = ml2 + +# (ListOpt) List of service plugin entrypoints to be loaded from the +# neutron.service_plugins namespace. See setup.cfg for the entrypoint names of +# the plugins included in the neutron source distribution. For compatibility +# with previous versions, the class name of a plugin can be specified instead +# of its entrypoint name. +# +# service_plugins = +# Example: service_plugins = router,firewall,lbaas,vpnaas,metering + +# Paste configuration file +# api_paste_config = api-paste.ini + +# (StrOpt) Hostname to be used by the neutron server, agents and services +# running on this machine. 
All the agents and services running on this machine
+# must use the same host value.
+# The default value is hostname of the machine.
+#
+# host =
+
+# The strategy to be used for auth.
+# Supported values are 'keystone'(default), 'noauth'.
+# auth_strategy = keystone
+
+# Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be
+# randomly generated.
+# 3 octet
+# base_mac = fa:16:3e:00:00:00
+# 4 octet
+# base_mac = fa:16:3e:4f:00:00
+
+# DVR Base MAC address. The first 3 octets will remain unchanged. If the
+# 4th octet is not 00, it will also be used. The others will be randomly
+# generated. The 'dvr_base_mac' *must* be different from 'base_mac' to
+# avoid mixing them up with MAC's allocated for tenant ports.
+# A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00
+# The default is 3 octet
+# dvr_base_mac = fa:16:3f:00:00:00
+
+# Maximum number of retries to generate a unique MAC address
+# mac_generation_retries = 16
+
+# DHCP Lease duration (in seconds). Use -1 to
+# tell dnsmasq to use infinite lease times.
+# dhcp_lease_duration = 86400
+
+# Allow sending resource operation notification to DHCP agent
+# dhcp_agent_notification = True
+
+# Enable or disable bulk create/update/delete operations
+# allow_bulk = True
+# Enable or disable pagination
+# allow_pagination = False
+# Enable or disable sorting
+# allow_sorting = False
+# Enable or disable overlapping IPs for subnets
+# Attention: the following parameter MUST be set to False if Neutron is
+# being used in conjunction with nova security groups
+# allow_overlapping_ips = False
+# Ensure that configured gateway is on subnet. For IPv6, validate only if
+# gateway is not a link local address. Deprecated, to be removed during the
+# K release, at which point the check will be mandatory.
+# force_gateway_on_subnet = True + +# Default maximum number of items returned in a single response, +# value == infinite and value < 0 means no max limit, and value must +# be greater than 0. If the number of items requested is greater than +# pagination_max_limit, server will just return pagination_max_limit +# of number of items. +# pagination_max_limit = -1 + +# Maximum number of DNS nameservers per subnet +# max_dns_nameservers = 5 + +# Maximum number of host routes per subnet +# max_subnet_host_routes = 20 + +# Maximum number of fixed ips per port +# max_fixed_ips_per_port = 5 + +# Maximum number of routes per router +# max_routes = 30 + +# Default Subnet Pool to be used for IPv4 subnet-allocation. +# Specifies by UUID the pool to be used in case of subnet-create being called +# without a subnet-pool ID. The default of None means that no pool will be +# used unless passed explicitly to subnet create. If no pool is used, then a +# CIDR must be passed to create a subnet and that subnet will not be allocated +# from any pool; it will be considered part of the tenant's private address +# space. +# default_ipv4_subnet_pool = + +# Default Subnet Pool to be used for IPv6 subnet-allocation. +# Specifies by UUID the pool to be used in case of subnet-create being +# called without a subnet-pool ID. Set to "prefix_delegation" +# to enable IPv6 Prefix Delegation in a PD-capable environment. +# See the description for default_ipv4_subnet_pool for more information. +# default_ipv6_subnet_pool = + +# =========== items for MTU selection and advertisement ============= +# Advertise MTU. If True, effort is made to advertise MTU +# settings to VMs via network methods (ie. DHCP and RA MTU options) +# when the network's preferred MTU is known. 
+# advertise_mtu = False +# ======== end of items for MTU selection and advertisement ========= + +# =========== items for agent management extension ============= +# Seconds to regard the agent as down; should be at least twice +# report_interval, to be sure the agent is down for good +# agent_down_time = 75 +# =========== end of items for agent management extension ===== + +# =========== items for agent scheduler extension ============= +# Driver to use for scheduling network to DHCP agent +# network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.ChanceScheduler +# Driver to use for scheduling router to a default L3 agent +# router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler +# Driver to use for scheduling a loadbalancer pool to an lbaas agent +# loadbalancer_pool_scheduler_driver = neutron.services.loadbalancer.agent_scheduler.ChanceScheduler + +# (StrOpt) Representing the resource type whose load is being reported by +# the agent. +# This can be 'networks','subnets' or 'ports'. When specified (Default is networks), +# the server will extract particular load sent as part of its agent configuration object +# from the agent report state, which is the number of resources being consumed, at +# every report_interval. +# dhcp_load_type can be used in combination with network_scheduler_driver = +# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler +# When the network_scheduler_driver is WeightScheduler, dhcp_load_type can +# be configured to represent the choice for the resource being balanced. +# Example: dhcp_load_type = networks +# Values: +# networks - number of networks hosted on the agent +# subnets - number of subnets associated with the networks hosted on the agent +# ports - number of ports associated with the networks hosted on the agent +# dhcp_load_type = networks + +# Allow auto scheduling networks to DHCP agent. 
It will schedule non-hosted +# networks to first DHCP agent which sends get_active_networks message to +# neutron server +# network_auto_schedule = True + +# Allow auto scheduling routers to L3 agent. It will schedule non-hosted +# routers to first L3 agent which sends sync_routers message to neutron server +# router_auto_schedule = True + +# Allow automatic rescheduling of routers from dead L3 agents with +# admin_state_up set to True to alive agents. +# allow_automatic_l3agent_failover = False + +# Allow automatic removal of networks from dead DHCP agents with +# admin_state_up set to True. +# Networks could then be rescheduled if network_auto_schedule is True +# allow_automatic_dhcp_failover = True + +# Number of DHCP agents scheduled to host a network. This enables redundant +# DHCP agents for configured networks. +# dhcp_agents_per_network = 1 + +# Enable services on agents with admin_state_up False. +# If this option is False, when admin_state_up of an agent is turned to +# False, services on it will be disabled. If this option is True, services +# on agents with admin_state_up False keep available and manual scheduling +# to such agents is available. Agents with admin_state_up False are not +# selected for automatic scheduling regardless of this option. +# enable_services_on_agents_with_admin_state_down = False + +# =========== end of items for agent scheduler extension ===== + +# =========== items for l3 extension ============== +# Enable high availability for virtual routers. +# l3_ha = False +# +# Maximum number of l3 agents which a HA router will be scheduled on. If it +# is set to 0 the router will be scheduled on every agent. +# max_l3_agents_per_router = 3 +# +# Minimum number of l3 agents which a HA router will be scheduled on. The +# default value is 2. 
+# min_l3_agents_per_router = 2 +# +# CIDR of the administrative network if HA mode is enabled +# l3_ha_net_cidr = 169.254.192.0/18 +# =========== end of items for l3 extension ======= + +# =========== items for metadata proxy configuration ============== +# User (uid or name) running metadata proxy after its initialization +# (if empty: agent effective user) +# metadata_proxy_user = + +# Group (gid or name) running metadata proxy after its initialization +# (if empty: agent effective group) +# metadata_proxy_group = + +# Enable/Disable log watch by metadata proxy, it should be disabled when +# metadata_proxy_user/group is not allowed to read/write its log file and +# 'copytruncate' logrotate option must be used if logrotate is enabled on +# metadata proxy log files. Option default value is deduced from +# metadata_proxy_user: watch log is enabled if metadata_proxy_user is agent +# effective user id/name. +# metadata_proxy_watch_log = + +# Location of Metadata Proxy UNIX domain socket +# metadata_proxy_socket = $state_path/metadata_proxy +# =========== end of items for metadata proxy configuration ============== + +# ========== items for VLAN trunking networks ========== +# Setting this flag to True will allow plugins that support it to +# create VLAN transparent networks. This flag has no effect for +# plugins that do not support VLAN transparent networks. +# vlan_transparent = False +# ========== end of items for VLAN trunking networks ========== + +# =========== WSGI parameters related to the API server ============== +# Number of separate worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as workers. The parent process manages them. +# api_workers = 0 + +# Number of separate RPC worker processes to spawn. The default, 0, runs the +# worker thread in the current process. Greater than 0 launches that number of +# child processes as RPC workers. 
The parent process manages them. +# This feature is experimental until issues are addressed and testing has been +# enabled for various plugins for compatibility. +# rpc_workers = 0 + +# Timeout for client connections socket operations. If an +# incoming connection is idle for this number of seconds it +# will be closed. A value of '0' means wait forever. (integer +# value) +# client_socket_timeout = 900 + +# wsgi keepalive option. Determines if connections are allowed to be held open +# by clients after a request is fulfilled. A value of False will ensure that +# the socket connection will be explicitly closed once a response has been +# sent to the client. +# wsgi_keep_alive = True + +# Sets the value of TCP_KEEPIDLE in seconds to use for each server socket when +# starting API server. Not supported on OS X. +# tcp_keepidle = 600 + +# Number of seconds to keep retrying to listen +# retry_until_window = 30 + +# Number of backlog requests to configure the socket with. +# backlog = 4096 + +# Max header line to accommodate large tokens +# max_header_line = 16384 + +# Enable SSL on the API server +# use_ssl = False + +# Certificate file to use when starting API server securely +# ssl_cert_file = /path/to/certfile + +# Private key file to use when starting API server securely +# ssl_key_file = /path/to/keyfile + +# CA certificate file to use when starting API server securely to +# verify connecting clients. This is an optional parameter only required if +# API clients need to authenticate to the API server using SSL certificates +# signed by a trusted CA +# ssl_ca_file = /path/to/cafile +# ======== end of WSGI parameters related to the API server ========== + +# ======== neutron nova interactions ========== +# Send notification to nova when port status is active. +# notify_nova_on_port_status_changes = True + +# Send notifications to nova when port data (fixed_ips/floatingips) change +# so nova can update it's cache. 
+# notify_nova_on_port_data_changes = True + +# URL for connection to nova (Only supports one nova region currently). +# nova_url = http://127.0.0.1:8774/v2 + +# Name of nova region to use. Useful if keystone manages more than one region +# nova_region_name = + +# Username for connection to nova in admin context +# nova_admin_username = + +# The uuid of the admin nova tenant +# nova_admin_tenant_id = + +# The name of the admin nova tenant. If the uuid of the admin nova tenant +# is set, this is optional. Useful for cases where the uuid of the admin +# nova tenant is not available when configuration is being done. +# nova_admin_tenant_name = + +# Password for connection to nova in admin context. +# nova_admin_password = + +# Authorization URL for connection to nova in admin context. +# nova_admin_auth_url = + +# CA file for novaclient to verify server certificates +# nova_ca_certificates_file = + +# Boolean to control ignoring SSL errors on the nova url +# nova_api_insecure = False + +# Number of seconds between sending events to nova if there are any events to send +# send_events_interval = 2 + +# ======== end of neutron nova interactions ========== + +# +# Options defined in oslo.messaging +# + +# Use durable queues in amqp. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +# amqp_durable_queues=false + +# Auto-delete queues in amqp. (boolean value) +# amqp_auto_delete=false + +# Size of RPC connection pool. (integer value) +# rpc_conn_pool_size=30 + +# Qpid broker hostname. (string value) +# qpid_hostname=localhost + +# Qpid broker port. (integer value) +# qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +# qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# qpid_username= + +# Password for Qpid connection. (string value) +# qpid_password= + +# Space separated list of SASL mechanisms to use for auth. 
+# (string value) +# qpid_sasl_mechanisms= + +# Seconds between connection keepalive heartbeats. (integer +# value) +# qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +# qpid_tcp_nodelay=true + +# The qpid topology version to use. Version 1 is what was +# originally used by impl_qpid. Version 2 includes some +# backwards-incompatible changes that allow broker federation +# to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. +# (integer value) +# qpid_topology_version=1 + +# SSL version to use (valid only if SSL enabled). valid values +# are TLSv1, SSLv23 and SSLv3. SSLv2 may be available on some +# distributions. (string value) +# kombu_ssl_version= + +# SSL key file (valid only if SSL enabled). (string value) +# kombu_ssl_keyfile= + +# SSL cert file (valid only if SSL enabled). (string value) +# kombu_ssl_certfile= + +# SSL certification authority file (valid only if SSL +# enabled). (string value) +# kombu_ssl_ca_certs= + +# How long to wait before reconnecting in response to an AMQP +# consumer cancel notification. (floating point value) +# kombu_reconnect_delay=1.0 + +# The RabbitMQ broker address where a single node is used. +# (string value) +# rabbit_host=localhost + +# The RabbitMQ broker port where a single node is used. +# (integer value) +# rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# rabbit_hosts=$rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +# rabbit_use_ssl=false + +# The RabbitMQ userid. (string value) +# rabbit_userid=guest + +# The RabbitMQ password. (string value) +# rabbit_password=guest + +# the RabbitMQ login method (string value) +# rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. 
(integer +# value) +# rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to +# RabbitMQ. (integer value) +# rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +# rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change +# this option, you must wipe the RabbitMQ database. (boolean +# value) +# rabbit_ha_queues=false + +# If passed, use a fake RabbitMQ provider. (boolean value) +# fake_rabbit=false + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve +# to this address. (string value) +# rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +# rpc_zmq_matchmaker=oslo.messaging._drivers.matchmaker.MatchMakerLocalhost + +# ZeroMQ receiver listening port. (integer value) +# rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +# rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per +# topic. Default is unlimited. (integer value) +# rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +# rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP +# address. Must match "host" option, if running Nova. (string +# value) +# rpc_zmq_host=oslo + +# Seconds to wait before a cast expires (TTL). Only supported +# by impl_zmq. (integer value) +# rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +# matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +# matchmaker_heartbeat_ttl=600 + +# Size of RPC greenthread pool. (integer value) +# rpc_thread_pool_size=64 + +# Driver or drivers to handle sending notifications. (multi +# valued) +# notification_driver= + +# AMQP topic used for OpenStack notifications. 
(list value) +# Deprecated group/name - [rpc_notifier2]/topics +# notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +# rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend +# option and driver specific configuration. (string value) +# transport_url= + +# The messaging driver to use, defaults to rabbit. Other +# drivers include qpid and zmq. (string value) +# rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the +# transport_url option. (string value) +# control_exchange=openstack + + +[matchmaker_redis] + +# +# Options defined in oslo.messaging +# + +# Host to locate redis. (string value) +# host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +# port=6379 + +# Password for Redis server (optional). (string value) +# password= + + +[matchmaker_ring] + +# +# Options defined in oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +# ringfile=/etc/oslo/matchmaker_ring.json + +[quotas] +# Default driver to use for quota checks +# quota_driver = neutron.db.quota_db.DbQuotaDriver + +# Resource name(s) that are supported in quota features +# quota_items = network,subnet,port + +# Default number of resource allowed per tenant. A negative value means +# unlimited. +# default_quota = -1 + +# Number of networks allowed per tenant. A negative value means unlimited. +# quota_network = 10 + +# Number of subnets allowed per tenant. A negative value means unlimited. +# quota_subnet = 10 + +# Number of ports allowed per tenant. A negative value means unlimited. +# quota_port = 50 + +# Number of security groups allowed per tenant. A negative value means +# unlimited. +# quota_security_group = 10 + +# Number of security group rules allowed per tenant. 
A negative value means +# unlimited. +# quota_security_group_rule = 100 + +# Number of vips allowed per tenant. A negative value means unlimited. +# quota_vip = 10 + +# Number of pools allowed per tenant. A negative value means unlimited. +# quota_pool = 10 + +# Number of pool members allowed per tenant. A negative value means unlimited. +# The default is unlimited because a member is not a real resource consumer +# on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_member = -1 + +# Number of health monitors allowed per tenant. A negative value means +# unlimited. +# The default is unlimited because a health monitor is not a real resource +# consumer on Openstack. However, on back-end, a member is a resource consumer +# and that is the reason why quota is possible. +# quota_health_monitor = -1 + +# Number of loadbalancers allowed per tenant. A negative value means unlimited. +# quota_loadbalancer = 10 + +# Number of listeners allowed per tenant. A negative value means unlimited. +# quota_listener = -1 + +# Number of v2 health monitors allowed per tenant. A negative value means +# unlimited. These health monitors exist under the lbaas v2 API +# quota_healthmonitor = -1 + +# Number of routers allowed per tenant. A negative value means unlimited. +# quota_router = 10 + +# Number of floating IPs allowed per tenant. A negative value means unlimited. +# quota_floatingip = 50 + +# Number of firewalls allowed per tenant. A negative value means unlimited. +# quota_firewall = 1 + +# Number of firewall policies allowed per tenant. A negative value means +# unlimited. +# quota_firewall_policy = 1 + +# Number of firewall rules allowed per tenant. A negative value means +# unlimited. 
+# quota_firewall_rule = 100 + +[agent] +root_helper_daemon = sudo /usr/local/bin/neutron-rootwrap-daemon /etc/neutron/rootwrap.conf +root_helper = sudo /usr/local/bin/neutron-rootwrap /etc/neutron/rootwrap.conf +# Use "sudo neutron-rootwrap /etc/neutron/rootwrap.conf" to use the real +# root filter facility. +# Change to "sudo" to skip the filtering and just run the command directly +# root_helper = sudo + +# Set to true to add comments to generated iptables rules that describe +# each rule's purpose. (System must support the iptables comments module.) +# comment_iptables_rules = True + +# Maximum number of elements which can be stored in an IPset. +# If None is specified, the system default will be used. +# ipset_maxelem = 131072 + +# Initial hash size for an IPset. Must be a power of 2, +# else the kernel will round it up automatically. +# If None is specified, the system default will be used. +# ipset_hashsize = 2048 + +# Root helper daemon application to use when possible. +# root_helper_daemon = + +# Use the root helper when listing the namespaces on a system. This may not +# be required depending on the security configuration. If the root helper is +# not required, set this to False for a performance improvement. 
+# use_helper_for_ns_read = True + +# The interval to check external processes for failure in seconds (0=disabled) +# check_child_processes_interval = 60 + +# Action to take when an external process spawned by an agent dies +# Values: +# respawn - Respawns the external process +# exit - Exits the agent +# check_child_processes_action = respawn + +# =========== items for agent management extension ============= +# seconds between nodes reporting state to server; should be less than +# agent_down_time, best if it is half or less than agent_down_time +# report_interval = 30 + +# =========== end of items for agent management extension ===== + +[keystone_authtoken] +signing_dir = /var/cache/neutron +cafile = /opt/stack/data/ca-bundle.pem +project_domain_id = default +project_name = service +user_domain_id = default +password = nomoresecrete +username = neutron +auth_url = http://10.0.2.15:35357 +auth_plugin = password +auth_uri = http://10.0.2.15:5000 +identity_uri = http://127.0.0.1:5000 +admin_tenant_name = %SERVICE_TENANT_NAME% +admin_user = %SERVICE_USER% +admin_password = %SERVICE_PASSWORD% + +[database] +connection = mysql://root:stackdb@127.0.0.1/neutron?charset=utf8 +# This line MUST be changed to actually run the plugin. +# Example: +# connection = mysql://root:pass@127.0.0.1:3306/neutron +# Replace 127.0.0.1 above with the IP address of the database used by the +# main neutron server. (Leave it as is if the database runs on this host.) +# connection = sqlite:// +# NOTE: In deployment the [database] section and its connection attribute may +# be set in the corresponding core plugin '.ini' file. However, it is suggested +# to put the [database] section and its connection attribute in this +# configuration file. 
+ +# Database engine for which script will be generated when using offline +# migration +# engine = + +# The SQLAlchemy connection string used to connect to the slave database +# slave_connection = + +# Database reconnection retry times - in event connectivity is lost +# set to -1 implies an infinite retry count +# max_retries = 10 + +# Database reconnection interval in seconds - if the initial connection to the +# database fails +# retry_interval = 10 + +# Minimum number of SQL connections to keep open in a pool +# min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool +# max_pool_size = 10 + +# Timeout in seconds before idle sql connections are reaped +# idle_timeout = 3600 + +# If set, use this value for max_overflow with sqlalchemy +# max_overflow = 20 + +# Verbosity of SQL debugging information. 0=None, 100=Everything +# connection_debug = 0 + +# Add python stack traces to SQL as comment strings +# connection_trace = False + +# If set, use this value for pool_timeout with sqlalchemy +# pool_timeout = 10 + +[nova] +region_name = RegionOne +project_domain_id = default +project_name = service +user_domain_id = default +password = nomoresecrete +username = nova +auth_url = http://10.0.2.15:35357 +auth_plugin = password +# Name of the plugin to load +# auth_plugin = + +# Config Section from which to load plugin specific options +# auth_section = + +# PEM encoded Certificate Authority to use when verifying HTTPs connections. +# cafile = + +# PEM encoded client certificate cert file +# certfile = + +# Verify HTTPS connections. +# insecure = False + +# PEM encoded client certificate key file +# keyfile = + +# Name of nova region to use. Useful if keystone manages more than one region. +# region_name = + +# Timeout value for http requests +# timeout = + +[oslo_concurrency] + +# Directory to use for lock files. For security, the specified directory should +# only be writable by the user running the processes that need locking. 
+# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. +lock_path = $state_path/lock + +# Enables or disables inter-process locks. +# disable_process_locking = False + +[oslo_policy] + +# The JSON file that defines policies. +# policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. +# policy_default_rule = default + +# Directories where policy configuration files are stored. +# They can be relative to any directory in the search path defined by the +# config_dir option, or absolute paths. The file defined by policy_file +# must exist for these directories to be searched. Missing or empty +# directories are ignored. +# policy_dirs = policy.d + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +# server_request_prefix = exclusive + +# Address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +# broadcast_prefix = broadcast + +# Address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +# group_request_prefix = unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +# container_name = + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +# idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +# trace = false + +# CA certificate PEM file for verifing server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +# ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string value) +# Deprecated group/name - [amqp1]/ssl_cert_file +# ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string 
value) +# Deprecated group/name - [amqp1]/ssl_key_file +# ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +# ssl_key_password = + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +# allow_insecure_clients = false + + +[oslo_messaging_qpid] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +# amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +# amqp_auto_delete = false + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +# rpc_conn_pool_size = 30 + +# Qpid broker hostname. (string value) +# Deprecated group/name - [DEFAULT]/qpid_hostname +# qpid_hostname = localhost + +# Qpid broker port. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_port +# qpid_port = 5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/qpid_hosts +# qpid_hosts = $qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_username +# qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_password +# qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string value) +# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms +# qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_heartbeat +# qpid_heartbeat = 60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group/name - [DEFAULT]/qpid_protocol +# qpid_protocol = tcp + +# Whether to disable the Nagle algorithm. 
(boolean value) +# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay +# qpid_tcp_nodelay = true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity +# qpid_receiver_capacity = 1 + +# The qpid topology version to use. Version 1 is what was originally used by +# impl_qpid. Version 2 includes some backwards-incompatible changes that allow +# broker federation to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_topology_version +# qpid_topology_version = 1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +# amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +# amqp_auto_delete = false + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +# rpc_conn_pool_size = 30 + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +# kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +# kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +# kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +# kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. 
(floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +# kombu_reconnect_delay = 1.0 + +# The RabbitMQ broker address where a single node is used. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_host +# rabbit_host = localhost + +# The RabbitMQ broker port where a single node is used. (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_port +# rabbit_port = 5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +# rabbit_hosts = $rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +# rabbit_use_ssl = false + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +# rabbit_userid = guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +# rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +# rabbit_login_method = AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +# rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +# rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +# rabbit_retry_backoff = 2 + +# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry +# count). (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_max_retries +# rabbit_max_retries = 0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you +# must wipe the RabbitMQ database. 
(boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_ha_queues +# rabbit_ha_queues = false + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) +# Deprecated group/name - [DEFAULT]/fake_rabbit +# fake_rabbit = false diff --git a/neutron_taas/neutron_dependencies/neutron_taas_db_init.py b/neutron_taas/neutron_dependencies/neutron_taas_db_init.py new file mode 100755 index 0000000..1418a65 --- /dev/null +++ b/neutron_taas/neutron_dependencies/neutron_taas_db_init.py @@ -0,0 +1,120 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from neutron.db import models_v2 +import sqlalchemy as sa +from sqlalchemy import create_engine +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +import sys + +BASE = declarative_base() + +_ENGINE = None +_MAKER = None + +direction_types = sa.Enum('IN', 'OUT', 'BOTH', name='tapflows_direction') + + +class TapService(BASE, models_v2.HasId, models_v2.HasTenant): + # Represents a V2 TapService Object + + __tablename__ = 'tap_services' + name = sa.Column(sa.String(255), nullable=True) + description = sa.Column(sa.String(1024), nullable=True) + port_id = sa.Column(sa.String(36), nullable=False) + network_id = sa.Column(sa.String(36), nullable=True) + + +class TapFlow(BASE, models_v2.HasId, models_v2.HasTenant): + # Represents a V2 TapFlow Object + + __tablename__ = 'tap_flows' + name = sa.Column(sa.String(255), nullable=True) + description = sa.Column(sa.String(1024), nullable=True) + tap_service_id = sa.Column(sa.String(36), + sa.ForeignKey("tap_services.id", + ondelete="CASCADE"), + nullable=False) + source_port = sa.Column(sa.String(36), nullable=False) + direction = sa.Column(direction_types, nullable=False) + + +class TapIdAssociation(BASE): + # Internal mapping between a TAP Service and id to be used + # by the Agents + + __tablename__ = 'tap_id_associations' + tap_service_id = sa.Column(sa.String(36)) + taas_id = sa.Column(sa.Integer, primary_key=True, autoincrement=True) + + +def configure_db(options): + # Establish the database, create an engine if needed, and + # register the models. 
+ # param options: Mapping of configuration options + + global _ENGINE + if not _ENGINE: + _ENGINE = create_engine(options['sql_connection'], + echo=False, + echo_pool=True, + pool_recycle=3600) + register_models() + + +def clear_db(): + global _ENGINE + assert _ENGINE + for table in reversed(BASE.metadata.sorted_tables): + _ENGINE.execute(table.delete()) + + +def get_session(autocommit=True, expire_on_commit=False): + # Helper method to grab session + global _MAKER, _ENGINE + if not _MAKER: + assert _ENGINE + _MAKER = sessionmaker(bind=_ENGINE, + autocommit=autocommit, + expire_on_commit=expire_on_commit) + return _MAKER() + + +def register_models(): + # Register Models and create properties + global _ENGINE + assert _ENGINE + BASE.metadata.create_all(_ENGINE) + + +def unregister_mode(): + # Unregister Models, useful clearing out data before testing + global _ENGINE + assert _ENGINE + BASE.metadata.drop_all(_ENGINE) + + +def main(argv): + print'Configuring the Neutron TaaS database' + configure_db({'sql_connection': ('mysql://root:%s@127.0.0.1/neutron' % + argv[0])}) + print'Configured the Neutron TaaS database' + return + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/neutron_taas/neutron_dependencies/patch_conf_file.sh b/neutron_taas/neutron_dependencies/patch_conf_file.sh new file mode 100755 index 0000000..0568b89 --- /dev/null +++ b/neutron_taas/neutron_dependencies/patch_conf_file.sh @@ -0,0 +1,80 @@ +#!/bin/bash +# +# Copyright (C) 2015 Ericsson AB +# Copyright (C) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. +# + +INSERT_KEY="core_plugin" + +T_CONF_FILE=$1 +N_CONF_FILE=$2 + +if [ -z $T_CONF_FILE ] || [ -z $N_CONF_FILE ]; then + echo "Usage: $0 " + exit 1 +fi + +ACTION="unspecified" + +while read -r LINE; do + # Skip blank lines + if [ ${#LINE} -eq 0 ]; then + continue; + fi + + # Identify action to be performed based on section headers + read -a WORD_ARRAY <<< $LINE + if [[ ${WORD_ARRAY[0]} == "[DEFAULT]" ]]; then + ACTION="merge" + + # Advance to next line + continue + fi + if [[ ${WORD_ARRAY[0]} == "[taas]" ]]; then + ACTION="append" + + # Add a blank line (separator) to Neutron config file + echo "" >> $N_CONF_FILE + fi + + # Execute action + case $ACTION in + "merge") + KEY="${WORD_ARRAY[0]}" + VALUE="${WORD_ARRAY[2]}" + + # Add 'value' to the end of the line beginning with 'key' + # in the Neutron config file. If such a line does not + # exist, create a 'key = value' line and insert it just + # after the line beginning with 'INSERT_KEY'. + grep ^$KEY $N_CONF_FILE > /dev/null + if [ $? -eq 0 ]; then + sed -i "/^$KEY/ s/$/, $VALUE/" $N_CONF_FILE + else + sed -i "/^$INSERT_KEY/ a $KEY = $VALUE" $N_CONF_FILE + fi + ;; + + "append") + # Add entire line to Neutron config file + echo "$LINE" >> $N_CONF_FILE + ;; + + *) + ;; + esac +done < $T_CONF_FILE + +exit 0 diff --git a/neutron_taas/neutron_dependencies/repos.py b/neutron_taas/neutron_dependencies/repos.py new file mode 100644 index 0000000..7d9ab1b --- /dev/null +++ b/neutron_taas/neutron_dependencies/repos.py @@ -0,0 +1,102 @@ +# Copyright (c) 2015, A10 Networks +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ConfigParser +import importlib +import os + +from oslo_config import cfg +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + + +class NeutronModules(object): + + MODULES = { + 'neutron_fwaas': { + 'alembic-name': 'fwaas', + }, + 'neutron_lbaas': { + 'alembic-name': 'lbaas', + }, + 'neutron_vpnaas': { + 'alembic-name': 'vpnaas', + }, + 'neutron_taas': { + 'alembic-name': 'taas', + }, + } + + def __init__(self): + self.repos = {} + for repo in self.MODULES: + self.repos[repo] = {} + self.repos[repo]['mod'] = self._import_or_none(repo) + self.repos[repo]['ini'] = None + + def _import_or_none(self, module): + try: + return importlib.import_module(module) + except ImportError: + return None + + def installed_list(self): + z = filter(lambda k: self.repos[k]['mod'] is not None, self.repos) + LOG.debug("NeutronModules related repos installed = %s", z) + return z + + def module(self, module): + return self.repos[module]['mod'] + + def alembic_name(self, module): + return self.MODULES[module]['alembic-name'] + + # Return an INI parser for the child module. oslo.config is a bit too + # magical in its INI loading, and in one notable case, we need to merge + # together the [service_providers] section across at least four + # repositories. 
+ def ini(self, module): + if self.repos[module]['ini'] is None: + neutron_dir = None + try: + neutron_dir = cfg.CONF.config_dir + except cfg.NoSuchOptError: + pass + + if neutron_dir is None: + neutron_dir = '/etc/neutron' + + ini = ConfigParser.SafeConfigParser() + ini_path = os.path.join(neutron_dir, '%s.conf' % module) + if os.path.exists(ini_path): + ini.read(ini_path) + + self.repos[module]['ini'] = ini + + return self.repos[module]['ini'] + + def service_providers(self, module): + ini = self.ini(module) + + sp = [] + try: + for name, value in ini.items('service_providers'): + if name == 'service_provider': + sp.append(value) + except ConfigParser.NoSectionError: + pass + + return sp diff --git a/neutron_taas/neutron_dependencies/taas.conf b/neutron_taas/neutron_dependencies/taas.conf new file mode 100755 index 0000000..2cedf16 --- /dev/null +++ b/neutron_taas/neutron_dependencies/taas.conf @@ -0,0 +1,8 @@ +[DEFAULT] +service_plugins = neutron_taas.services.taas.taas_plugin.TaasPlugin + +[taas] +driver = neutron_taas.services.taas.drivers.linux.ovs_taas.OvsTaasDriver +enabled = True +vlan_range_start = 3000 +vlan_range_end = 3500 diff --git a/neutron_taas/neutron_dependencies/topics.py b/neutron_taas/neutron_dependencies/topics.py new file mode 100644 index 0000000..4f38842 --- /dev/null +++ b/neutron_taas/neutron_dependencies/topics.py @@ -0,0 +1,60 @@ +# Copyright (c) 2012 OpenStack Foundation. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+# Topic name fragments shared between the TaaS plugin and its agents.
+# (Mirrors neutron.common.topics so TaaS can define its own topics.)
+
+# Resource names used when composing notification topics.
+NETWORK = 'network'
+SUBNET = 'subnet'
+PORT = 'port'
+SECURITY_GROUP = 'security_group'
+L2POPULATION = 'l2population'
+DVR = 'dvr'
+
+# Operations used when composing notification topics.
+CREATE = 'create'
+DELETE = 'delete'
+UPDATE = 'update'
+
+# Base RPC topic names.
+# NOTE(review): 'q-dhcp-notifer' preserves neutron's historical
+# misspelling on purpose -- topic strings are visible on the message
+# bus, so "fixing" the spelling would break interop with peers.
+AGENT = 'q-agent-notifier'
+PLUGIN = 'q-plugin'
+L3PLUGIN = 'q-l3-plugin'
+DHCP = 'q-dhcp-notifer'
+FIREWALL_PLUGIN = 'q-firewall-plugin'
+METERING_PLUGIN = 'q-metering-plugin'
+LOADBALANCER_PLUGIN = 'n-lbaas-plugin'
+TAAS_PLUGIN = 'n-taas-plugin'
+
+# Per-agent topic names.
+L3_AGENT = 'l3_agent'
+DHCP_AGENT = 'dhcp_agent'
+METERING_AGENT = 'metering_agent'
+LOADBALANCER_AGENT = 'n-lbaas_agent'
+TAAS_AGENT = 'n-taas_agent'
+
+
+def get_topic_name(prefix, table, operation, host=None):
+    """Create a topic name.
+
+    The topic name needs to be synced between the agent and the
+    plugin. The plugin will send a fanout message to all of the
+    listening agents so that the agents in turn can perform their
+    updates accordingly.
+
+    :param prefix: Common prefix for the plugin/agent message queues.
+    :param table: The table in question (NETWORK, SUBNET, PORT).
+    :param operation: The operation that invokes notification (CREATE,
+                      DELETE, UPDATE)
+    :param host: Add host to the topic
+    :returns: The topic name.
+ """ + if host: + return '%s-%s-%s.%s' % (prefix, table, operation, host) + return '%s-%s-%s' % (prefix, table, operation) diff --git a/neutron_taas/services/__init__.py b/neutron_taas/services/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/services/taas/__init__.py b/neutron_taas/services/taas/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/services/taas/agents/TaasAgent.sh b/neutron_taas/services/taas/agents/TaasAgent.sh new file mode 100755 index 0000000..f42c737 --- /dev/null +++ b/neutron_taas/services/taas/agents/TaasAgent.sh @@ -0,0 +1,280 @@ +#!/bin/bash + +taas_get_mac_addr() +{ + NEUTRON_PORT=$1 + + MAC_ADDR=`neutron port-show $NEUTRON_PORT | grep mac_address | cut -d'|' -f 3 | cut -d' ' -f 2` + + echo $MAC_ADDR +} + +taas_get_ovs_portname() +{ + NEUTRON_PORT=$1 + + echo "qvo${NEUTRON_PORT:0:11}" +} + +taas_get_ovs_portid() +{ + BRIDGE=$1 + OVS_PORT_NAME=$2 + + PORT_ID=`ovs-ofctl show $BRIDGE | grep $OVS_PORT_NAME | cut -d' ' -f 2 | cut -d'(' -f 1` + + echo $PORT_ID +} + +taas_init() +{ + HOST_IP=$1 + + # br-tap + BRIDGE="tcp:$HOST_IP:6632" + PATCHTAPINT_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-tapint` + PATCHTAPTUN_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-taptun` + ovs-ofctl add-flow $BRIDGE "table=0,priority=1,in_port=$PATCHTAPINT_PORT_ID,actions=resubmit(,1)" + ovs-ofctl add-flow $BRIDGE "table=0,priority=1,in_port=$PATCHTAPTUN_PORT_ID,actions=resubmit(,2)" + ovs-ofctl add-flow $BRIDGE "table=0,priority=0,actions=drop" + ovs-ofctl add-flow $BRIDGE "table=1,priority=0,actions=output:$PATCHTAPTUN_PORT_ID" + ovs-ofctl add-flow $BRIDGE "table=2,priority=0,actions=drop" + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + PATCHTUNTAP_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-tuntap` + ovs-ofctl add-flow $BRIDGE "table=0,priority=1,in_port=$PATCHTUNTAP_PORT_ID,actions=resubmit(,30)" + ovs-ofctl add-flow $BRIDGE 
"table=30,priority=0,actions=move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],mod_vlan_vid:1,output:2,output:3" + ovs-ofctl add-flow $BRIDGE "table=31,priority=2,reg0=0,actions=output:$PATCHTUNTAP_PORT_ID" + ovs-ofctl add-flow $BRIDGE "table=31,priority=1,reg0=1,actions=output:$PATCHTUNTAP_PORT_ID,move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],mod_vlan_vid:2,output:in_port" + ovs-ofctl add-flow $BRIDGE "table=31,priority=0,reg0=2,actions=learn(table=30,hard_timeout=60,priority=1,NXM_OF_VLAN_TCI[0..11],load:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],load:0->NXM_OF_VLAN_TCI[0..11],output:NXM_OF_IN_PORT[])" +} + +taas_clean() +{ + HOST_IP=$1 + + # br-tap + BRIDGE="tcp:$HOST_IP:6632" + ovs-ofctl del-flows $BRIDGE "table=0" + ovs-ofctl del-flows $BRIDGE "table=1" + ovs-ofctl del-flows $BRIDGE "table=2" + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + PATCHTUNTAP_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-tuntap` + ovs-ofctl del-flows $BRIDGE "table=0,in_port=$PATCHTUNTAP_PORT_ID" + ovs-ofctl del-flows $BRIDGE "table=30" + ovs-ofctl del-flows $BRIDGE "table=31" +} + +taas_create() +{ + HOST_IP=$1 + TAAS_ID=$2 + NEUTRON_PORT=$3 + VLAN=$4 + + taas_init $HOST_IP + + # br-int + BRIDGE="tcp:$HOST_IP:6631" + PATCHINTTAP_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-inttap` + OVS_PORT_NAME=`taas_get_ovs_portname $NEUTRON_PORT` + OVS_PORT_ID=`taas_get_ovs_portid $BRIDGE $OVS_PORT_NAME` + ovs-ofctl add-flow $BRIDGE "table=0,priority=20,in_port=$PATCHINTTAP_PORT_ID,dl_vlan=$TAAS_ID,actions=mod_vlan_vid:$VLAN,output:$OVS_PORT_ID" + + # br-tap + BRIDGE="tcp:$HOST_IP:6632" + PATCHTAPINT_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-tapint` + ovs-ofctl add-flow $BRIDGE "table=1,priority=1,dl_vlan=$TAAS_ID,actions=output:in_port" + ovs-ofctl add-flow $BRIDGE "table=2,priority=1,dl_vlan=$TAAS_ID,actions=output:$PATCHTAPINT_PORT_ID" + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + ovs-ofctl add-flow $BRIDGE 
"table=2,priority=1,tun_id=$TAAS_ID,actions=move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID[0..11]->NXM_OF_VLAN_TCI[0..11],resubmit(,31)" +} + +taas_destroy() +{ + HOST_IP=$1 + TAAS_ID=$2 + + # br-int + BRIDGE="tcp:$HOST_IP:6631" + PATCHINTTAP_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-inttap` + ovs-ofctl del-flows $BRIDGE "table=0,in_port=$PATCHINTTAP_PORT_ID,dl_vlan=$TAAS_ID" + + # br-tap + BRIDGE="tcp:$HOST_IP:6632" + ovs-ofctl del-flows $BRIDGE "table=1,dl_vlan=$TAAS_ID" + ovs-ofctl del-flows $BRIDGE "table=2,dl_vlan=$TAAS_ID" + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + ovs-ofctl del-flows $BRIDGE "table=2,tun_id=$TAAS_ID" +} + +taas_src_add() +{ + HOST_IP=$1 + TAAS_ID=$2 + NEUTRON_PORT=$3 + VLAN=$4 + DIR=$5 + + taas_init $HOST_IP + + # br-int + BRIDGE="tcp:$HOST_IP:6631" + if [ $DIR = "e" ] || [ $DIR = "b" ] + then + PATCHINTTAP_PORT_ID=`taas_get_ovs_portid $BRIDGE patch-inttap` + OVS_PORT_NAME=`taas_get_ovs_portname $NEUTRON_PORT` + OVS_PORT_ID=`taas_get_ovs_portid $BRIDGE $OVS_PORT_NAME` + ovs-ofctl add-flow $BRIDGE "table=0,priority=10,in_port=$OVS_PORT_ID,actions=normal,mod_vlan_vid:$TAAS_ID,output:$PATCHINTTAP_PORT_ID" + fi + if [ $DIR = "i" ] || [ $DIR = "b" ] + then + MAC_ADDR=`taas_get_mac_addr $NEUTRON_PORT` + ovs-ofctl add-flow $BRIDGE "table=0,priority=10,dl_vlan=$VLAN,dl_dst=$MAC_ADDR,actions=normal,mod_vlan_vid:$TAAS_ID,output:$PATCHINTTAP_PORT_ID" + ovs-ofctl add-flow $BRIDGE "table=0,priority=10,dl_vlan=$VLAN,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00,actions=normal,mod_vlan_vid:$TAAS_ID,output:$PATCHINTTAP_PORT_ID" + fi + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + ovs-ofctl add-flow $BRIDGE "table=2,priority=1,tun_id=$TAAS_ID,actions=move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID[0..11]->NXM_OF_VLAN_TCI[0..11],resubmit(,31)" +} + +taas_src_delete() +{ + HOST_IP=$1 + TAAS_ID=$2 + NEUTRON_PORT=$3 + VLAN=$4 + DIR=$5 + + # br-int + BRIDGE="tcp:$HOST_IP:6631" + if [ $DIR = "e" ] || [ $DIR = "b" ] + then + 
OVS_PORT_NAME=`taas_get_ovs_portname $NEUTRON_PORT` + OVS_PORT_ID=`taas_get_ovs_portid $BRIDGE $OVS_PORT_NAME` + ovs-ofctl del-flows $BRIDGE "table=0,in_port=$OVS_PORT_ID" + fi + if [ $DIR = "i" ] || [ $DIR = "b" ] + then + MAC_ADDR=`taas_get_mac_addr $NEUTRON_PORT` + ovs-ofctl del-flows $BRIDGE "table=0,dl_vlan=$VLAN,dl_dst=$MAC_ADDR" + ovs-ofctl del-flows $BRIDGE "table=0,dl_vlan=$VLAN,dl_dst=01:00:00:00:00:00/01:00:00:00:00:00" + fi + + # br-tun + BRIDGE="tcp:$HOST_IP:6633" + ovs-ofctl del-flows $BRIDGE "table=2,tun_id=$TAAS_ID" +} + +taas_dumpflows() +{ + HOST_IP=$1 + + echo -e "\n*** Tap-aaS Flows ($HOST_IP) ***\n" + + # br-int + echo "br-int" + BRIDGE="tcp:$HOST_IP:6631" + ovs-ofctl dump-flows $BRIDGE table=0 + echo -e "\n" + + # br-tap + echo "br-tap" + BRIDGE="tcp:$HOST_IP:6632" + ovs-ofctl dump-flows $BRIDGE + echo -e "\n" + + # br-tun + echo "br-tun" + BRIDGE="tcp:$HOST_IP:6633" + ovs-ofctl dump-flows $BRIDGE table=0 + ovs-ofctl dump-flows $BRIDGE table=2 + ovs-ofctl dump-flows $BRIDGE table=30 + ovs-ofctl dump-flows $BRIDGE table=31 + echo -e "\n" +} + + +CMD=$1 +case $CMD in + "clean") + HOST_IP=$2 + if [ -z $HOST_IP ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_clean $HOST_IP + ;; + "create") + HOST_IP=$2 + TAAS_ID=$3 + NEUTRON_PORT=$4 + VLAN=$5 + if [ -z $HOST_IP ] || [ -z $TAAS_ID ] || [ -z $NEUTRON_PORT ] || [ -z $VLAN ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_create $HOST_IP $TAAS_ID $NEUTRON_PORT $VLAN + ;; + "destroy") + HOST_IP=$2 + TAAS_ID=$3 + if [ -z $HOST_IP ] || [ -z $TAAS_ID ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_destroy $HOST_IP $TAAS_ID + ;; + "src-add") + HOST_IP=$2 + TAAS_ID=$3 + NEUTRON_PORT=$4 + VLAN=$5 + DIR=$6 + if [ -z $HOST_IP ] || [ -z $TAAS_ID ] || [ -z $NEUTRON_PORT ] || [ -z $VLAN ] || [ -z $DIR ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_src_add $HOST_IP $TAAS_ID $NEUTRON_PORT $VLAN $DIR + ;; + "src-delete") + HOST_IP=$2 + TAAS_ID=$3 + NEUTRON_PORT=$4 + VLAN=$5 + DIR=$6 + if [ 
-z $HOST_IP ] || [ -z $TAAS_ID ] || [ -z $NEUTRON_PORT ] || [ -z $VLAN ] || [ -z $DIR ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_src_delete $HOST_IP $TAAS_ID $NEUTRON_PORT $VLAN $DIR + ;; + "dumpflows") + HOST_IP=$2 + if [ -z $HOST_IP ] + then + echo "Usage: $0 $CMD " + exit 1 + fi + taas_dumpflows $HOST_IP + ;; + *) + echo "Usage: $0 [option] [option] ..." + exit 1 + ;; +esac + +exit 0 diff --git a/neutron_taas/services/taas/agents/__init__.py b/neutron_taas/services/taas/agents/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/services/taas/agents/ovs/__init__.py b/neutron_taas/services/taas/agents/ovs/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/services/taas/agents/ovs/agent.py b/neutron_taas/services/taas/agents/ovs/agent.py new file mode 100755 index 0000000..1b56885 --- /dev/null +++ b/neutron_taas/services/taas/agents/ovs/agent.py @@ -0,0 +1,74 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import sys + +import eventlet +eventlet.monkey_patch() + +from oslo.config import cfg +from oslo_service import service + +from neutron.agent.common import config +from neutron.common import config as common_config +from neutron.common import rpc as n_rpc +from neutron.common import topics +# from neutron.openstack.common import service +from neutron_taas.services.taas.agents.ovs import taas_ovs_agent + + +OPTS = [ + cfg.IntOpt( + 'taas_agent_periodic_interval', + default=5, + help=_('Seconds between periodic task runs') + ) +] + + +class TaaSOVSAgentService(n_rpc.Service): + def start(self): + super(TaaSOVSAgentService, self).start() + self.tg.add_timer( + cfg.CONF.taas_agent_periodic_interval, + self.manager.periodic_tasks, + None, + None + ) + + +def main(): + # Load the configuration parameters. + cfg.CONF.register_opts(OPTS) + config.register_root_helper(cfg.CONF) + common_config.init(sys.argv[1:]) + config.setup_logging() + + # Set up RPC + mgr = taas_ovs_agent.TaasOvsAgentRpcCallback(cfg.CONF) + endpoints = [mgr] + conn = n_rpc.create_connection(new=True) + conn.create_consumer(topics.TAAS_AGENT, endpoints, fanout=False) + conn.consume_in_threads() + + svc = TaaSOVSAgentService( + host=cfg.CONF.host, + topic=topics.TAAS_PLUGIN, + manager=mgr + ) + service.launch(cfg.CONF, svc).wait() + +if __name__ == '__main__': + main() diff --git a/neutron_taas/services/taas/agents/ovs/taas_ovs_agent.py b/neutron_taas/services/taas/agents/ovs/taas_ovs_agent.py new file mode 100755 index 0000000..ce07458 --- /dev/null +++ b/neutron_taas/services/taas/agents/ovs/taas_ovs_agent.py @@ -0,0 +1,124 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from neutron.agent.common import config +from neutron.common import topics +from neutron_taas.services.taas.agents import taas_agent_api as api + +from oslo.config import cfg +from oslo_log import log as logging +from oslo_utils import importutils + +LOG = logging.getLogger(__name__) + + +class TaasOvsPluginApi(api.TaasPluginApiMixin): + # Currently there are not any APIs from the the agent towards plugin + + def __init__(self, topic, host): + super(TaasOvsPluginApi, self).__init__(topic, host) + return + + +class TaasOvsAgentRpcCallback(api.TaasAgentRpcCallbackMixin): + + def __init__(self, conf): + + LOG.debug("TaaS OVS Agent initialize called") + + self.conf = conf + taas_driver_class_path = cfg.CONF.taas.driver + self.taas_enabled = cfg.CONF.taas.enabled + + self.root_helper = config.get_root_helper(conf) + + try: + self.taas_driver = importutils.import_object( + taas_driver_class_path, self.root_helper) + LOG.debug("TaaS Driver Loaded: '%s'", taas_driver_class_path) + except ImportError: + msg = _('Error importing TaaS device driver: %s') + raise ImportError(msg % taas_driver_class_path) + + # setup RPC to msg taas plugin + self.taas_plugin_rpc = TaasOvsPluginApi(topics.TAAS_PLUGIN, + conf.host) + super(TaasOvsAgentRpcCallback, self).__init__() + + return + + def _invoke_driver_for_plugin_api(self, context, args, func_name): + LOG.debug("Invoking Driver for %(func_name)s from agent", + {'func_name': func_name}) + + try: + self.taas_driver.__getattribute__(func_name)(args) + except Exception: + LOG.debug("Failed to invoke the driver") + + return + 
+ def create_tap_service(self, context, tap_service, host): + """Handle Rpc from plugin to create a firewall.""" + if host != self.conf.host: + return + LOG.debug("In RPC Call for Create Tap Service: MSG=%s" % tap_service) + + return self._invoke_driver_for_plugin_api( + context, + tap_service, + 'create_tap_service') + + def create_tap_flow(self, context, tap_flow_msg, host): + if host != self.conf.host: + return + LOG.debug("In RPC Call for Create Tap Flow: MSG=%s" % tap_flow_msg) + + return self._invoke_driver_for_plugin_api( + context, + tap_flow_msg, + 'create_tap_flow') + + def delete_tap_service(self, context, tap_service, host): + # + # Cleanup operations must be performed by all hosts + # where the source and/or destination ports associated + # with this tap service were residing. + # + LOG.debug("In RPC Call for Delete Tap Service: MSG=%s" % tap_service) + + return self._invoke_driver_for_plugin_api( + context, + tap_service, + 'delete_tap_service') + + def delete_tap_flow(self, context, tap_flow_msg, host): + if host != self.conf.host: + return + LOG.debug("In RPC Call for Delete Tap Flow: MSG=%s" % tap_flow_msg) + + return self._invoke_driver_for_plugin_api( + context, + tap_flow_msg, + 'delete_tap_flow') + + def periodic_tasks(self, argv): + # + # Regenerate the flow in br-tun's TAAS_SEND_FLOOD table + # to ensure all existing tunnel ports are included. + # + self.taas_driver.update_tunnel_flood_flow() + pass diff --git a/neutron_taas/services/taas/agents/taas_agent_api.py b/neutron_taas/services/taas/agents/taas_agent_api.py new file mode 100755 index 0000000..09bf519 --- /dev/null +++ b/neutron_taas/services/taas/agents/taas_agent_api.py @@ -0,0 +1,70 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo.config import cfg +# from oslo_messaging import messaging +import oslo_messaging as messaging + +from neutron.common import rpc as n_rpc +from oslo_log import log as logging + +LOG = logging.getLogger(__name__) + +TaasOpts = [ + cfg.StrOpt( + 'driver', + default='', + help=_("Name of the TaaS Driver")), + cfg.BoolOpt( + 'enabled', + default=False, + help=_("Enable TaaS")), +] +cfg.CONF.register_opts(TaasOpts, 'taas') + + +class TaasPluginApiMixin(object): + + # Currently there are no Calls the Agent makes towards the Plugin. + + def __init__(self, topic, host): + self.host = host + target = messaging.Target(topic=topic, version='1.0') + self.client = n_rpc.get_client(target) + super(TaasPluginApiMixin, self).__init__() + return + + +class TaasAgentRpcCallbackMixin(object): + """Mixin for Taas agent Implementations.""" + + def __init__(self): + super(TaasAgentRpcCallbackMixin, self).__init__() + + def create_tap_service(self, context, tap_service, host): + """Handle RPC cast from plugin to create a tap service.""" + pass + + def delete_tap_service(self, context, tap_service, host): + """Handle RPC cast from plugin to delete a tap service.""" + pass + + def create_tap_flow(self, context, tap_flow_msg, host): + """Handle RPC cast from plugin to create a tap flow""" + pass + + def delete_tap_flow(self, context, tap_flow_msg, host): + """Handle RPC cast from plugin to delete a tap flow""" + pass diff --git a/neutron_taas/services/taas/drivers/__init__.py b/neutron_taas/services/taas/drivers/__init__.py new file mode 100755 index 0000000..e69de29 
diff --git a/neutron_taas/services/taas/drivers/linux/__init__.py b/neutron_taas/services/taas/drivers/linux/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/neutron_taas/services/taas/drivers/linux/ovs_constants.py b/neutron_taas/services/taas/drivers/linux/ovs_constants.py new file mode 100755 index 0000000..09fea87 --- /dev/null +++ b/neutron_taas/services/taas/drivers/linux/ovs_constants.py @@ -0,0 +1,28 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +# OVS tables used by TaaS in br-tap +TAAS_RECV_LOC = 1 +TAAS_RECV_REM = 2 + +# OVS tables used by TaaS in br-tun +TAAS_SEND_UCAST = 30 +TAAS_SEND_FLOOD = 31 +TAAS_CLASSIFY = 35 +TAAS_DST_CHECK = 36 +TAAS_SRC_CHECK = 37 +TAAS_DST_RESPOND = 38 +TAAS_SRC_RESPOND = 39 diff --git a/neutron_taas/services/taas/drivers/linux/ovs_taas.py b/neutron_taas/services/taas/drivers/linux/ovs_taas.py new file mode 100755 index 0000000..de771a2 --- /dev/null +++ b/neutron_taas/services/taas/drivers/linux/ovs_taas.py @@ -0,0 +1,472 @@ +# Copyright (C) 2015 Ericsson AB +# Copyright (c) 2015 Gigamon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from neutron.agent.common import ovs_lib +from neutron.agent.linux import utils +# from neutron.plugins.openvswitch.common import constants as ovs_consts +from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants \ + as ovs_consts +from neutron_taas.services.taas.drivers import taas_base +from oslo_log import log as logging +import ovs_constants as taas_ovs_consts +import ovs_utils as taas_ovs_utils + +LOG = logging.getLogger(__name__) + +TaaS_DRIVER_NAME = 'Taas OVS driver' + + +class OVSBridge_tap_extension(ovs_lib.OVSBridge): + def __init__(self, br_name, root_helper): + super(OVSBridge_tap_extension, self).__init__(br_name) + + +class OvsTaasDriver(taas_base.TaasDriverBase): + def __init__(self, root_helper): + LOG.debug("Initializing Taas OVS Driver") + + self.root_helper = root_helper + + self.int_br = OVSBridge_tap_extension('br-int', self.root_helper) + self.tun_br = OVSBridge_tap_extension('br-tun', self.root_helper) + self.tap_br = OVSBridge_tap_extension('br-tap', self.root_helper) + + # Prepare OVS bridges for TaaS + self.setup_ovs_bridges() + + # Setup key-value manager for ingress BCMC flows + self.bcmc_kvm = taas_ovs_utils.key_value_mgr(4096) + + def setup_ovs_bridges(self): + # + # br-int : Integration Bridge + # br-tap : Tap Bridge + # br-tun : Tunnel Bridge + # + + # Create br-tap + self.tap_br.create() + + # Connect br-tap to br-int and br-tun + self.int_br.add_patch_port('patch-int-tap', 'patch-tap-int') + self.tap_br.add_patch_port('patch-tap-int', 'patch-int-tap') + self.tun_br.add_patch_port('patch-tun-tap', 
'patch-tap-tun') + self.tap_br.add_patch_port('patch-tap-tun', 'patch-tun-tap') + + # Get patch port IDs + patch_tap_int_id = self.tap_br.get_port_ofport('patch-tap-int') + patch_tap_tun_id = self.tap_br.get_port_ofport('patch-tap-tun') + patch_tun_tap_id = self.tun_br.get_port_ofport('patch-tun-tap') + + # Purge all existing Taas flows from br-tap and br-tun + self.tap_br.delete_flows(table=0) + self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC) + self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM) + + self.tun_br.delete_flows(table=0, + in_port=patch_tun_tap_id) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_UCAST) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SEND_FLOOD) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_CLASSIFY) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_RESPOND) + self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_RESPOND) + + # + # Configure standard TaaS flows in br-tap + # + self.tap_br.add_flow(table=0, + priority=1, + in_port=patch_tap_int_id, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_RECV_LOC) + + self.tap_br.add_flow(table=0, + priority=1, + in_port=patch_tap_tun_id, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_RECV_REM) + + self.tap_br.add_flow(table=0, + priority=0, + actions="drop") + + self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC, + priority=0, + actions="output:%s" % str(patch_tap_tun_id)) + + self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM, + priority=0, + actions="drop") + + # + # Configure standard Taas flows in br-tun + # + self.tun_br.add_flow(table=0, + priority=1, + in_port=patch_tun_tap_id, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_SEND_UCAST) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_UCAST, + priority=0, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_SEND_FLOOD) + + 
flow_action = self._create_tunnel_flood_flow_action() + if flow_action != "": + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD, + priority=0, + actions=flow_action) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, + priority=2, + reg0=0, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_DST_CHECK) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, + priority=1, + reg0=1, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_DST_CHECK) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_CLASSIFY, + priority=1, + reg0=2, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_SRC_CHECK) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK, + priority=0, + actions="drop") + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK, + priority=0, + actions="drop") + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND, + priority=2, + reg0=0, + actions="output:%s" % str(patch_tun_tap_id)) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_RESPOND, + priority=1, + reg0=1, + actions=( + "output:%s," + "move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID" + "[0..11],mod_vlan_vid:2,output:in_port" % + str(patch_tun_tap_id))) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_RESPOND, + priority=1, + actions=( + "learn(table=%s,hard_timeout=60," + "priority=1,NXM_OF_VLAN_TCI[0..11]," + "load:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID" + "[0..11],load:0->NXM_OF_VLAN_TCI[0..11]," + "output:NXM_OF_IN_PORT[])" % + taas_ovs_consts.TAAS_SEND_UCAST)) + + return + + def create_tap_service(self, tap_service): + taas_id = tap_service['taas_id'] + port = tap_service['port'] + + # Get OVS port id for tap service port + ovs_port = self.int_br.get_vif_port_by_id(port['id']) + ovs_port_id = ovs_port.ofport + + # Get VLAN id for tap service port + port_dict = self.int_br.get_port_tag_dict() + port_vlan_id = port_dict[ovs_port.port_name] + + # Get patch port IDs + patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap') + patch_tap_int_id 
= self.tap_br.get_port_ofport('patch-tap-int') + + # Add flow(s) in br-int + self.int_br.add_flow(table=0, + priority=25, + in_port=patch_int_tap_id, + dl_vlan=taas_id, + actions="mod_vlan_vid:%s,output:%s" % + (str(port_vlan_id), str(ovs_port_id))) + + # Add flow(s) in br-tap + self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_LOC, + priority=1, + dl_vlan=taas_id, + actions="output:in_port") + + self.tap_br.add_flow(table=taas_ovs_consts.TAAS_RECV_REM, + priority=1, + dl_vlan=taas_id, + actions="output:%s" % str(patch_tap_int_id)) + + # Add flow(s) in br-tun + for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES: + self.tun_br.add_flow(table=ovs_consts.TUN_TABLE[tunnel_type], + priority=1, + tun_id=taas_id, + actions=( + "move:NXM_OF_VLAN_TCI[0..11]->" + "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID" + "[0..11]->NXM_OF_VLAN_TCI[0..11]," + "resubmit(,%s)" % + taas_ovs_consts.TAAS_CLASSIFY)) + + self.tun_br.add_flow(table=taas_ovs_consts.TAAS_DST_CHECK, + priority=1, + tun_id=taas_id, + actions="resubmit(,%s)" % + taas_ovs_consts.TAAS_DST_RESPOND) + + # + # Disable mac-address learning in the Linux bridge to which + # the OVS port is attached (via the veth pair). This will + # effectively turn the bridge into a hub, ensuring that all + # incoming mirrored traffic reaches the tap interface (used + # for attaching a VM to the bridge) irrespective of the + # destination mac addresses in mirrored packets. 
        #
        # Tail of create_tap_service: disable MAC address ageing (learning)
        # on the Linux bridge attached to the tap-service port, so mirrored
        # frames are always flooded toward the destination port.
        #
        ovs_port_name = ovs_port.port_name
        linux_br_name = ovs_port_name.replace('qvo', 'qbr')
        utils.execute(['brctl', 'setageing', linux_br_name, 0],
                      run_as_root=True)

        return

    def delete_tap_service(self, tap_service):
        """Remove all mirroring flows installed for a tap service.

        :param tap_service: dict carrying 'taas_id', the VLAN/tunnel id
            allocated to this tap service; used as the match key for
            every flow deleted below.
        """
        taas_id = tap_service['taas_id']

        # Get patch port ID
        patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

        # Delete flow(s) from br-int
        self.int_br.delete_flows(table=0,
                                 in_port=patch_int_tap_id,
                                 dl_vlan=taas_id)

        # Delete flow(s) from br-tap
        self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_LOC,
                                 dl_vlan=taas_id)

        self.tap_br.delete_flows(table=taas_ovs_consts.TAAS_RECV_REM,
                                 dl_vlan=taas_id)

        # Delete flow(s) from br-tun (one per supported tunnel type)
        for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES:
            self.tun_br.delete_flows(table=ovs_consts.TUN_TABLE[tunnel_type],
                                     tun_id=taas_id)

        self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_DST_CHECK,
                                 tun_id=taas_id)

        self.tun_br.delete_flows(table=taas_ovs_consts.TAAS_SRC_CHECK,
                                 tun_id=taas_id)

        return

    def create_tap_flow(self, tap_flow):
        """Install flows that mirror a port's traffic to a tap service.

        :param tap_flow: dict carrying 'taas_id' (id of the destination
            tap service), 'port' (the mirrored Neutron port), 'port_mac'
            (used for IN-direction matching) and the API-level
            'tap_flow' dict whose 'direction' is 'IN', 'OUT' or 'BOTH'.
        """
        taas_id = tap_flow['taas_id']
        port = tap_flow['port']
        direction = tap_flow['tap_flow']['direction']

        # Get OVS port id for tap flow port
        ovs_port = self.int_br.get_vif_port_by_id(port['id'])
        ovs_port_id = ovs_port.ofport

        # Get VLAN id for tap flow port
        port_dict = self.int_br.get_port_tag_dict()
        port_vlan_id = port_dict[ovs_port.port_name]

        # Get patch port ID
        patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

        # Add flow(s) in br-int: deliver the packet normally, then send a
        # copy (re-tagged with the tap service's taas_id VLAN) to br-tap.
        if direction == 'OUT' or direction == 'BOTH':
            self.int_br.add_flow(table=0,
                                 priority=20,
                                 in_port=ovs_port_id,
                                 actions="normal,mod_vlan_vid:%s,output:%s" %
                                 (str(taas_id), str(patch_int_tap_id)))

        if direction == 'IN' or direction == 'BOTH':
            port_mac = tap_flow['port_mac']
            # NOTE(review): unicast ingress is matched on dl_dst; broadcast/
            # multicast ingress is handled by the shared BCMC flow below.
            self.int_br.add_flow(table=0,
                                 priority=20,
                                 dl_vlan=port_vlan_id,
                                 dl_dst=port_mac,
                                 actions="normal,mod_vlan_vid:%s,output:%s" %
                                 (str(taas_id), str(patch_int_tap_id)))

            self._add_update_ingress_bcmc_flow(port_vlan_id,
                                               taas_id,
                                               patch_int_tap_id)

        # Add flow(s) in br-tun: on tunnel ingress, stash the local VLAN in
        # REG0, move the tunnel id into the VLAN field, and classify.
        for tunnel_type in ovs_consts.TUNNEL_NETWORK_TYPES:
            self.tun_br.add_flow(table=ovs_consts.TUN_TABLE[tunnel_type],
                                 priority=1,
                                 tun_id=taas_id,
                                 actions=(
                                     "move:NXM_OF_VLAN_TCI[0..11]->"
                                     "NXM_NX_REG0[0..11],move:NXM_NX_TUN_ID"
                                     "[0..11]->NXM_OF_VLAN_TCI[0..11],"
                                     "resubmit(,%s)" %
                                     taas_ovs_consts.TAAS_CLASSIFY))

        self.tun_br.add_flow(table=taas_ovs_consts.TAAS_SRC_CHECK,
                             priority=1,
                             tun_id=taas_id,
                             actions="resubmit(,%s)" %
                             taas_ovs_consts.TAAS_SRC_RESPOND)

        return

    def delete_tap_flow(self, tap_flow):
        """Remove the mirroring flows installed by create_tap_flow.

        Mirrors create_tap_flow's direction handling; the br-tun flows are
        intentionally left in place (shared per tap service) — they are
        removed in delete_tap_service.
        """
        taas_id = tap_flow['taas_id']
        port = tap_flow['port']
        direction = tap_flow['tap_flow']['direction']

        # Get OVS port id for tap flow port
        ovs_port = self.int_br.get_vif_port_by_id(port['id'])
        ovs_port_id = ovs_port.ofport

        # Get VLAN id for tap flow port
        port_dict = self.int_br.get_port_tag_dict()
        port_vlan_id = port_dict[ovs_port.port_name]

        # Get patch port ID
        patch_int_tap_id = self.int_br.get_port_ofport('patch-int-tap')

        # Delete flow(s) from br-int
        if direction == 'OUT' or direction == 'BOTH':
            self.int_br.delete_flows(table=0,
                                     in_port=ovs_port_id)

        if direction == 'IN' or direction == 'BOTH':
            port_mac = tap_flow['port_mac']
            self.int_br.delete_flows(table=0,
                                     dl_vlan=port_vlan_id,
                                     dl_dst=port_mac)

            self._del_update_ingress_bcmc_flow(port_vlan_id,
                                               taas_id,
                                               patch_int_tap_id)

        return

    def update_tunnel_flood_flow(self):
        """Refresh the br-tun flood flow to cover the current tunnel ports.

        No-op when br-tun has no tunnel ports at all.
        """
        flow_action = self._create_tunnel_flood_flow_action()
        if flow_action != "":
            self.tun_br.mod_flow(table=taas_ovs_consts.TAAS_SEND_FLOOD,
                                 actions=flow_action)

    def _create_tunnel_flood_flow_action(self):
        """Build the action string that floods mirrored traffic to every
        tunnel port on br-tun, or return "" if no tunnel ports exist.
        """
        args = ["ovs-vsctl", "list-ports", "br-tun"]
        res = utils.execute(args, run_as_root=True)

        port_name_list = res.splitlines()

        # Copy the local VLAN into the tunnel id, then tag with VLAN 1
        # before flooding out of each tunnel port.
        flow_action = ("move:NXM_OF_VLAN_TCI[0..11]->NXM_NX_TUN_ID[0..11],"
                       "mod_vlan_vid:1")

        tunnel_ports_exist = False

        for port_name in port_name_list:
            # Skip the patch ports; everything else on br-tun is a tunnel.
            if (port_name != 'patch-int') and (port_name != 'patch-tun-tap'):
                flow_action += (",output:%d" %
                                self.tun_br.get_port_ofport(port_name))
                tunnel_ports_exist = True

        if tunnel_ports_exist:
            return flow_action
        else:
            return ""

    def _create_ingress_bcmc_flow_action(self, taas_id_list, out_port_id):
        """Build the action string that delivers a BCMC packet normally and
        mirrors one copy per affiliated tap service to 'out_port_id'.
        """
        flow_action = "normal"
        for taas_id in taas_id_list:
            flow_action += (",mod_vlan_vid:%d,output:%d" %
                            (taas_id, out_port_id))

        return flow_action

    #
    # Adds or updates a special flow in br-int to mirror (duplicate and
    # redirect to 'out_port_id') all ingress broadcast/multicast traffic,
    # associated with a VLAN, to possibly multiple tap service instances.
    #
    def _add_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
        # Add a tap service instance affiliation with VLAN
        self.bcmc_kvm.affiliate(vlan_id, taas_id)

        # Find all tap service instances affiliated with VLAN
        taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)

        #
        # Add/update flow to mirror ingress BCMC traffic, associated
        # with VLAN, to all affiliated tap-service instances.
        # (dl_dst mask 01:00:... matches any multicast/broadcast MAC.)
        #
        flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
                                                            out_port_id)
        self.int_br.add_flow(table=0,
                             priority=20,
                             dl_vlan=vlan_id,
                             dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
                             actions=flow_action)

        return

    #
    # Removes or updates a special flow in br-int to mirror (duplicate
    # and redirect to 'out_port_id') all ingress broadcast/multicast
    # traffic, associated with a VLAN, to possibly multiple tap-service
    # instances.
    #
    def _del_update_ingress_bcmc_flow(self, vlan_id, taas_id, out_port_id):
        # Remove a tap-service instance affiliation with VLAN
        self.bcmc_kvm.unaffiliate(vlan_id, taas_id)

        # Find all tap-service instances affiliated with VLAN
        taas_id_list = self.bcmc_kvm.list_affiliations(vlan_id)

        #
        # If there are tap service instances affiliated with VLAN, update
        # the flow to mirror ingress BCMC traffic, associated with VLAN,
        # to all of them. Otherwise, remove the flow.
        #
        if taas_id_list:
            flow_action = self._create_ingress_bcmc_flow_action(taas_id_list,
                                                                out_port_id)
            self.int_br.add_flow(table=0,
                                 priority=20,
                                 dl_vlan=vlan_id,
                                 dl_dst="01:00:00:00:00:00/01:00:00:00:00:00",
                                 actions=flow_action)
        else:
            self.int_br.delete_flows(table=0,
                                     dl_vlan=vlan_id,
                                     dl_dst=("01:00:00:00:00:00/"
                                             "01:00:00:00:00:00"))

        return
diff --git a/neutron_taas/services/taas/drivers/linux/ovs_utils.py b/neutron_taas/services/taas/drivers/linux/ovs_utils.py new file mode 100755 index 0000000..b73983d --- /dev/null +++ b/neutron_taas/services/taas/drivers/linux/ovs_utils.py
# Copyright (C) 2015 Ericsson AB
# Copyright (c) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


#
# This class implements a simple key-value manager that can support
# the following relationships.
#
# - Multiple values may be affiliated with a key.
# - A value may be affiliated with multiple keys.
# - A value may be affiliated with a key multiple times.
#
class key_value_mgr(object):
    #
    # Initializes internal state for specified # keys
    #
    def __init__(self, nr_keys):
        # key_list[key] is a list of affiliation dicts of the form
        # {'value': <value>, 'refcnt': <int>}; keys are integer indices
        # in [0, nr_keys).
        self.key_list = []
        for i in range(nr_keys):
            self.key_list.append([])

        return

    #
    # Returns the specified key-value affiliation, if it exists.
    #
    def _find_affiliation(self, key, value):
        aff_list = self.key_list[key]

        for aff in aff_list:
            if aff['value'] == value:
                return aff

        return None

    #
    # Adds an affiliation of 'value' with 'key'.
    # Repeated affiliations are reference-counted rather than duplicated.
    #
    def affiliate(self, key, value):
        # Locate key-value affiliation
        aff = self._find_affiliation(key, value)
        if aff is None:
            # Create a (new) key-value affiliation
            aff = {
                'value': value,
                'refcnt': 0,
            }
            aff_list = self.key_list[key]
            aff_list.append(aff)

        # Increment affiliation reference count
        aff['refcnt'] += 1

        return

    #
    # Removes an affiliation of 'value' with 'key'.
    # Silently ignores a (key, value) pair that was never affiliated.
    #
    def unaffiliate(self, key, value):
        # Locate key-value affiliation
        aff = self._find_affiliation(key, value)
        if aff is None:
            return

        # Decrement affiliation reference count
        aff['refcnt'] -= 1

        # Destroy affiliation iff no outstanding references
        if aff['refcnt'] <= 0:
            aff_list = self.key_list[key]
            aff_list.remove(aff)

        return

    #
    # Lists all values affiliated with 'key'
    #
    # Note: The returned list is a set (contains no duplicates), because
    # affiliate() reference-counts repeats instead of appending them.
    #
    def list_affiliations(self, key):
        aff_list = self.key_list[key]

        value_list = []
        for aff in aff_list:
            value_list.append(aff['value'])

        return value_list
diff --git a/neutron_taas/services/taas/drivers/taas_base.py b/neutron_taas/services/taas/drivers/taas_base.py new file mode 100755 index 0000000..020c1f7 --- /dev/null +++ b/neutron_taas/services/taas/drivers/taas_base.py
# Copyright (C) 2015 Ericsson AB
# Copyright (c) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class TaasDriverBase(object):
    """Abstract interface every TaaS backend driver must implement.

    Each hook receives the RPC message dict built by the plugin
    (see taas_plugin.py: keys include 'tap_service'/'tap_flow',
    'taas_id' and 'port').
    """

    @abc.abstractmethod
    def create_tap_service(self, tap_service):
        pass

    @abc.abstractmethod
    def delete_tap_service(self, tap_service):
        pass

    @abc.abstractmethod
    def create_tap_flow(self, tap_flow):
        pass

    @abc.abstractmethod
    def delete_tap_flow(self, tap_flow):
        pass
diff --git a/neutron_taas/services/taas/taas_plugin.py b/neutron_taas/services/taas/taas_plugin.py new file mode 100755 index 0000000..fcbd445 --- /dev/null +++ b/neutron_taas/services/taas/taas_plugin.py
# Copyright (C) 2015 Ericsson AB
# Copyright (c) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(review): mixes the old 'oslo.config' namespace with the new
# 'oslo_log'/'oslo_messaging' ones — presumably 'from oslo_config import
# cfg' is wanted on newer oslo releases; confirm against requirements.
from oslo.config import cfg
# from oslo import messaging
import oslo_messaging as messaging

from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron_taas.db import taas_db
from neutron_taas.extensions import taas as taas_ex
from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class TaasCallbacks(object):
    """Currently there are no callbacks to the Taas Plugin."""

    def __init__(self, plugin):
        super(TaasCallbacks, self).__init__()
        self.plugin = plugin
        return


class TaasAgentApi(object):
    """RPC calls to agent APIs

    Each call is a fanout cast on the TaaS agent topic; the 'host'
    argument tells agents which host the operation targets.
    """

    def __init__(self, topic, host):
        self.host = host
        target = messaging.Target(topic=topic, version='1.0')
        self.client = n_rpc.get_client(target)
        return

    def create_tap_service(self, context, tap_service, host):
        """Fan out a 'create_tap_service' cast to the TaaS agents."""
        LOG.debug("In RPC Call for Create Tap Service: Host=%s, MSG=%s" %
                  (host, tap_service))

        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, 'create_tap_service', tap_service=tap_service,
                   host=host)

        return

    def create_tap_flow(self, context, tap_flow_msg, host):
        """Fan out a 'create_tap_flow' cast to the TaaS agents."""
        LOG.debug("In RPC Call for Create Tap Flow: Host=%s, MSG=%s" %
                  (host, tap_flow_msg))

        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, 'create_tap_flow', tap_flow_msg=tap_flow_msg,
                   host=host)

        return

    def delete_tap_service(self, context, tap_service, host):
        """Fan out a 'delete_tap_service' cast to the TaaS agents."""
        LOG.debug("In RPC Call for Delete Tap Service: Host=%s, MSG=%s" %
                  (host, tap_service))

        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, 'delete_tap_service', tap_service=tap_service,
                   host=host)

        return

    def delete_tap_flow(self, context, tap_flow_msg, host):
        """Fan out a 'delete_tap_flow' cast to the TaaS agents."""
        LOG.debug("In RPC Call for Delete Tap Flow: Host=%s, MSG=%s" %
                  (host, tap_flow_msg))

        cctxt = self.client.prepare(fanout=True)
        cctxt.cast(context, 'delete_tap_flow', tap_flow_msg=tap_flow_msg,
                   host=host)

        return


class TaasPlugin(taas_db.Tass_db_Mixin):
    """TaaS service plugin: validates API requests, persists them via the
    DB mixin, and notifies agents over RPC.
    """

    supported_extension_aliases = ["taas"]

    def __init__(self):

        LOG.debug("TAAS PLUGIN INITIALIZED")
        self.endpoints = [TaasCallbacks(self)]

        # Consume agent-to-plugin RPC on the TaaS plugin topic.
        self.conn = n_rpc.create_connection(new=True)
        self.conn.create_consumer(
            topics.TAAS_PLUGIN, self.endpoints, fanout=False)
        self.conn.consume_in_threads()

        self.agent_rpc = TaasAgentApi(
            topics.TAAS_AGENT,
            cfg.CONF.host
        )

        return

    def create_tap_service(self, context, tap_service):
        """Create a tap service: validate ownership, persist, allocate a
        taas VLAN id, and notify the agent on the port's host.
        """
        LOG.debug("create_tap_service() called")

        tenant_id = self._get_tenant_id_for_create(context,
                                                   tap_service['tap_service'])

        t_s = tap_service['tap_service']
        port_id = t_s['port_id']

        # Get port details
        port = self._get_port_details(context, port_id)

        # Check if the port is owned by the tenant.
        if port['tenant_id'] != tenant_id:
            raise taas_ex.PortDoesNotBelongToTenant()

        # Extract the host where the port is located
        host = port['binding:host_id']

        if host is not None:
            LOG.debug("Host on which the port is created = %s" % host)
        else:
            # NOTE(review): typo "disbaled" in this log message; left
            # untouched here since it is a runtime string.
            LOG.debug("Host could not be found, Port Binding disbaled!")

        # Create tap service in the db model
        ts = super(TaasPlugin, self).create_tap_service(context, tap_service)

        # Get taas id associated with the Tap Service
        tap_id_association = self.get_tap_id_association(
            context,
            tap_service_id=ts['id'])

        taas_vlan_id = (tap_id_association['taas_id'] +
                        cfg.CONF.taas.vlan_range_start)

        # NOTE(review): this limit check happens after the DB row was
        # created, so hitting the limit appears to leave an orphaned
        # tap_services row — confirm whether cleanup/rollback is intended.
        if taas_vlan_id > cfg.CONF.taas.vlan_range_end:
            raise taas_ex.TapServiceLimitReached()

        rpc_msg = {'tap_service': ts, 'taas_id': taas_vlan_id, 'port': port}

        self.agent_rpc.create_tap_service(context, rpc_msg, host)

        return ts

    def delete_tap_service(self, context, id):
        """Delete a tap service, cascading deletion of its tap flows, and
        notify the agent on the tap-service port's host.
        """
        LOG.debug("delete_tap_service() called")

        # Get taas id associated with the Tap Service
        tap_id_association = self.get_tap_id_association(
            context,
            tap_service_id=id)

        ts = self.get_tap_service(context, id)

        # Get all the tap Flows that are associated with the Tap service
        # and delete them as well
        t_f_collection = self.get_tap_flows(
            context,
            filters={'tap_service_id': [id]}, fields=['id'])

        for t_f in t_f_collection:
            self.delete_tap_flow(context, t_f['id'])

        # Get the port and the host that it is on
        port_id = ts['port_id']

        port = self._get_port_details(context, port_id)

        host = port['binding:host_id']

        super(TaasPlugin, self).delete_tap_service(context, id)

        taas_vlan_id = (tap_id_association['taas_id'] +
                        cfg.CONF.taas.vlan_range_start)

        rpc_msg = {'tap_service': ts,
                   'taas_id': taas_vlan_id,
                   'port': port}

        self.agent_rpc.delete_tap_service(context, rpc_msg, host)

        return ts

    def create_tap_flow(self, context, tap_flow):
        """Create a tap flow: check it targets a tap service of the same
        tenant, persist, and notify the agent on the source port's host.
        """
        LOG.debug("create_tap_flow() called")

        tenant_id = self._get_tenant_id_for_create(context,
                                                   tap_flow['tap_flow'])

        t_f = tap_flow['tap_flow']

        # Check if the tenant id of the source port is the same as the
        # tenant_id of the tap service we are attaching it to.

        ts = self.get_tap_service(context, t_f['tap_service_id'])
        ts_tenant_id = ts['tenant_id']

        taas_id = (self.get_tap_id_association(
            context,
            tap_service_id=ts['id'])['taas_id'] +
            cfg.CONF.taas.vlan_range_start)

        if tenant_id != ts_tenant_id:
            raise taas_ex.TapServiceNotBelongToTenant()

        # Extract the host where the source port is located
        port = self._get_port_details(context, t_f['source_port'])
        host = port['binding:host_id']
        port_mac = port['mac_address']

        # create tap flow in the db model
        tf = super(TaasPlugin, self).create_tap_flow(context, tap_flow)

        # Send RPC message to both the source port host and
        # tap service(destination) port host
        rpc_msg = {'tap_flow': tf,
                   'port_mac': port_mac,
                   'taas_id': taas_id,
                   'port': port}

        self.agent_rpc.create_tap_flow(context, rpc_msg, host)

        return tf

    def delete_tap_flow(self, context, id):
        """Delete a tap flow and notify the agent on the source port's
        host.
        """
        LOG.debug("delete_tap_flow() called")

        tf = self.get_tap_flow(context, id)
        # ts = self.get_tap_service(context, tf['tap_service_id'])

        taas_id = (self.get_tap_id_association(
            context,
            tf['tap_service_id'])['taas_id'] +
            cfg.CONF.taas.vlan_range_start)

        port = self._get_port_details(context, tf['source_port'])
        host = port['binding:host_id']
        port_mac = port['mac_address']

        super(TaasPlugin, self).delete_tap_flow(context, id)

        # Send RPC message to both the source port host and
        # tap service(destination) port host
        rpc_msg = {'tap_flow': tf,
                   'port_mac': port_mac,
                   'taas_id': taas_id,
                   'port': port}

        self.agent_rpc.delete_tap_flow(context, rpc_msg, host)

        return tf
diff --git a/neutron_taas/taas_cli/openrc.sh b/neutron_taas/taas_cli/openrc.sh new file mode 100755 index 0000000..6e582d5 --- /dev/null +++ b/neutron_taas/taas_cli/openrc.sh
#!/bin/bash
# Environment setup for taas_cli.py: exports the Keystone endpoint and
# credentials it reads from the environment. Source this file; the
# password is prompted for (silently) rather than stored on disk.

# URL for authentication service
export OS_AUTH_URL=http://10.0.2.15:5000/v2.0

# Tenant name
export OS_TENANT_NAME="demo"

# Username
export OS_USERNAME="demo"

# Password (read without echo)
echo "Please enter your OpenStack Password: "
read -sr OS_PASSWORD_INPUT
export OS_PASSWORD=$OS_PASSWORD_INPUT
diff --git a/neutron_taas/taas_cli/taas_cli.py b/neutron_taas/taas_cli/taas_cli.py new file mode 100755 index 0000000..a4b33e3 --- /dev/null +++ b/neutron_taas/taas_cli/taas_cli.py
#!/usr/bin/python

# Copyright (C) 2015 Ericsson AB
# Copyright (C) 2015 Gigamon
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import os
import requests


# Process/handler exit statuses returned by every API method below.
SUCCESS = 0
FAILURE = 1


class taas_rest_api(object):
    """Thin client for the TaaS REST API.

    Authenticates against Keystone v2.0, discovers the network service
    endpoint from the service catalog, and exposes one method per TaaS
    CRUD operation. All methods print results/errors and return
    SUCCESS or FAILURE rather than raising.
    """

    def __init__(self):
        #
        # Initialize authentication service endpoint and user
        # credentials from environment variables (see openrc.sh);
        # raises KeyError if any of them is unset.
        #
        self.auth_url = os.environ['OS_AUTH_URL']
        self.tenant_name = os.environ['OS_TENANT_NAME']
        self.username = os.environ['OS_USERNAME']
        self.password = os.environ['OS_PASSWORD']

        self.token_id = None

        return

    def authenticate(self):
        """Obtain a Keystone token and locate the TaaS endpoint.

        On success stores token_id, tenant_id and taas_url on self.
        """
        # Generate a new authentication token
        req_data = {}
        req_data['auth'] = {
            'tenantName': self.tenant_name,
            'passwordCredentials': {
                'username': self.username,
                'password': self.password
            }
        }

        req_data_json = json.dumps(req_data)

        url = '%s/tokens' % self.auth_url

        response = requests.post(url, req_data_json)
        res_data = response.json()

        if response.status_code != 200:
            error = res_data['error']
            print ('Error code: %s (%s)' % (error['code'], error['title']))
            return FAILURE

        # Remember new token ID
        self.token_id = res_data['access']['token']['id']

        # Remember tenant ID
        self.tenant_id = res_data['access']['token']['tenant']['id']

        # Find network service endpoint (first endpoint of the first
        # 'network'-type service in the catalog)
        found = False
        service_catalog = res_data['access']['serviceCatalog']
        for service in service_catalog:
            if service['type'] == 'network':
                endpoints = service['endpoints']
                endpoint = endpoints[0]
                network_service_url = endpoint['publicURL']
                found = True
                break

        if found is False:
            print ('Error: Could not find network service endpoint')
            return FAILURE

        # Formulate and remember TaaS endpoint
        # NOTE(review): taas_url ends with '/', and every caller joins it
        # with '%s/tap-...' — the requests below therefore contain a
        # double slash; presumably harmless, but confirm the server
        # accepts it.
        self.taas_url = '%s/v2.0/taas/' % network_service_url

        return SUCCESS

    def tap_service_create(self,
                           name,
                           description,
                           port_id,
                           network_id):
        """POST a new tap service and display it on success."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        req_data = {}
        req_data['tap_service'] = {
            'tenant_id': self.tenant_id,
            'name': name,
            'description': description,
            'port_id': port_id,
            'network_id': network_id
        }

        req_data_json = json.dumps(req_data)

        url = '%s/tap-services' % self.taas_url

        response = requests.post(url, req_data_json, headers=header_data)
        res_data = response.json()

        if response.status_code != 201:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_service = res_data['tap_service']

        # Show information of the new tap service
        self.tap_service_show(tap_service['id'])

        return SUCCESS

    def tap_service_delete(self, tap_service_id):
        """DELETE the tap service with the given id."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-services/%s' % (self.taas_url, tap_service_id)

        response = requests.delete(url, headers=header_data)

        if response.status_code != 204:
            res_data = response.json()
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        return SUCCESS

    def tap_service_list(self):
        """List the tenant's tap services as an ASCII table."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-services' % self.taas_url

        response = requests.get(url, headers=header_data)
        res_data = response.json()

        if response.status_code != 200:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_services = res_data['tap_services']

        sep = '+' + '-' * 38 + '+' + '-' * 12 + '+' + '-' * 38 + '+'
        print (sep)
        print ('| {:36s} | {:10s} | {:36s} |'.format('ID', 'Name', 'Port ID'))
        print (sep)
        for tap_service in tap_services:
            print ('| {:36s} | {:10s} | {:36s} |'
                   .format(tap_service['id'],
                           tap_service['name'],
                           tap_service['port_id']))
        print (sep)

        return SUCCESS

    def tap_service_show(self, tap_service_id):
        """Show one tap service as a field/value ASCII table."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-services/%s' % (self.taas_url, tap_service_id)

        response = requests.get(url, headers=header_data)
        res_data = response.json()

        if response.status_code != 200:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_service = res_data['tap_service']

        sep = '+' + '-' * 13 + '+' + '-' * 38 + '+'
        print (sep)
        print ('| {:11} | {:36s} |'.format('Field', 'Value'))
        print (sep)
        print ('| {:11} | {:36s} |'
               .format('Name', tap_service['name']))
        print ('| {:11} | {:36s} |'
               .format('Description', tap_service['description']))
        print ('| {:11} | {:36s} |'
               .format('ID', tap_service['id']))
        print ('| {:11} | {:36s} |'
               .format('Port ID', tap_service['port_id']))
        print ('| {:11} | {:36s} |'
               .format('Tenant ID', tap_service['tenant_id']))
        print (sep)

        return SUCCESS

    def tap_flow_create(self,
                        name,
                        description,
                        port_id,
                        direction,
                        tap_service_id):
        """POST a new tap flow and display it on success."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        req_data = {}
        req_data['tap_flow'] = {
            'tenant_id': self.tenant_id,
            'name': name,
            'description': description,
            'source_port': port_id,
            'direction': direction,
            'tap_service_id': tap_service_id
        }

        req_data_json = json.dumps(req_data)

        url = '%s/tap-flows' % self.taas_url

        response = requests.post(url, req_data_json, headers=header_data)
        res_data = response.json()

        if response.status_code != 201:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_flow = res_data['tap_flow']

        # Show information of the new tap flow
        self.tap_flow_show(tap_flow['id'])

        return SUCCESS

    def tap_flow_delete(self, tap_flow_id):
        """DELETE the tap flow with the given id."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-flows/%s' % (self.taas_url, tap_flow_id)

        response = requests.delete(url, headers=header_data)

        if response.status_code != 204:
            res_data = response.json()
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        return SUCCESS

    def tap_flow_list(self, tap_service_id):
        """List tap flows, optionally filtered (client-side) by the tap
        service they are attached to.
        """
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-flows' % self.taas_url

        response = requests.get(url, headers=header_data)
        res_data = response.json()

        if response.status_code != 200:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_flows = res_data['tap_flows']

        sep = '+' + '-' * 38 + '+' + '-' * 12 + \
            '+' + '-' * 38 + '+' + '-' * 38 + '+'
        print (sep)
        print ('| {:36s} | {:10s} | {:36s} | {:36s} |'.format(
            'ID', 'Name', 'Port ID', 'Tap Service ID'))
        print (sep)
        for tap_flow in tap_flows:
            #
            # If a tap service has been specified only display tap flows
            # associated with that tap service; otherwise display all tap
            # flows that belong to the tenant
            #
            if (tap_service_id is None or
                    tap_service_id == tap_flow['tap_service_id']):

                print ('| {:36s} | {:10s} | {:36s} | {:36s} |'
                       .format(tap_flow['id'],
                               tap_flow['name'],
                               tap_flow['source_port'],
                               tap_flow['tap_service_id']))
        print (sep)

        return SUCCESS

    def tap_flow_show(self, tap_flow_id):
        """Show one tap flow as a field/value ASCII table."""
        header_data = {}
        header_data['X-Auth-Token'] = self.token_id

        url = '%s/tap-flows/%s' % (self.taas_url, tap_flow_id)

        response = requests.get(url, headers=header_data)
        res_data = response.json()

        if response.status_code != 200:
            error = res_data['NeutronError']
            print (error['message'])
            return FAILURE

        tap_flow = res_data['tap_flow']

        sep = '+' + '-' * 16 + '+' + '-' * 38 + '+'
        print (sep)
        print ('| {:14} | {:36s} |'.format('Field', 'Value'))
        print (sep)
        print ('| {:14} | {:36s} |'
               .format('Name', tap_flow['name']))
        print ('| {:14} | {:36s} |'
               .format('Description', tap_flow['description']))
        print ('| {:14} | {:36s} |'
               .format('ID', tap_flow['id']))
        print ('| {:14} | {:36s} |'
               .format('Port ID', tap_flow['source_port']))
        print ('| {:14} | {:36s} |'
               .format('Direction', tap_flow['direction']))
        print ('| {:14} | {:36s} |'
               .format('Tap Service ID', tap_flow['tap_service_id']))
        print ('| {:14} | {:36s} |'
               .format('Tenant ID', tap_flow['tenant_id']))
        print (sep)

        return SUCCESS


# Handler for 'tap-service-create' subcommand
def tap_service_create(args):
    """Authenticate, then create a tap service from CLI args."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_service_create(args.name,
                                  args.description,
                                  args.port_id,
                                  args.network_id)


# Handler for 'tap-service-delete' subcommand
def tap_service_delete(args):
    """Authenticate, then delete the tap service named by args.id."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_service_delete(args.id)


# Handler for 'tap-service-list' subcommand
def tap_service_list(args):
    """Authenticate, then list the tenant's tap services."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_service_list()


# Handler for 'tap-service-show' subcommand
def tap_service_show(args):
    """Authenticate, then show the tap service named by args.id."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_service_show(args.id)


# Handler for 'tap-flow-create' subcommand
def tap_flow_create(args):
    """Authenticate, then create a tap flow from CLI args."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_flow_create(args.name,
                               args.description,
                               args.port_id,
                               args.direction,
                               args.tap_service_id)


# Handler for 'tap-flow-delete' subcommand
def tap_flow_delete(args):
    """Authenticate, then delete the tap flow named by args.id."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_flow_delete(args.id)


# Handler for 'tap-flow-list' subcommand
def tap_flow_list(args):
    """Authenticate, then list tap flows (optionally filtered by
    --tap-service-id)."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_flow_list(args.tap_service_id)


# Handler for 'tap-flow-show' subcommand
def tap_flow_show(args):
    """Authenticate, then show the tap flow named by args.id."""
    api = taas_rest_api()

    retval = api.authenticate()
    if retval != SUCCESS:
        return retval

    return api.tap_flow_show(args.id)


def main():
    """Parse the command line and dispatch to the subcommand handler.

    Returns the handler's SUCCESS/FAILURE status.
    NOTE(review): on Python 3, subcommands are optional by default, so
    running with no subcommand would hit AttributeError on args.func —
    presumably this targets Python 2 (see the shebang); confirm.
    """
    parser = argparse.ArgumentParser(
        description='Command-line interface \
to the OpenStack Tap-as-a-Service API')
    subparsers = parser.add_subparsers(title='subcommands')

    # Sub-parser for 'tap-service-create' subcommand
    parser_tap_service_create = subparsers.add_parser(
        'tap-service-create',
        help='Create a tap service for a given tenant')
    parser_tap_service_create.add_argument('--name',
                                           dest='name',
                                           required=False,
                                           default='')
    parser_tap_service_create.add_argument('--description',
                                           dest='description',
                                           required=False,
                                           default='')
    parser_tap_service_create.add_argument('--port-id',
                                           dest='port_id',
                                           required=True)
    parser_tap_service_create.add_argument('--network-id',
                                           dest='network_id',
                                           required=True)
    parser_tap_service_create.set_defaults(func=tap_service_create)

    # Sub-parser for 'tap-service-delete' subcommand
    parser_tap_service_delete = subparsers.add_parser(
        'tap-service-delete',
        help='Delete a given tap service')
    parser_tap_service_delete.add_argument('id')
    parser_tap_service_delete.set_defaults(func=tap_service_delete)

    # Sub-parser for 'tap-service-list' subcommand
    parser_tap_service_list = subparsers.add_parser(
        'tap-service-list',
        help='List tap services that belong to a given tenant')
    parser_tap_service_list.set_defaults(func=tap_service_list)

    # Sub-parser for 'tap-service-show' subcommand
    parser_tap_service_show = subparsers.add_parser(
        'tap-service-show',
        help='Show information of a given tap service')
    parser_tap_service_show.add_argument('id')
    parser_tap_service_show.set_defaults(func=tap_service_show)

    # Sub-parser for 'tap-flow-create' subcommand
    parser_tap_flow_create = subparsers.add_parser(
        'tap-flow-create',
        help='Create a tap flow for a given tap service')
    parser_tap_flow_create.add_argument('--name',
                                        dest='name',
                                        required=False,
                                        default='')
    parser_tap_flow_create.add_argument('--description',
                                        dest='description',
                                        required=False,
                                        default='')
    parser_tap_flow_create.add_argument('--port-id',
                                        dest='port_id',
                                        required=True)
    parser_tap_flow_create.add_argument('--direction',
                                        dest='direction',
                                        required=True)
    parser_tap_flow_create.add_argument('--tap-service-id',
                                        dest='tap_service_id',
                                        required=True)
    parser_tap_flow_create.set_defaults(func=tap_flow_create)

    # Sub-parser for 'tap-flow-delete' subcommand
    parser_tap_flow_delete = subparsers.add_parser(
        'tap-flow-delete',
        help='Delete a given tap flow')
    parser_tap_flow_delete.add_argument('id')
    parser_tap_flow_delete.set_defaults(func=tap_flow_delete)

    # Sub-parser for 'tap-flow-list' subcommand
    parser_tap_flow_list = subparsers.add_parser(
        'tap-flow-list',
        help='List tap flows that belong to given tenant')
    parser_tap_flow_list.add_argument('--tap-service-id',
                                      dest='tap_service_id',
                                      required=False,
                                      default=None)
    parser_tap_flow_list.set_defaults(func=tap_flow_list)

    # Sub-parser for 'tap-flow-show' subcommand
    parser_tap_flow_show = subparsers.add_parser(
        'tap-flow-show',
        help='Show information of a given tap flow')
    parser_tap_flow_show.add_argument('id')
    parser_tap_flow_show.set_defaults(func=tap_flow_show)

    args = parser.parse_args()

    return args.func(args)


if __name__ == '__main__':
    main()