diff --git a/.coveragerc b/.coveragerc
index d6ed4d0..b356732 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,7 @@
[run]
branch = True
-source = tricircle
-omit = tricircle/tests/*, tricircle/tempestplugin/*
+source = trio2o
+omit = trio2o/tests/*, trio2o/tempestplugin/*
[report]
ignore_errors = True
diff --git a/.gitreview b/.gitreview
index 57a6737..1350437 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
-project=openstack/tricircle.git
+project=openstack/trio2o.git
diff --git a/.testr.conf b/.testr.conf
index 43200f9..f8799c8 100644
--- a/.testr.conf
+++ b/.testr.conf
@@ -2,6 +2,6 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
- ${PYTHON:-python} -m subunit.run discover $TRICIRCLE_TEST_DIRECTORY $LISTOPT $IDOPTION
+ ${PYTHON:-python} -m subunit.run discover $TRIO2O_TEST_DIRECTORY $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index b6a6c96..c8de9cd 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -14,4 +14,4 @@ Any pull requests submitted through GitHub will be ignored.
Any bug should be filed on Launchpad, not GitHub:
- https://bugs.launchpad.net/tricircle
+ https://bugs.launchpad.net/trio2o
diff --git a/HACKING.rst b/HACKING.rst
index f2c7f77..2b7ff7b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,5 +1,5 @@
================================
-The Tricircle Style Commandments
+The Trio2o Style Commandments
================================
Please read the OpenStack Style Commandments
diff --git a/README.rst b/README.rst
index be769c2..7ac8f1e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,37 +1,34 @@
=========
-Tricircle
+Trio2o
=========
-The Tricircle provides an OpenStack API gateway and networking automation
-funtionality to allow multiple OpenStack instances, spanning in one site or
-multiple sites or in hybrid cloud, to be managed as a single OpenStack cloud.
+The Trio2o provides an OpenStack API gateway to allow multiple OpenStack
+instances, spanning one site, multiple sites, or a hybrid cloud, to be
+managed as a single OpenStack cloud.
-The Tricircle and these managed OpenStack instances will use shared KeyStone
+The Trio2o and these managed OpenStack instances will use shared KeyStone
(with centralized or distributed deployment) or federated KeyStones for
identity management.
-The Tricircle presents one big region to the end user in KeyStone. And each
-OpenStack instance called a pod is a sub-region of the Tricircle in
+The Trio2o presents one big region to the end user in KeyStone. Each
+OpenStack instance, called a pod, is a sub-region of the Trio2o in
KeyStone, and usually not visible to end user directly.
-The Tricircle acts as OpenStack API gateway, can handle OpenStack API calls,
+The Trio2o acts as an OpenStack API gateway; it can handle OpenStack API calls,
schedule one proper OpenStack instance if needed during the API calls handling,
-forward the API calls to the appropriate OpenStack instance, and deal with
-tenant level L2/L3 networking across OpenStack instances automatically. So it
-doesn't matter on which bottom OpenStack instance the VMs for the tenant are
-running, they can communicate with each other via L2 or L3.
+and forward the API calls to the appropriate OpenStack instance.
The end user can see availability zone (AZ) and use AZ to provision
-VM, Volume, even Network through the Tricircle. One AZ can include many
-OpenStack instances, the Tricircle can schedule and bind OpenStack instance
-for the tenant inside one AZ. A tenant's resources could be bound to multiple
-specific bottom OpenStack instances in one or multiple AZs automatically.
+VM and Volume through the Trio2o. One AZ can include many OpenStack instances,
+and the Trio2o can schedule and bind an OpenStack instance for the tenant
+inside one AZ. A tenant's resources could be bound to multiple specific bottom
+OpenStack instances in one or multiple AZs automatically.
* Free software: Apache license
-* Design documentation: `Tricircle Design Blueprint `_
-* Wiki: https://wiki.openstack.org/wiki/tricircle
-* Installation with DevStack: https://github.com/openstack/tricircle/blob/master/doc/source/installation.rst
-* Tricircle Admin API documentation: https://github.com/openstack/tricircle/blob/master/doc/source/api_v1.rst
-* Source: https://github.com/openstack/tricircle
-* Bugs: http://bugs.launchpad.net/tricircle
-* Blueprints: https://launchpad.net/tricircle
+* Design documentation: `Trio2o Design Blueprint `_
+* Wiki: https://wiki.openstack.org/wiki/trio2o
+* Installation with DevStack: https://github.com/openstack/trio2o/blob/master/doc/source/installation.rst
+* Trio2o Admin API documentation: https://github.com/openstack/trio2o/blob/master/doc/source/api_v1.rst
+* Source: https://github.com/openstack/trio2o
+* Bugs: http://bugs.launchpad.net/trio2o
+* Blueprints: https://launchpad.net/trio2o
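Editor's sketch (not part of this patch): the AZ-based provisioning described above maps onto the standard clients; the region name "RegionOne" and availability zone "az1" are taken from the DevStack samples later in this change and are only assumptions::

    # assumes a bottom pod bound to availability zone az1, as in the samples
    nova --os-region-name=RegionOne boot --flavor 1 --image $image_id \
        --nic net-id=$net_id --availability-zone az1 vm1
    cinder --os-region-name=RegionOne create --availability-zone=az1 1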
diff --git a/cmd/api.py b/cmd/api.py
index 230d72d..810353d 100644
--- a/cmd/api.py
+++ b/cmd/api.py
@@ -23,11 +23,11 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi
-from tricircle.api import app
-from tricircle.common import config
-from tricircle.common.i18n import _LI
-from tricircle.common.i18n import _LW
-from tricircle.common import restapp
+from trio2o.api import app
+from trio2o.common import config
+from trio2o.common.i18n import _LI
+from trio2o.common.i18n import _LW
+from trio2o.common import restapp
CONF = cfg.CONF
@@ -49,7 +49,7 @@ def main():
LOG.info(_LI("Admin API on http://%(host)s:%(port)s with %(workers)s"),
{'host': host, 'port': port, 'workers': workers})
- service = wsgi.Server(CONF, 'Tricircle Admin_API', application, host, port)
+ service = wsgi.Server(CONF, 'Trio2o Admin_API', application, host, port)
restapp.serve(service, CONF, workers)
LOG.info(_LI("Configuration:"))
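For reference, a minimal sketch of how this Admin API entry point is launched, mirroring the run_process call in devstack/plugin.sh further down in this patch; the paths assume the DevStack defaults ($DEST=/opt/stack, $TRIO2O_CONF_DIR=/etc/trio2o) and are only illustrative::

    # start the Trio2o Admin API the same way the DevStack plugin does
    python /opt/stack/trio2o/cmd/api.py --config-file /etc/trio2o/api.conf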
diff --git a/cmd/cinder_apigw.py b/cmd/cinder_apigw.py
index a29240c..f24f265 100644
--- a/cmd/cinder_apigw.py
+++ b/cmd/cinder_apigw.py
@@ -23,12 +23,12 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi
-from tricircle.common import config
-from tricircle.common.i18n import _LI
-from tricircle.common.i18n import _LW
-from tricircle.common import restapp
+from trio2o.common import config
+from trio2o.common.i18n import _LI
+from trio2o.common.i18n import _LW
+from trio2o.common import restapp
-from tricircle.cinder_apigw import app
+from trio2o.cinder_apigw import app
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -49,7 +49,7 @@ def main():
LOG.info(_LI("Cinder_APIGW on http://%(host)s:%(port)s with %(workers)s"),
{'host': host, 'port': port, 'workers': workers})
- service = wsgi.Server(CONF, 'Tricircle Cinder_APIGW',
+ service = wsgi.Server(CONF, 'Trio2o Cinder_APIGW',
application, host, port)
restapp.serve(service, CONF, workers)
diff --git a/cmd/manage.py b/cmd/manage.py
index ba76b74..ecf015c 100644
--- a/cmd/manage.py
+++ b/cmd/manage.py
@@ -18,14 +18,14 @@ import sys
from oslo_config import cfg
-from tricircle.db import core
-from tricircle.db import migration_helpers
+from trio2o.db import core
+from trio2o.db import migration_helpers
def main(argv=None, config_files=None):
core.initialize()
cfg.CONF(args=argv[2:],
- project='tricircle',
+ project='trio2o',
default_config_files=config_files)
migration_helpers.find_migrate_repo()
migration_helpers.sync_repo(2)
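A minimal sketch of the database bootstrap as the DevStack plugin invokes it (see plugin.sh below); the config path is only the plugin default::

    # run the trio2o DB migrations against the database configured in api.conf
    python /opt/stack/trio2o/cmd/manage.py /etc/trio2o/api.conf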
diff --git a/cmd/nova_apigw.py b/cmd/nova_apigw.py
index 310706c..12fa2f3 100644
--- a/cmd/nova_apigw.py
+++ b/cmd/nova_apigw.py
@@ -28,12 +28,12 @@ from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import wsgi
-from tricircle.common import config
-from tricircle.common.i18n import _LI
-from tricircle.common.i18n import _LW
-from tricircle.common import restapp
+from trio2o.common import config
+from trio2o.common.i18n import _LI
+from trio2o.common.i18n import _LW
+from trio2o.common import restapp
-from tricircle.nova_apigw import app
+from trio2o.nova_apigw import app
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@@ -54,7 +54,7 @@ def main():
LOG.info(_LI("Nova_APIGW on http://%(host)s:%(port)s with %(workers)s"),
{'host': host, 'port': port, 'workers': workers})
- service = wsgi.Server(CONF, 'Tricircle Nova_APIGW',
+ service = wsgi.Server(CONF, 'Trio2o Nova_APIGW',
application, host, port)
restapp.serve(service, CONF, workers)
diff --git a/cmd/xjob.py b/cmd/xjob.py
index fdc2754..58d6eab 100644
--- a/cmd/xjob.py
+++ b/cmd/xjob.py
@@ -27,11 +27,11 @@ import sys
from oslo_config import cfg
from oslo_log import log as logging
-from tricircle.common import config
-from tricircle.common.i18n import _LI
-from tricircle.common.i18n import _LW
+from trio2o.common import config
+from trio2o.common.i18n import _LI
+from trio2o.common.i18n import _LW
-from tricircle.xjob import xservice
+from trio2o.xjob import xservice
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
diff --git a/devstack/local.conf.node_1.sample b/devstack/local.conf.node_1.sample
deleted file mode 100644
index d32ba9f..0000000
--- a/devstack/local.conf.node_1.sample
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# Sample DevStack local.conf.
-#
-# This sample file is intended to be used for your typical Tricircle DevStack
-# multi-node environment. As this file configures, DevStack will setup two
-# regions, one top region running Tricircle services, Keystone, Glance, Nova
-# API gateway, Cinder API gateway and Neutron with Tricircle plugin; and one
-# bottom region running original Nova, Cinder and Neutron.
-#
-# This file works with local.conf.node_2.sample to help you build a two-node
-# three-region Tricircle environment. Keystone and Glance in top region are
-# shared by services in all the regions.
-#
-# Some options needs to be change to adapt to your environment, see README.md
-# for detail.
-#
-
-[[local|localrc]]
-
-DATABASE_PASSWORD=password
-RABBIT_PASSWORD=password
-SERVICE_PASSWORD=password
-SERVICE_TOKEN=password
-ADMIN_PASSWORD=password
-LOGFILE=/opt/stack/logs/stack.sh.log
-VERBOSE=True
-LOG_COLOR=True
-SCREEN_LOGDIR=/opt/stack/logs
-FIXED_RANGE=10.0.0.0/24
-NETWORK_GATEWAY=10.0.0.1
-FIXED_NETWORK_SIZE=256
-FLOATING_RANGE=10.100.100.160/24
-Q_FLOATING_ALLOCATION_POOL=start=10.100.100.160,end=10.100.100.192
-
-PUBLIC_NETWORK_GATEWAY=10.100.100.3
-
-Q_USE_SECGROUP=False
-LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-NEUTRON_CREATE_INITIAL_NETWORKS=False
-Q_USE_PROVIDERNET_FOR_PUBLIC=True
-
-HOST_IP=10.250.201.24
-Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000)
-OVS_BRIDGE_MAPPINGS=bridge:br-bridge
-
-Q_ENABLE_TRICIRCLE=True
-enable_plugin tricircle https://github.com/openstack/tricircle/
-
-# Tricircle Services
-enable_service t-api
-enable_service t-ngw
-enable_service t-cgw
-enable_service t-job
-
-# Use Neutron instead of nova-network
-disable_service n-net
-enable_service q-svc
-enable_service q-svc1
-enable_service q-dhcp
-enable_service q-agt
-enable_service q-l3
-
-enable_service c-api
-enable_service c-vol
-enable_service c-sch
-
-disable_service n-obj
-disable_service c-bak
-disable_service tempest
-disable_service horizon
diff --git a/devstack/local.conf.sample b/devstack/local.conf.sample
index ec5959f..fa470ae 100644
--- a/devstack/local.conf.sample
+++ b/devstack/local.conf.sample
@@ -1,10 +1,11 @@
#
-# Sample DevStack local.conf.
+# Sample DevStack local.conf for a single-node Trio2o setup.
#
-# This sample file is intended to be used for your typical Tricircle DevStack
-# environment that's running all of OpenStack on a single host.
+# This sample file is intended to be used for your typical Trio2o DevStack
+# environment that runs the Trio2o and one bottom OpenStack pod (Pod1) on a
+# single host.
#
-# No changes to this sample configuration are required for this to work.
+# Changing HOST_IP in this sample configuration is required.
#
[[local|localrc]]
@@ -18,33 +19,27 @@ LOGFILE=/opt/stack/logs/stack.sh.log
VERBOSE=True
LOG_COLOR=True
SCREEN_LOGDIR=/opt/stack/logs
-HOST_IP=127.0.0.1
FIXED_RANGE=10.0.0.0/24
NETWORK_GATEWAY=10.0.0.1
FIXED_NETWORK_SIZE=256
FLOATING_RANGE=10.100.100.160/24
Q_FLOATING_ALLOCATION_POOL=start=10.100.100.160,end=10.100.100.192
-
NEUTRON_CREATE_INITIAL_NETWORKS=False
-
PUBLIC_NETWORK_GATEWAY=10.100.100.3
-
Q_USE_SECGROUP=False
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
-Q_ENABLE_TRICIRCLE=True
-enable_plugin tricircle https://github.com/openstack/tricircle/
+# Enable Trio2o
+Q_ENABLE_TRIO2O=True
+enable_plugin trio2o https://github.com/openstack/trio2o/
-# Tricircle Services
-enable_service t-api
-enable_service t-ngw
-enable_service t-cgw
-enable_service t-job
+# Set HOST_IP to the IP address of the host
+# where the Trio2o is running.
+HOST_IP=162.3.124.203
# Use Neutron instead of nova-network
disable_service n-net
enable_service q-svc
-enable_service q-svc1
enable_service q-dhcp
enable_service q-agt
@@ -56,5 +51,5 @@ enable_service c-api
enable_service c-vol
enable_service c-sch
disable_service c-bak
-# disable_service tempest
+disable_service tempest
disable_service horizon
diff --git a/devstack/local.conf.node_2.sample b/devstack/local.conf.sample2
similarity index 52%
rename from devstack/local.conf.node_2.sample
rename to devstack/local.conf.sample2
index 5437630..67beb6c 100644
--- a/devstack/local.conf.node_2.sample
+++ b/devstack/local.conf.sample2
@@ -1,20 +1,22 @@
#
-# Sample DevStack local.conf.
+# Sample DevStack local.conf for the second node (local.conf.sample2).
#
-# This sample file is intended to be used for your typical Tricircle DevStack
+# This sample file is intended to be used for your typical Trio2o DevStack
# multi-node environment. As this file configures, DevStack will setup one
-# bottom region running original Nova, Cinder and Neutron.
+# more bottom OpenStack pod (Pod2) running original Nova, Cinder and Neutron.
#
-# This file works with local.conf.node_1.sample to help you build a two-node
-# three-region Tricircle environment. Keystone and Glance in top region are
-# shared by services in all the regions.
+# This file works with local.conf.sample to help you build a two-node
+# three-region Trio2o environment. Keystone, Neutron and Glance in the top
+# region are shared by services in all the regions.
#
-# Some options needs to be change to adapt to your environment, see README.md
-# for detail.
+# Some options need to be changed to adapt to your environment; read
+# installation.rst for details.
#
[[local|localrc]]
+RECLONE=no
+
DATABASE_PASSWORD=password
RABBIT_PASSWORD=password
SERVICE_PASSWORD=password
@@ -29,24 +31,31 @@ NETWORK_GATEWAY=10.0.0.1
FIXED_NETWORK_SIZE=256
FLOATING_RANGE=10.100.100.160/24
Q_FLOATING_ALLOCATION_POOL=start=10.100.100.160,end=10.100.100.192
-
PUBLIC_NETWORK_GATEWAY=10.100.100.3
-
Q_USE_SECGROUP=False
LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver
NEUTRON_CREATE_INITIAL_NETWORKS=False
Q_USE_PROVIDERNET_FOR_PUBLIC=True
-HOST_IP=10.250.201.25
+# The region name of this OpenStack instance; it is also
+# the pod name in the Trio2o.
REGION_NAME=Pod2
-KEYSTONE_REGION_NAME=RegionOne
-SERVICE_HOST=$HOST_IP
-KEYSTONE_SERVICE_HOST=10.250.201.24
-KEYSTONE_AUTH_HOST=10.250.201.24
-GLANCE_SERVICE_HOST=10.250.201.24
-Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
-OVS_BRIDGE_MAPPINGS=bridge:br-bridge,extern:br-ext
+# Change HOST_IP and SERVICE_HOST to the host's IP address
+# where Pod2 is running.
+HOST_IP=162.3.124.204
+SERVICE_HOST=162.3.124.204
+
+# Use the KeyStone located in RegionOne, where the Trio2o is installed.
+# Change KEYSTONE_SERVICE_HOST and KEYSTONE_AUTH_HOST to the IP address
+# of the host where KeyStone is served.
+KEYSTONE_REGION_NAME=RegionOne
+KEYSTONE_SERVICE_HOST=162.3.124.203
+KEYSTONE_AUTH_HOST=162.3.124.203
+
+# Use the Glance located in RegionOne, where the Trio2o is installed.
+GLANCE_SERVICE_HOST=162.3.124.203
# Use Neutron instead of nova-network
disable_service n-net
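Once the second node is stacked with this file, it still has to be registered as a pod in the Trio2o; a hedged sketch, mirroring the pod-creation calls in doc/source/installation.rst (run it where the Trio2o Admin API is reachable, replacing 127.0.0.1 as needed)::

    curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
        -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod2", "az_name": "az2"}}'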
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 9eab6f3..3ca0a0d 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -1,30 +1,30 @@
-# Devstack extras script to install Tricircle
+# Devstack extras script to install Trio2o
-# Test if any tricircle services are enabled
-# is_tricircle_enabled
-function is_tricircle_enabled {
+# Test if any trio2o services are enabled
+# is_trio2o_enabled
+function is_trio2o_enabled {
[[ ,${ENABLED_SERVICES} =~ ,"t-api" ]] && return 0
return 1
}
-# create_tricircle_accounts() - Set up common required tricircle
+# create_trio2o_accounts() - Set up common required trio2o
# service accounts in keystone
# Project User Roles
# -------------------------------------------------------------------------
-# $SERVICE_TENANT_NAME tricircle service
+# $SERVICE_TENANT_NAME trio2o service
-function create_tricircle_accounts {
+function create_trio2o_accounts {
if [[ "$ENABLED_SERVICES" =~ "t-api" ]]; then
- create_service_user "tricircle"
+ create_service_user "trio2o"
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local tricircle_api=$(get_or_create_service "tricircle" \
+ local trio2o_api=$(get_or_create_service "trio2o" \
"Cascading" "OpenStack Cascading Service")
- get_or_create_endpoint $tricircle_api \
+ get_or_create_endpoint $trio2o_api \
"$REGION_NAME" \
- "$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0" \
- "$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0" \
- "$SERVICE_PROTOCOL://$TRICIRCLE_API_HOST:$TRICIRCLE_API_PORT/v1.0"
+ "$SERVICE_PROTOCOL://$TRIO2O_API_HOST:$TRIO2O_API_PORT/v1.0" \
+ "$SERVICE_PROTOCOL://$TRIO2O_API_HOST:$TRIO2O_API_PORT/v1.0" \
+ "$SERVICE_PROTOCOL://$TRIO2O_API_HOST:$TRIO2O_API_PORT/v1.0"
fi
fi
}
@@ -41,16 +41,16 @@ function create_nova_apigw_accounts {
create_service_user "nova_apigw"
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local tricircle_nova_apigw=$(get_or_create_service "nova" \
+ local trio2o_nova_apigw=$(get_or_create_service "nova" \
"compute" "Nova Compute Service")
- remove_old_endpoint_conf $tricircle_nova_apigw
+ remove_old_endpoint_conf $trio2o_nova_apigw
- get_or_create_endpoint $tricircle_nova_apigw \
+ get_or_create_endpoint $trio2o_nova_apigw \
"$REGION_NAME" \
- "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
- "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
- "$SERVICE_PROTOCOL://$TRICIRCLE_NOVA_APIGW_HOST:$TRICIRCLE_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s'
+ "$SERVICE_PROTOCOL://$TRIO2O_NOVA_APIGW_HOST:$TRIO2O_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
+ "$SERVICE_PROTOCOL://$TRIO2O_NOVA_APIGW_HOST:$TRIO2O_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s' \
+ "$SERVICE_PROTOCOL://$TRIO2O_NOVA_APIGW_HOST:$TRIO2O_NOVA_APIGW_PORT/v2.1/"'$(tenant_id)s'
fi
fi
}
@@ -67,22 +67,22 @@ function create_cinder_apigw_accounts {
create_service_user "cinder_apigw"
if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then
- local tricircle_cinder_apigw=$(get_or_create_service "cinder" \
+ local trio2o_cinder_apigw=$(get_or_create_service "cinder" \
"volumev2" "Cinder Volume Service")
- remove_old_endpoint_conf $tricircle_cinder_apigw
+ remove_old_endpoint_conf $trio2o_cinder_apigw
- get_or_create_endpoint $tricircle_cinder_apigw \
+ get_or_create_endpoint $trio2o_cinder_apigw \
"$REGION_NAME" \
- "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
- "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
- "$SERVICE_PROTOCOL://$TRICIRCLE_CINDER_APIGW_HOST:$TRICIRCLE_CINDER_APIGW_PORT/v2/"'$(tenant_id)s'
+ "$SERVICE_PROTOCOL://$TRIO2O_CINDER_APIGW_HOST:$TRIO2O_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
+ "$SERVICE_PROTOCOL://$TRIO2O_CINDER_APIGW_HOST:$TRIO2O_CINDER_APIGW_PORT/v2/"'$(tenant_id)s' \
+ "$SERVICE_PROTOCOL://$TRIO2O_CINDER_APIGW_HOST:$TRIO2O_CINDER_APIGW_PORT/v2/"'$(tenant_id)s'
fi
fi
}
-# common config-file configuration for tricircle services
+# remove the old endpoint of a service from the keystone catalog
function remove_old_endpoint_conf {
local service=$1
@@ -102,24 +102,24 @@ function remove_old_endpoint_conf {
}
-# create_tricircle_cache_dir() - Set up cache dir for tricircle
-function create_tricircle_cache_dir {
+# create_trio2o_cache_dir() - Set up cache dir for trio2o
+function create_trio2o_cache_dir {
# Delete existing dir
- sudo rm -rf $TRICIRCLE_AUTH_CACHE_DIR
- sudo mkdir -p $TRICIRCLE_AUTH_CACHE_DIR
- sudo chown `whoami` $TRICIRCLE_AUTH_CACHE_DIR
+ sudo rm -rf $TRIO2O_AUTH_CACHE_DIR
+ sudo mkdir -p $TRIO2O_AUTH_CACHE_DIR
+ sudo chown `whoami` $TRIO2O_AUTH_CACHE_DIR
}
-# common config-file configuration for tricircle services
-function init_common_tricircle_conf {
+# common config-file configuration for trio2o services
+function init_common_trio2o_conf {
local conf_file=$1
touch $conf_file
iniset $conf_file DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL
iniset $conf_file DEFAULT verbose True
iniset $conf_file DEFAULT use_syslog $SYSLOG
- iniset $conf_file DEFAULT tricircle_db_connection `database_connection_url tricircle`
+ iniset $conf_file DEFAULT trio2o_db_connection `database_connection_url trio2o`
iniset $conf_file client admin_username admin
iniset $conf_file client admin_password $ADMIN_PASSWORD
@@ -127,181 +127,154 @@ function init_common_tricircle_conf {
iniset $conf_file client auto_refresh_endpoint True
iniset $conf_file client top_pod_name $REGION_NAME
- iniset $conf_file oslo_concurrency lock_path $TRICIRCLE_STATE_PATH/lock
+ iniset $conf_file oslo_concurrency lock_path $TRIO2O_STATE_PATH/lock
}
-function configure_tricircle_api {
+function configure_trio2o_api {
if is_service_enabled t-api ; then
- echo "Configuring Tricircle API"
+ echo "Configuring Trio2o API"
- init_common_tricircle_conf $TRICIRCLE_API_CONF
+ init_common_trio2o_conf $TRIO2O_API_CONF
- setup_colorized_logging $TRICIRCLE_API_CONF DEFAULT tenant_name
+ setup_colorized_logging $TRIO2O_API_CONF DEFAULT tenant_name
if is_service_enabled keystone; then
- create_tricircle_cache_dir
+ create_trio2o_cache_dir
# Configure auth token middleware
- configure_auth_token_middleware $TRICIRCLE_API_CONF tricircle \
- $TRICIRCLE_AUTH_CACHE_DIR
+ configure_auth_token_middleware $TRIO2O_API_CONF trio2o \
+ $TRIO2O_AUTH_CACHE_DIR
else
- iniset $TRICIRCLE_API_CONF DEFAULT auth_strategy noauth
+ iniset $TRIO2O_API_CONF DEFAULT auth_strategy noauth
fi
fi
}
-function configure_tricircle_nova_apigw {
+function configure_trio2o_nova_apigw {
if is_service_enabled t-ngw ; then
- echo "Configuring Tricircle Nova APIGW"
+ echo "Configuring Trio2o Nova APIGW"
- init_common_tricircle_conf $TRICIRCLE_NOVA_APIGW_CONF
+ init_common_trio2o_conf $TRIO2O_NOVA_APIGW_CONF
- setup_colorized_logging $TRICIRCLE_NOVA_APIGW_CONF DEFAULT tenant_name
+ setup_colorized_logging $TRIO2O_NOVA_APIGW_CONF DEFAULT tenant_name
if is_service_enabled keystone; then
- create_tricircle_cache_dir
+ create_trio2o_cache_dir
# Configure auth token middleware
- configure_auth_token_middleware $TRICIRCLE_NOVA_APIGW_CONF tricircle \
- $TRICIRCLE_AUTH_CACHE_DIR
+ configure_auth_token_middleware $TRIO2O_NOVA_APIGW_CONF trio2o \
+ $TRIO2O_AUTH_CACHE_DIR
else
- iniset $TRICIRCLE_NOVA_APIGW_CONF DEFAULT auth_strategy noauth
+ iniset $TRIO2O_NOVA_APIGW_CONF DEFAULT auth_strategy noauth
fi
fi
}
-function configure_tricircle_cinder_apigw {
+function configure_trio2o_cinder_apigw {
if is_service_enabled t-cgw ; then
- echo "Configuring Tricircle Cinder APIGW"
+ echo "Configuring Trio2o Cinder APIGW"
- init_common_tricircle_conf $TRICIRCLE_CINDER_APIGW_CONF
+ init_common_trio2o_conf $TRIO2O_CINDER_APIGW_CONF
- setup_colorized_logging $TRICIRCLE_CINDER_APIGW_CONF DEFAULT tenant_name
+ setup_colorized_logging $TRIO2O_CINDER_APIGW_CONF DEFAULT tenant_name
if is_service_enabled keystone; then
- create_tricircle_cache_dir
+ create_trio2o_cache_dir
# Configure auth token middleware
- configure_auth_token_middleware $TRICIRCLE_CINDER_APIGW_CONF tricircle \
- $TRICIRCLE_AUTH_CACHE_DIR
+ configure_auth_token_middleware $TRIO2O_CINDER_APIGW_CONF trio2o \
+ $TRIO2O_AUTH_CACHE_DIR
else
- iniset $TRICIRCLE_CINDER_APIGW_CONF DEFAULT auth_strategy noauth
+ iniset $TRIO2O_CINDER_APIGW_CONF DEFAULT auth_strategy noauth
fi
fi
}
-function configure_tricircle_xjob {
+function configure_trio2o_xjob {
if is_service_enabled t-job ; then
- echo "Configuring Tricircle xjob"
+ echo "Configuring Trio2o xjob"
- init_common_tricircle_conf $TRICIRCLE_XJOB_CONF
+ init_common_trio2o_conf $TRIO2O_XJOB_CONF
- setup_colorized_logging $TRICIRCLE_XJOB_CONF DEFAULT
+ setup_colorized_logging $TRIO2O_XJOB_CONF DEFAULT
fi
}
-function start_new_neutron_server {
- local server_index=$1
- local region_name=$2
- local q_port=$3
+function move_neutron_server {
+ local region_name=$1
+
+ remove_old_endpoint_conf "neutron"
get_or_create_service "neutron" "network" "Neutron Service"
get_or_create_endpoint "network" \
"$region_name" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/" \
- "$Q_PROTOCOL://$SERVICE_HOST:$q_port/"
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \
+ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/"
- cp $NEUTRON_CONF $NEUTRON_CONF.$server_index
- iniset $NEUTRON_CONF.$server_index database connection `database_connection_url $Q_DB_NAME$server_index`
- iniset $NEUTRON_CONF.$server_index nova region_name $region_name
- iniset $NEUTRON_CONF.$server_index DEFAULT bind_port $q_port
+ iniset $NEUTRON_CONF nova region_name $region_name
- recreate_database $Q_DB_NAME$server_index
- $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE upgrade head
-
- run_process q-svc$server_index "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF.$server_index --config-file /$Q_PLUGIN_CONF_FILE"
+ stop_process q-svc
+ # remove previous failure flag file since we are going to restart service
+ rm -f "$SERVICE_DIR/$SCREEN_NAME"/q-svc.failure
+ sleep 20
+ run_process q-svc "$NEUTRON_BIN_DIR/neutron-server --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE"
}
-
-if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
+if [[ "$Q_ENABLE_TRIO2O" == "True" ]]; then
if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
- echo summary "Tricircle pre-install"
+        echo_summary "Trio2o pre-install"
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
- echo_summary "Installing Tricircle"
+ echo_summary "Installing Trio2o"
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
- echo_summary "Configuring Tricircle"
+ echo_summary "Configuring Trio2o"
- sudo install -d -o $STACK_USER -m 755 $TRICIRCLE_CONF_DIR
+ sudo install -d -o $STACK_USER -m 755 $TRIO2O_CONF_DIR
- configure_tricircle_api
- configure_tricircle_nova_apigw
- configure_tricircle_cinder_apigw
- configure_tricircle_xjob
+ enable_service t-api t-job t-ngw t-cgw
- echo export PYTHONPATH=\$PYTHONPATH:$TRICIRCLE_DIR >> $RC_DIR/.localrc.auto
+ configure_trio2o_api
+ configure_trio2o_nova_apigw
+ configure_trio2o_cinder_apigw
+ configure_trio2o_xjob
- setup_package $TRICIRCLE_DIR -e
+ echo export PYTHONPATH=\$PYTHONPATH:$TRIO2O_DIR >> $RC_DIR/.localrc.auto
- recreate_database tricircle
- python "$TRICIRCLE_DIR/cmd/manage.py" "$TRICIRCLE_API_CONF"
+ setup_package $TRIO2O_DIR -e
- if is_service_enabled q-svc ; then
- start_new_neutron_server 1 $POD_REGION_NAME $TRICIRCLE_NEUTRON_PORT
-
- # reconfigure neutron server to use our own plugin
- echo "Configuring Neutron plugin for Tricircle"
- Q_PLUGIN_CLASS="tricircle.network.plugin.TricirclePlugin"
-
- iniset $NEUTRON_CONF DEFAULT core_plugin "$Q_PLUGIN_CLASS"
- iniset $NEUTRON_CONF DEFAULT service_plugins ""
- iniset $NEUTRON_CONF DEFAULT tricircle_db_connection `database_connection_url tricircle`
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes False
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes False
- iniset $NEUTRON_CONF client admin_username admin
- iniset $NEUTRON_CONF client admin_password $ADMIN_PASSWORD
- iniset $NEUTRON_CONF client admin_tenant demo
- iniset $NEUTRON_CONF client auto_refresh_endpoint True
- iniset $NEUTRON_CONF client top_pod_name $REGION_NAME
-
- if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
- iniset $NEUTRON_CONF tricircle type_drivers local,shared_vlan
- iniset $NEUTRON_CONF tricircle tenant_network_types local,shared_vlan
- iniset $NEUTRON_CONF tricircle network_vlan_ranges `echo $Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS | awk -F= '{print $2}'`
- iniset $NEUTRON_CONF tricircle bridge_network_type shared_vlan
- fi
- fi
+ recreate_database trio2o
+ python "$TRIO2O_DIR/cmd/manage.py" "$TRIO2O_API_CONF"
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
- echo_summary "Initializing Tricircle Service"
+ echo_summary "Initializing Trio2o Service"
if is_service_enabled t-api; then
- create_tricircle_accounts
+ create_trio2o_accounts
- run_process t-api "python $TRICIRCLE_API --config-file $TRICIRCLE_API_CONF"
+ run_process t-api "python $TRIO2O_API --config-file $TRIO2O_API_CONF"
fi
if is_service_enabled t-ngw; then
create_nova_apigw_accounts
- run_process t-ngw "python $TRICIRCLE_NOVA_APIGW --config-file $TRICIRCLE_NOVA_APIGW_CONF"
+ run_process t-ngw "python $TRIO2O_NOVA_APIGW --config-file $TRIO2O_NOVA_APIGW_CONF"
# Nova services are running, but we need to re-configure them to
# move them to bottom region
iniset $NOVA_CONF neutron region_name $POD_REGION_NAME
- iniset $NOVA_CONF neutron url "$Q_PROTOCOL://$SERVICE_HOST:$TRICIRCLE_NEUTRON_PORT"
+ iniset $NOVA_CONF neutron url "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT"
iniset $NOVA_CONF cinder os_region_name $POD_REGION_NAME
get_or_create_endpoint "compute" \
@@ -320,11 +293,15 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $NOVA_CONF" $LIBVIRT_GROUP
fi
+ if is_service_enabled q-svc; then
+ move_neutron_server $POD_REGION_NAME
+ fi
+
if is_service_enabled t-cgw; then
create_cinder_apigw_accounts
- run_process t-cgw "python $TRICIRCLE_CINDER_APIGW --config-file $TRICIRCLE_CINDER_APIGW_CONF"
+ run_process t-cgw "python $TRIO2O_CINDER_APIGW --config-file $TRIO2O_CINDER_APIGW_CONF"
get_or_create_endpoint "volumev2" \
"$POD_REGION_NAME" \
@@ -335,7 +312,7 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
if is_service_enabled t-job; then
- run_process t-job "python $TRICIRCLE_XJOB --config-file $TRICIRCLE_XJOB_CONF"
+ run_process t-job "python $TRIO2O_XJOB --config-file $TRIO2O_XJOB_CONF"
fi
fi
@@ -356,9 +333,5 @@ if [[ "$Q_ENABLE_TRICIRCLE" == "True" ]]; then
if is_service_enabled t-job; then
stop_process t-job
fi
-
- if is_service_enabled q-svc1; then
- stop_process q-svc1
- fi
fi
fi
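A quick way to check what the plugin registered after stacking; this is only an illustrative verification step, with the region name taken from the samples in this patch::

    # the Trio2o Admin API (service type "Cascading"), the Nova API gateway and
    # the Cinder API gateway should all show up in RegionOne
    openstack --os-region-name=RegionOne service list
    openstack --os-region-name=RegionOne endpoint list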
diff --git a/devstack/settings b/devstack/settings
index a404215..d18d7f0 100644
--- a/devstack/settings
+++ b/devstack/settings
@@ -1,45 +1,44 @@
# Git information
-TRICIRCLE_REPO=${TRICIRCLE_REPO:-https://git.openstack.org/cgit/openstack/tricircle/}
-TRICIRCLE_DIR=$DEST/tricircle
-TRICIRCLE_BRANCH=${TRICIRCLE_BRANCH:-master}
+TRIO2O_REPO=${TRIO2O_REPO:-https://git.openstack.org/cgit/openstack/trio2o/}
+TRIO2O_DIR=$DEST/trio2o
+TRIO2O_BRANCH=${TRIO2O_BRANCH:-master}
# common variables
POD_REGION_NAME=${POD_REGION_NAME:-Pod1}
-TRICIRCLE_NEUTRON_PORT=${TRICIRCLE_NEUTRON_PORT:-20001}
-TRICIRCLE_CONF_DIR=${TRICIRCLE_CONF_DIR:-/etc/tricircle}
-TRICIRCLE_STATE_PATH=${TRICIRCLE_STATE_PATH:-/var/lib/tricircle}
+TRIO2O_CONF_DIR=${TRIO2O_CONF_DIR:-/etc/trio2o}
+TRIO2O_STATE_PATH=${TRIO2O_STATE_PATH:-/var/lib/trio2o}
-# tricircle rest admin api
-TRICIRCLE_API=$TRICIRCLE_DIR/cmd/api.py
-TRICIRCLE_API_CONF=$TRICIRCLE_CONF_DIR/api.conf
+# trio2o rest admin api
+TRIO2O_API=$TRIO2O_DIR/cmd/api.py
+TRIO2O_API_CONF=$TRIO2O_CONF_DIR/api.conf
-TRICIRCLE_API_LISTEN_ADDRESS=${TRICIRCLE_API_LISTEN_ADDRESS:-0.0.0.0}
-TRICIRCLE_API_HOST=${TRICIRCLE_API_HOST:-$SERVICE_HOST}
-TRICIRCLE_API_PORT=${TRICIRCLE_API_PORT:-19999}
-TRICIRCLE_API_PROTOCOL=${TRICIRCLE_API_PROTOCOL:-$SERVICE_PROTOCOL}
+TRIO2O_API_LISTEN_ADDRESS=${TRIO2O_API_LISTEN_ADDRESS:-0.0.0.0}
+TRIO2O_API_HOST=${TRIO2O_API_HOST:-$SERVICE_HOST}
+TRIO2O_API_PORT=${TRIO2O_API_PORT:-19999}
+TRIO2O_API_PROTOCOL=${TRIO2O_API_PROTOCOL:-$SERVICE_PROTOCOL}
-# tricircle nova_apigw
-TRICIRCLE_NOVA_APIGW=$TRICIRCLE_DIR/cmd/nova_apigw.py
-TRICIRCLE_NOVA_APIGW_CONF=$TRICIRCLE_CONF_DIR/nova_apigw.conf
+# trio2o nova_apigw
+TRIO2O_NOVA_APIGW=$TRIO2O_DIR/cmd/nova_apigw.py
+TRIO2O_NOVA_APIGW_CONF=$TRIO2O_CONF_DIR/nova_apigw.conf
-TRICIRCLE_NOVA_APIGW_LISTEN_ADDRESS=${TRICIRCLE_NOVA_APIGW_LISTEN_ADDRESS:-0.0.0.0}
-TRICIRCLE_NOVA_APIGW_HOST=${TRICIRCLE_NOVA_APIGW_HOST:-$SERVICE_HOST}
-TRICIRCLE_NOVA_APIGW_PORT=${TRICIRCLE_NOVA_APIGW_PORT:-19998}
-TRICIRCLE_NOVA_APIGW_PROTOCOL=${TRICIRCLE_NOVA_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
+TRIO2O_NOVA_APIGW_LISTEN_ADDRESS=${TRIO2O_NOVA_APIGW_LISTEN_ADDRESS:-0.0.0.0}
+TRIO2O_NOVA_APIGW_HOST=${TRIO2O_NOVA_APIGW_HOST:-$SERVICE_HOST}
+TRIO2O_NOVA_APIGW_PORT=${TRIO2O_NOVA_APIGW_PORT:-19998}
+TRIO2O_NOVA_APIGW_PROTOCOL=${TRIO2O_NOVA_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
-# tricircle cinder_apigw
-TRICIRCLE_CINDER_APIGW=$TRICIRCLE_DIR/cmd/cinder_apigw.py
-TRICIRCLE_CINDER_APIGW_CONF=$TRICIRCLE_CONF_DIR/cinder_apigw.conf
+# trio2o cinder_apigw
+TRIO2O_CINDER_APIGW=$TRIO2O_DIR/cmd/cinder_apigw.py
+TRIO2O_CINDER_APIGW_CONF=$TRIO2O_CONF_DIR/cinder_apigw.conf
-TRICIRCLE_CINDER_APIGW_LISTEN_ADDRESS=${TRICIRCLE_CINDER_APIGW_LISTEN_ADDRESS:-0.0.0.0}
-TRICIRCLE_CINDER_APIGW_HOST=${TRICIRCLE_CINDER_APIGW_HOST:-$SERVICE_HOST}
-TRICIRCLE_CINDER_APIGW_PORT=${TRICIRCLE_CINDER_APIGW_PORT:-19997}
-TRICIRCLE_CINDER_APIGW_PROTOCOL=${TRICIRCLE_CINDER_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
+TRIO2O_CINDER_APIGW_LISTEN_ADDRESS=${TRIO2O_CINDER_APIGW_LISTEN_ADDRESS:-0.0.0.0}
+TRIO2O_CINDER_APIGW_HOST=${TRIO2O_CINDER_APIGW_HOST:-$SERVICE_HOST}
+TRIO2O_CINDER_APIGW_PORT=${TRIO2O_CINDER_APIGW_PORT:-19997}
+TRIO2O_CINDER_APIGW_PROTOCOL=${TRIO2O_CINDER_APIGW_PROTOCOL:-$SERVICE_PROTOCOL}
-# tricircle xjob
-TRICIRCLE_XJOB=$TRICIRCLE_DIR/cmd/xjob.py
-TRICIRCLE_XJOB_CONF=$TRICIRCLE_CONF_DIR/xjob.conf
+# trio2o xjob
+TRIO2O_XJOB=$TRIO2O_DIR/cmd/xjob.py
+TRIO2O_XJOB_CONF=$TRIO2O_CONF_DIR/xjob.conf
-TRICIRCLE_AUTH_CACHE_DIR=${TRICIRCLE_AUTH_CACHE_DIR:-/var/cache/tricircle}
+TRIO2O_AUTH_CACHE_DIR=${TRIO2O_AUTH_CACHE_DIR:-/var/cache/trio2o}
-export PYTHONPATH=$PYTHONPATH:$TRICIRCLE_DIR
+export PYTHONPATH=$PYTHONPATH:$TRIO2O_DIR
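Since every setting above uses ${VAR:-default} expansion, any of them can be overridden from local.conf before the plugin is sourced; the values below are examples only, not required changes::

    # in the [[local|localrc]] section of local.conf
    TRIO2O_API_PORT=19999
    TRIO2O_CONF_DIR=/etc/trio2o
    POD_REGION_NAME=Pod1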
diff --git a/devstack/verify_cross_pod_install.sh b/devstack/verify_cross_pod_install.sh
deleted file mode 100755
index 717dffa..0000000
--- a/devstack/verify_cross_pod_install.sh
+++ /dev/null
@@ -1,143 +0,0 @@
-#!/bin/bash
-#
-# Script name: verify_cross_pod_install.sh
-# This script is to verify the installation of Tricircle in cross pod L3 networking.
-# It verify both east-west and north-south networks.
-#
-# In this script, there are some parameters you need to consider before running it.
-#
-# 1, Post URL whether is 127.0.0.1 or something else,
-# 2, This script create 2 subnets 10.0.1.0/24 and 10.0.2.0/24, Change these if needed.
-# 3, This script create external subnet ext-net 10.50.11.0/26, Change it according
-# your own environment.
-# 4, The floating ip attached to the VM with ip 10.0.2.3, created by the script
-# "verify_cross_pod_install.sh", modify it to your own environment.
-#
-# Change the parameters according to your own environment.
-# Finally, execute "verify_cross_pod_install.sh" in the Node1.
-#
-# Author: Pengfei Shi
-#
-
-set -o xtrace
-
-TEST_DIR=$(pwd)
-echo "Test work directory is $TEST_DIR."
-
-if [ ! -r admin-openrc.sh ];then
- set -o xtrace
- echo "Your work directory doesn't have admin-openrc.sh,"
- echo "Please check whether you are in tricircle/devstack/ or not and run this script."
-exit 1
-fi
-
-echo "Begining the verify testing..."
-
-echo "Import client environment variables:"
-source $TEST_DIR/admin-openrc.sh
-
-echo "******************************"
-echo "* Verify Endpoint *"
-echo "******************************"
-
-echo "List openstack endpoint:"
-openstack --debug endpoint list
-
-token=$(openstack token issue | awk 'NR==5 {print $4}')
-
-echo $token
-
-curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-
-curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-
-curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod2", "az_name": "az2"}}'
-
-echo "******************************"
-echo "* Verify Nova *"
-echo "******************************"
-
-echo "Show nova aggregate:"
-nova aggregate-list
-
-curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "net1", "admin_state_up": true, "availability_zone_hints": ["az1"]}}'
-curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "net2", "admin_state_up": true, "availability_zone_hints": ["az2"]}}'
-
-echo "Create external network ext-net by curl:"
-curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "ext-net", "admin_state_up": true, "router:external": true, "provider:network_type": "vlan", "provider:physical_network": "extern", "availability_zone_hints": ["Pod2"]}}'
-
-echo "Create test flavor:"
-nova flavor-create test 1 1024 10 1
-
-echo "******************************"
-echo "* Verify Neutron *"
-echo "******************************"
-
-echo "Create external subnet with floating ips:"
-neutron subnet-create --name ext-subnet --disable-dhcp ext-net 10.50.11.0/26 --allocation-pool start=10.50.11.30,end=10.50.11.50 --gateway 10.50.11.1
-
-echo "Create router for subnets:"
-neutron router-create router
-
-echo "Set router external gateway:"
-neutron router-gateway-set router ext-net
-
-echo "Create net1 in Node1:"
-neutron subnet-create net1 10.0.1.0/24
-
-echo "Create net2 in Node2:"
-neutron subnet-create net2 10.0.2.0/24
-
-net1_id=$(neutron net-list |grep net1 | awk '{print $2}')
-net2_id=$(neutron net-list |grep net2 | awk '{print $2}')
-image_id=$(glance image-list |awk 'NR==4 {print $2}')
-
-echo "Boot vm1 in az1:"
-nova boot --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az1 vm1
-echo "Boot vm2 in az2:"
-nova boot --flavor 1 --image $image_id --nic net-id=$net2_id --availability-zone az2 vm2
-
-subnet1_id=$(neutron net-list |grep net1 |awk '{print $6}')
-subnet2_id=$(neutron net-list |grep net2 |awk '{print $6}')
-
-echo "Add interface of subnet1:"
-neutron router-interface-add router $subnet1_id
-echo "Add interface of subnet2:"
-neutron router-interface-add router $subnet2_id
-
-echo "******************************"
-echo "* Verify VNC connection *"
-echo "******************************"
-
-echo "Get the VNC url of vm1:"
-nova --os-region-name Pod1 get-vnc-console vm1 novnc
-echo "Get the VNC url of vm2:"
-nova --os-region-name Pod2 get-vnc-console vm2 novnc
-
-echo "**************************************"
-echo "* Verify External network *"
-echo "**************************************"
-
-echo "Create floating ip:"
-neutron floatingip-create ext-net
-
-echo "Show floating ips:"
-neutron floatingip-list
-
-echo "Show neutron ports:"
-neutron port-list
-
-floatingip_id=$(neutron floatingip-list | awk 'NR==4 {print $2}')
-port_id=$(neutron port-list |grep 10.0.2.3 |awk '{print $2}')
-
-echo "Associate floating ip:"
-neutron floatingip-associate $floatingip_id $port_id
diff --git a/devstack/verify_top_install.sh b/devstack/verify_top_install.sh
deleted file mode 100755
index 8fc6927..0000000
--- a/devstack/verify_top_install.sh
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/bin/bash
-#
-# Script name: verify_top_install.sh
-# This script is to verify the installation of Tricircle in Top OpenStack.
-#
-# In this script, there are some parameters you need to consider before running it.
-#
-# 1, Post URL whether is 127.0.0.1 or something else,
-# 2, This script create a subnet called net1 10.0.0.0/24, Change these if needed.
-#
-# Change the parameters according to your own environment.
-# Execute "verify_top_install.sh" in the top OpenStack
-#
-# Author: Pengfei Shi
-#
-
-set -o xtrace
-
-TEST_DIR=$(pwd)
-echo "Test work directory is $TEST_DIR."
-
-if [ ! -r admin-openrc.sh ];then
- set -o xtrace
- echo "Your work directory doesn't have admin-openrc.sh,"
- echo "Please check whether you are in tricircle/devstack/ or not and run this script."
-exit 1
-fi
-
-echo "Begining the verify testing..."
-
-echo "Import client environment variables:"
-source $TEST_DIR/admin-openrc.sh
-
-echo "******************************"
-echo "* Verify Endpoint *"
-echo "******************************"
-
-echo "List openstack endpoint:"
-
-openstack --debug endpoint list
-
-token=$(openstack token issue | awk 'NR==5 {print $4}')
-
-echo $token
-
-curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-
-curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-
-echo "******************************"
-echo "* Verify Nova *"
-echo "******************************"
-
-echo "Show nova aggregate:"
-nova --debug aggregate-list
-
-echo "Create test flavor:"
-nova --debug flavor-create test 1 1024 10 1
-
-echo "******************************"
-echo "* Verify Neutron *"
-echo "******************************"
-
-echo "Create net1:"
-neutron --debug net-create net1
-
-echo "Create subnet of net1:"
-neutron --debug subnet-create net1 10.0.0.0/24
-
-image_id=$(glance image-list |awk 'NR==4 {print $2}')
-net_id=$(neutron net-list|grep net1 |awk '{print $2}')
-
-echo "Boot vm1 in az1:"
-nova --debug boot --flavor 1 --image $image_id --nic net-id=$net_id --availability-zone az1 vm1
-
-echo "******************************"
-echo "* Verify Cinder *"
-echo "******************************"
-
-echo "Create a volume in az1:"
-cinder --debug create --availability-zone=az1 1
-
-echo "Show volume list:"
-cinder --debug list
-volume_id=$(cinder list |grep lvmdriver-1 | awk '{print $2}')
-
-echo "Show detailed volume info:"
-cinder --debug show $volume_id
-
-echo "Delete test volume:"
-cinder --debug delete $volume_id
-cinder --debug list
diff --git a/doc/source/api_v1.rst b/doc/source/api_v1.rst
index 13fe42b..fa6c01c 100644
--- a/doc/source/api_v1.rst
+++ b/doc/source/api_v1.rst
@@ -1,13 +1,13 @@
=======================
-The Tricircle Admin API
+The Trio2o Admin API
=======================
-This Admin API describes the ways of interacting with the Tricircle service
+This Admin API describes the ways of interacting with the Trio2o service
via HTTP protocol using Representational State Transfer(ReST).
API Versions
============
In order to bring new features to users over time, versioning is supported
-by the Tricircle. The latest version of the Tricircle is v1.0.
+by the Trio2o. The latest version of the Trio2o is v1.0.
The Version APIs work the same as other APIs as they still require
authentication.
@@ -22,20 +22,20 @@ Service URLs
============
All API calls through the rest of this document require authentication with
the OpenStack Identity service. They also require a base service url that can
-be got from the OpenStack Tricircle endpoint. This will be the root url that
+be obtained from the OpenStack Trio2o endpoint. This will be the root url that
every call below will be added to build a full path.
-For instance, if the Tricircle service url is http://127.0.0.1:19999/v1.0 then
+For instance, if the Trio2o service url is http://127.0.0.1:19999/v1.0 then
the full API call for /pods is http://127.0.0.1:19999/v1.0/pods.
As such, for the rest of this document we will leave out the root url where
-GET /pods really means GET {tricircle_service_url}/pods.
+GET /pods really means GET {trio2o_service_url}/pods.
Pod
===
-A pod represents a region in Keystone. When operating a pod, the Tricircle
+A pod represents a region in Keystone. When operating a pod, the Trio2o
decides the correct endpoints to send request based on the region of the pod.
-Considering the 2-layers architecture of the Tricircle, we also have two kinds
+Considering the two-layer architecture of the Trio2o, we also have two kinds
of pods: top pod and bottom pod.
@@ -59,7 +59,7 @@ following table.
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
-| | | |bottom pod, the Tricircle automatically creates a |
+| | | |bottom pod, the Trio2o automatically creates a |
| | | |host aggregation and assigns the new availability |
| | | |zone id to it. |
+-----------+-------+---------------+-----------------------------------------------------+
@@ -142,7 +142,7 @@ means a bottom pod. All of its attributes are described in the following table.
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
-| | | |bottom pod, the Tricircle automatically creates a |
+| | | |bottom pod, the Trio2o automatically creates a |
| | | |host aggregation and assigns the new availability |
| | | |zone id to it. |
+-----------+-------+---------------+-----------------------------------------------------+
@@ -198,7 +198,7 @@ in the following table.
+===========+=======+===============+=====================================================+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
-| | | |bottom pod, the Tricircle automatically creates a |
+| | | |bottom pod, the Trio2o automatically creates a |
| | | |host aggregation and assigns the new availability |
| | | |zone id to it. |
+-----------+-------+---------------+-----------------------------------------------------+
@@ -232,7 +232,7 @@ are listed below.
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
-| | | |bottom pod, the Tricircle automatically creates a |
+| | | |bottom pod, the Trio2o automatically creates a |
| | | |host aggregation and assigns the new availability |
| | | |zone id to it. |
+-----------+-------+---------------+-----------------------------------------------------+
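An illustrative call against the pod API described above, assuming the example service url from this document and an admin token stored in $token (a sketch, not part of the API reference)::

    # list all pods registered in the Trio2o
    curl -H "X-Auth-Token: $token" http://127.0.0.1:19999/v1.0/pods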
diff --git a/doc/source/conf.py b/doc/source/conf.py
index e1d893d..5e8915d 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -37,7 +37,7 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'tricircle'
+project = u'trio2o'
copyright = u'2015, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index f614d5c..adfde1d 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,9 +1,9 @@
-.. tricircle documentation master file, created by
+.. trio2o documentation master file, created by
sphinx-quickstart on Wed Dec 2 17:00:36 2015.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
-Welcome to tricircle's documentation!
+Welcome to trio2o's documentation!
========================================================
Contents:
diff --git a/doc/source/installation.rst b/doc/source/installation.rst
index 6a0b2fd..8a120b2 100644
--- a/doc/source/installation.rst
+++ b/doc/source/installation.rst
@@ -1,467 +1,270 @@
-=====================
-Installation with pip
-=====================
+==================================
+Trio2o installation with DevStack
+==================================
-At the command line::
+Now the Trio2o can be played with an all-in-one single node DevStack. For
+the resource requirements to set up a single node DevStack, please refer
+to `All-In-One Single Machine `_ for
+installing DevStack on a physical machine,
+or `All-In-One Single VM `_ for
+installing DevStack in a virtual machine.
- $ pip install tricircle
-
-Or, if you have virtualenvwrapper installed::
-
- $ mkvirtualenv tricircle
- $ pip install tricircle
-
-
-======================================
-Single node installation with DevStack
-======================================
-
-Now the Tricircle can be played with DevStack.
-
-- 1 Install DevStack. Please refer to
- http://docs.openstack.org/developer/devstack/
+- 1 Install DevStack. Please refer to `DevStack document
+ `_
  on how to install DevStack into single VM or physical machine
+
- 2 In DevStack folder, create a file local.conf, and copy the content of
- https://github.com/openstack/tricircle/blob/master/devstack/local.conf.sample
+ https://github.com/openstack/trio2o/blob/master/devstack/local.conf.sample
to local.conf, change password in the file if needed.
-- 3 Run DevStack. In DevStack folder, run::
+
+- 3 In local.conf, change HOST_IP to the host's IP address where the Trio2o
+  will be installed, for example::
+
+ HOST_IP=162.3.124.203
+
+- 4 Run DevStack. In DevStack folder, run::
./stack.sh
-- 4 In DevStack folder, create a file adminrc, and copy the content of
- https://github.com/openstack/tricircle/blob/master/devstack/admin-openrc.sh
- to the adminrc, change the password in the file if needed.
- And run the following command to set the environment variables::
+- 5 After DevStack successfully starts, we need to create environment variables
+  for the user (the admin user is used as the example in this document). In
+  DevStack folder::
- source adminrc
+ source openrc admin admin
-- 5 After DevStack successfully starts, check if services have been correctly
- registered. Run "openstack endpoint list" and you should get output look
- like as following::
+- 6 Unset the region name environment variable, so that commands can be issued
+  to the specified region as needed in the following steps::
+
+ unset OS_REGION_NAME
+
+- 7 Check if services have been correctly registered. Run::
+
+ openstack --os-region-name=RegionOne endpoint list
+
+  You should get output like the following::
+----------------------------------+-----------+--------------+----------------+
| ID | Region | Service Name | Service Type |
+----------------------------------+-----------+--------------+----------------+
- | 230059e8533e4d389e034fd68257034b | RegionOne | glance | image |
- | 25180a0a08cb41f69de52a7773452b28 | RegionOne | nova | compute |
- | bd1ed1d6f0cc42398688a77bcc3bda91 | Pod1 | neutron | network |
- | 673736f54ec147b79e97c395afe832f9 | RegionOne | ec2 | ec2 |
- | fd7f188e2ba04ebd856d582828cdc50c | RegionOne | neutron | network |
- | ffb56fd8b24a4a27bf6a707a7f78157f | RegionOne | keystone | identity |
- | 88da40693bfa43b9b02e1478b1fa0bc6 | Pod1 | nova | compute |
- | f35d64c2ddc44c16a4f9dfcd76e23d9f | RegionOne | nova_legacy | compute_legacy |
- | 8759b2941fe7469e9651de3f6a123998 | RegionOne | tricircle | Cascading |
+ | e8a1f1a333334106909e05037db3fbf6 | Pod1 | neutron | network |
+ | 72c02a11856a4814a84b60ff72e0028d | Pod1 | cinderv2 | volumev2 |
+ | a26cff63563a480eaba334185a7f2cec | Pod1 | nova | compute |
+ | f90d97f8959948088ab58bc143ecb011 | RegionOne | cinderv3 | volumev3 |
+ | ed1af45af0d8459ea409e5c0dd0aadba | RegionOne | cinder | volume |
+ | ae6024a582534c21aee0c6d7fa5b90fb | RegionOne | nova | compute |
+ | c75ab09edc874bb781b0d376cec74623 | RegionOne | cinderv2 | volumev2 |
+ | 80ce6a2d12aa43fab693f4e619670d97 | RegionOne | trio2o | Cascading |
+ | 11a4b451da1a4db6ae14b0aa282f9ba6 | RegionOne | nova_legacy | compute_legacy |
+ | 546a8abf29244223bc9d5dd4960553a7 | RegionOne | glance | image |
+ | 0e9c9343b50e4b7080b25f4e297f79d3 | RegionOne | keystone | identity |
+----------------------------------+-----------+--------------+----------------+
+ "RegionOne" is the region where the Trio2o Admin API(ID is
+ 80ce6a2d12aa43fab693f4e619670d97 in the above list), Nova API gateway(
+ ID is ae6024a582534c21aee0c6d7fa5b90fb) and Cinder API gateway( ID is
+ c75ab09edc874bb781b0d376cec74623) are running in. "Pod1" is the normal
+ bottom OpenStack region which includes Nova, Cinder, Neutron.
- "RegionOne" is the region you set in local.conf via REGION_NAME, whose default
- value is "RegionOne", we use it as the region for the Tricircle instance;
- "Pod1" is the region set via "POD_REGION_NAME", new configuration option
- introduced by the Tricircle, we use it as the bottom OpenStack instance.
-- 6 Create pod instances for Tricircle and bottom OpenStack::
+- 8 Get a token for the later commands. Run::
- curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
+ openstack --os-region-name=RegionOne token issue
- curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
+- 9 Create pod instances for the Trio2o to manage the mapping between
+  availability zones and OpenStack instances; the "$token" is obtained in
+  step 8::
+
+ curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
+ -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
+
+ curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
+ -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
Pay attention to "pod_name" parameter we specify when creating pod. Pod name
- should exactly match the region name registered in Keystone since it is used
- by the Tricircle to route API request. In the above commands, we create pods
- named "RegionOne" and "Pod1" for the Tricircle instance and bottom OpenStack
- instance. The Tricircle API service will automatically create an aggregate
- when user creates a bottom pod, so command "nova aggregate-list" will show
- the following result::
+ should exactly match the region name registered in Keystone. In the above
+ commands, we create pods named "RegionOne" and "Pod1".
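A hedged sketch for scripting the two steps above: the $token used by the curl calls can be captured with the standard openstackclient output filters (-f value -c id); this is only an illustration, not an addition to the patched document::

    # store the admin token issued in step 8 for the Trio2o Admin API calls
    token=$(openstack --os-region-name=RegionOne token issue -f value -c id)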
- +----+----------+-------------------+
- | Id | Name | Availability Zone |
- +----+----------+-------------------+
- | 1 | ag_Pod1 | az1 |
- +----+----------+-------------------+
-
-- 7 Create necessary resources to boot a virtual machine::
-
- nova flavor-create test 1 1024 10 1
- neutron net-create net1
- neutron subnet-create net1 10.0.0.0/24
- glance image-list
-
- Note that flavor mapping has not been implemented yet so the created flavor
- is just record saved in database as metadata. Actual flavor is saved in
- bottom OpenStack instance.
-- 8 Boot a virtual machine::
-
- nova boot --flavor 1 --image $image_id --nic net-id=$net_id --availability-zone az1 vm1
-
-- 9 Create, list, show and delete volume::
-
- cinder --debug create --availability-zone=az1 1
- cinder --debug list
- cinder --debug show $volume_id
- cinder --debug delete $volume_id
- cinder --debug list
-
-Verification with script
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-
-A sample of admin-openrc.sh and an installation verification script can be found
-in devstack/ in the Tricircle root folder. 'admin-openrc.sh' is used to create
-environment variables for the admin user as the following::
-
- export OS_PROJECT_DOMAIN_ID=default
- export OS_USER_DOMAIN_ID=default
- export OS_PROJECT_NAME=admin
- export OS_TENANT_NAME=admin
- export OS_USERNAME=admin
- export OS_PASSWORD=password #change password as you set in your own environment
- export OS_AUTH_URL=http://127.0.0.1:5000
- export OS_IDENTITY_API_VERSION=3
- export OS_IMAGE_API_VERSION=2
- export OS_REGION_NAME=RegionOne
-
-The command to use the admin-openrc.sh is::
-
- source tricircle/devstack/admin-openrc.sh
-
-'verify_top_install.sh' script is to quickly verify the installation of
-the Tricircle in Top OpenStack as the step 5-9 above and save the output
-to logs.
-
-Before verifying the installation, you should modify the script based on your
-own environment.
+- 10 Create necessary resources in the local Neutron server::
-- 1 The default post URL is 127.0.0.1, change it if needed,
-- 2 The default create net1's networ address is 10.0.0.0/24, change it if
- needed.
+ neutron --os-region-name=Pod1 net-create net1
+ neutron --os-region-name=Pod1 subnet-create net1 10.0.0.0/24
-Then you do the following steps to verify::
+  Please note that the net1 ID will be used in a later step to boot the VM.
- cd tricircle/devstack/
- ./verify_top_install.sh 2>&1 | tee logs
+- 11 Get the image ID and flavor ID which will be used when booting the VM::
+ glance --os-region-name=RegionOne image-list
+ nova --os-region-name=RegionOne flavor-create test 1 1024 10 1
+ nova --os-region-name=RegionOne flavor-list
-======================================================================
-Two nodes installation with DevStack for Cross-OpenStack L3 networking
-======================================================================
+- 12 Boot a virtual machine::
-Introduction
-^^^^^^^^^^^^
+ nova --os-region-name=RegionOne boot --flavor 1 --image $image_id --nic net-id=$net_id vm1
-Now the Tricircle supports cross-pod l3 networking.
+- 13 Verify the VM is connected to net1::
-To achieve cross-pod l3 networking, Tricircle utilizes a shared provider VLAN
-network at first phase. We are considering later using DCI controller to create
-a multi-segment VLAN network, VxLAN network for L3 networking purpose. When a
-subnet is attached to a router in top pod, Tricircle not only creates
-corresponding subnet and router in bottom pod, but also creates a VLAN type
-"bridge" network. Both tenant network and "bridge" network are attached to
-bottom router. Each tenant will have one allocated VLAN, which is shared by
-the tenant's "bridge" networks across bottom pods. The CIDRs of "bridge"
-networks for one tenant are also the same, so the router interfaces in
-"bridge" networks across different bottom pods can communicate with each
-other via the provider VLAN network. By adding an extra route as following::
+ neutron --os-region-name=Pod1 port-list
+ nova --os-region-name=RegionOne list
- destination: CIDR of tenant network in another bottom pod
- nexthop: "bridge" network interface ip in another bottom pod
+- 14 Create, list, show and delete volume::
-when a server sends a packet whose receiver is in another network and in
-another bottom pod, the packet first goes to router namespace, then is
-forwarded to the router namespace in another bottom pod according to the extra
-route, at last the packet is sent to the target server. This configuration job
-is triggered when user attaches a subnet to a router in top pod and finished
-asynchronously.
+ cinder --os-region-name=RegionOne create --availability-zone=az1 1
+ cinder --os-region-name=RegionOne list
+ cinder --os-region-name=RegionOne show $volume_id
+ cinder --os-region-name=RegionOne delete $volume_id
+ cinder --os-region-name=RegionOne list
-Currently cross-pod L2 networking is not supported yet, so tenant networks
-cannot cross pods, that is to say, one network in top pod can only locate in
-one bottom pod, tenant network is bound to bottom pod. Otherwise we cannot
-correctly configure extra route since for one destination CIDR, we have more
-than one possible nexthop addresses.
+- 15 Use --debug to make sure the commands are issued to the Nova API
+ gateway or the Cinder API gateway::
-*When cross-pod L2 networking is introduced, L2GW will be used to connect L2
-network in different pods. No extra route is required to connect L2 network
-All L3 traffic will be forwarded to the local L2 network, then go to the
-server in another pod via the L2GW.*
+ nova --debug --os-region-name=RegionOne list
+ cinder --debug --os-region-name=RegionOne list
-We use "availability_zone_hints" attribute for user to specify the bottom pod
-he wants to create the bottom network. Currently we do not support attaching
-a network to a router without setting "availability_zone_hints" attribute of
-the network.
+ The nova command should be sent to http://162.3.124.203:19998/ and the
+ cinder command to http://162.3.124.203:19997/
-Prerequisite
-^^^^^^^^^^^^
+========================================
+Add another pod to Trio2o with DevStack
+========================================
+- 1 Prepare another node (suppose it's node-2) and make sure the node is
+ ping-able from the node (suppose it's node-1) where the Trio2o is installed
+ and running. For the resource requirements to set up DevStack on another
+ node, please refer to `All-In-One Single Machine `_ for
+ installing DevStack on a physical machine
+ or `All-In-One Single VM `_ for
+ installing DevStack in a virtual machine.
-To play cross-pod L3 networking, two nodes are needed. One to run Tricircle
-and one bottom pod, the other one to run another bottom pod. Both nodes have
-two network interfaces, for management and provider VLAN network. For VLAN
-network, the physical network infrastructure should support VLAN tagging. If
-you would like to try north-south networking, too, you should prepare one more
-network interface in the second node for external network. In this guide, the
-external network is also vlan type, so the local.conf sample is based on vlan
-type external network setup.
+- 2 Install DevStack in node-2. Please refer to the `DevStack document
+ `_
+ on how to install DevStack into a single VM or physical machine.
-Setup
-^^^^^
-In node1,
+- 3 In the node-2 DevStack folder, create a file local.conf, copy the
+ content of https://github.com/openstack/trio2o/blob/master/devstack/local.conf.sample2
+ to local.conf, and change the password in the file if needed.
-- 1 Git clone DevStack.
-- 2 Git clone Tricircle, or just download devstack/local.conf.node_1.sample.
-- 3 Copy devstack/local.conf.node_1.sample to DevStack folder and rename it to
- local.conf, change password in the file if needed.
-- 4 Change the following options according to your environment::
+- 4 In node-2's local.conf, change REGION_NAME if needed; it will be used
+ as the region name of the OpenStack instance in node-2::
- HOST_IP=10.250.201.24
+ REGION_NAME=Pod2
- change to your management interface ip::
+- 5 In node-2's local.conf, change the following IP addresses to node-2's
+ host IP address; for example, if node-2's management interface IP address is 162.3.124.204::
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000)
+ HOST_IP=162.3.124.204
+ SERVICE_HOST=162.3.124.204
- the format is (network_vlan_ranges=::),
- you can change physical network name, but remember to adapt your change
- to the commands showed in this guide; also, change min vlan and max vlan
- to adapt the vlan range your physical network supports::
+- 6 In node-2, OpenStack will use the KeyStone running in node-1, so set
+ KEYSTONE_REGION_NAME to the region of node-1's KeyStone and the KeyStone
+ host IP addresses to node-1's IP address accordingly::
- OVS_BRIDGE_MAPPINGS=bridge:br-bridge
+ KEYSTONE_REGION_NAME=RegionOne
+ KEYSTONE_SERVICE_HOST=162.3.124.203
+ KEYSTONE_AUTH_HOST=162.3.124.203
- the format is :, you can change
- these names, but remember to adapt your change to the commands showed in
- this guide::
+- 7 In node-2, OpenStack will use the Glance running in node-1, so change
+ the GLANCE_SERVICE_HOST IP address to node-1's IP address accordingly::
+ GLANCE_SERVICE_HOST=162.3.124.203
- Q_USE_PROVIDERNET_FOR_PUBLIC=True
+- 8 Run DevStack. In the DevStack folder, run::
- use this option if you would like to try L3 north-south networking.
+ ./stack.sh
+- 9 After node-2's DevStack successfully starts, return to node-1. In the
+ node-1 DevStack folder::
-- 5 Create OVS bridge and attach the VLAN network interface to it::
+ source openrc admin admin
- sudo ovs-vsctl add-br br-bridge
- sudo ovs-vsctl add-port br-bridge eth1
+- 10 Unset the region name environment variable in node-1, so that the
+ following commands can be issued to the specified region as needed::
- br-bridge is the OVS bridge name you configure on OVS_PHYSICAL_BRIDGE, eth1 is
- the device name of your VLAN network interface
-- 6 Run DevStack.
-- 7 After DevStack successfully starts, begin to setup node2.
+ unset OS_REGION_NAME
-In node2,
+- 11 Check if services in node-1 and node-2 have been correctly registered.
+ Run::
-- 1 Git clone DevStack.
-- 2 Git clone Tricircle, or just download devstack/local.conf.node_2.sample.
-- 3 Copy devstack/local.conf.node_2.sample to DevStack folder and rename it to
- local.conf, change password in the file if needed.
-- 4 Change the following options according to your environment::
+ openstack --os-region-name=RegionOne endpoint list
- HOST_IP=10.250.201.25
+ You should get output similar to the following::
- change to your management interface ip::
+ +----------------------------------+-----------+--------------+----------------+
+ | ID | Region | Service Name | Service Type |
+ +----------------------------------+-----------+--------------+----------------+
+ | e09ca9acfa6341aa8f2671571c73db28 | RegionOne | glance | image |
+ | 2730fbf212604687ada1f20b203fa0d7 | Pod2 | nova_legacy | compute_legacy |
+ | 7edd2273b0ae4bc68bbf714f561c2958 | Pod2 | cinder | volume |
+ | b39c6e4d1be143d694f620b53b4a6015 | Pod2 | cinderv2 | volumev2 |
+ | 9612c10655bb4fc994f3db4af72bfdac | Pod2 | nova | compute |
+ | 6c28b4a76fa148578a12423362a5ade1 | RegionOne | trio2o | Cascading |
+ | a1f439e8933d48e9891d238ad8e18bd5 | RegionOne | keystone | identity |
+ | 452b249592d04f0b903ee24fa0dbb573 | RegionOne | nova | compute |
+ | 30e7efc5e8f841f192cbea4da31ae5d5 | RegionOne | cinderv3 | volumev3 |
+ | 63b88f4023cc44b59cfca53ad9606b85 | RegionOne | cinderv2 | volumev2 |
+ | 653693d607934da7b7724c0cd1c49fb0 | Pod2 | neutron | network |
+ | 3e3ccb71b8424958ad5def048077ddf8 | Pod1 | nova | compute |
+ | d4615bce839f43f2a8856f3795df6833 | Pod1 | neutron | network |
+ | fd2004b26b6847df87d1036c2363ed22 | RegionOne | cinder | volume |
+ | 04ae8677ec704b779a1c00fa0eca2636 | Pod1 | cinderv2 | volumev2 |
+ | e11be9f233d1434bbf8c4b8edf6a2f50 | RegionOne | nova_legacy | compute_legacy |
+ | d50e2dfbb87b43e98a5899eae4fd4d72 | Pod2 | cinderv3 | volumev3 |
+ +----------------------------------+-----------+--------------+----------------+
- KEYSTONE_SERVICE_HOST=10.250.201.24
+ "RegionOne" is the region where the Trio2o Admin API(ID is
+ 6c28b4a76fa148578a12423362a5ade1 in the above list), Nova API gateway(
+ ID is 452b249592d04f0b903ee24fa0dbb573) and Cinder API gateway(ID is
+ 63b88f4023cc44b59cfca53ad9606b85) are running in. "Pod1" is the normal
+ bottom OpenStack region which includes Nova, Cinder, Neutron in node-1.
+ "Pod2" is the normal bottom OpenStack region which includes Nova, Cinder,
+ Neutron in node-2.
- change to management interface ip of node1::
+- 12 Get a token for the later commands. Run::
- KEYSTONE_AUTH_HOST=10.250.201.24
+ openstack --os-region-name=RegionOne token issue
- change to management interface ip of node1::
+- 13 Create a pod instance for Pod2 so that the Trio2o can manage the mapping
+ between availability zones and OpenStack instances. The "$token" is the one
+ obtained in step 12::
- GLANCE_SERVICE_HOST=10.250.201.24
+ curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
+ -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod2", "az_name": "az2"}}'
- change to management interface ip of node1::
+ Pay attention to the "pod_name" parameter we specify when creating a pod.
+ The pod name should exactly match the region name registered in Keystone.
+ In the above command, we create a pod named "Pod2" in availability zone "az2".
- Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=(network_vlan_ranges=bridge:2001:3000,extern:3001:4000)
+- 14 Create the necessary resources in the local Neutron server::
- the format is (network_vlan_ranges=::),
- you can change physical network name, but remember to adapt your change
- to the commands showed in this guide; also, change min vlan and max vlan
- to adapt the vlan range your physical network supports::
+ neutron --os-region-name=Pod2 net-create net2
+ neutron --os-region-name=Pod2 subnet-create net2 10.0.0.0/24
- OVS_BRIDGE_MAPPINGS=bridge:br-bridge,extern:br-ext
+ Please note that the net2 ID will be used in a later step to boot the VM.
- the format is :, you can change
- these names, but remember to adapt your change to the commands showed in
- this guide::
+- 15 Get the image ID and flavor ID which will be used when booting the VM.
+ The flavor should have been created during the node-1 installation; if not,
+ please create one::
- Q_USE_PROVIDERNET_FOR_PUBLIC=True
+ glance --os-region-name=RegionOne image-list
+ nova --os-region-name=RegionOne flavor-create test 1 1024 10 1
+ nova --os-region-name=RegionOne flavor-list
- use this option if you would like to try L3 north-south networking.
+- 16 Boot a virtual machine in net2, replacing $net_id with net2's ID::
- In this guide, we define two physical networks in node2, one is "bridge" for
- bridge network, the other one is "extern" for external network. If you do not
- want to try L3 north-south networking, you can simply remove the "extern" part.
- The external network type we use in the guide is vlan, if you want to use other
- network type like flat, please refer to
- [DevStack document](http://docs.openstack.org/developer/devstack/).
+ nova --os-region-name=RegionOne boot --availability-zone az2 --flavor 1 --image $image_id --nic net-id=$net_id vm2
-- 5 Create OVS bridge and attach the VLAN network interface to it::
+- 17 Verify the VM is connected to net2::
- sudo ovs-vsctl add-br br-bridge
- sudo ovs-vsctl add-port br-bridge eth1
+ neutron --os-region-name=Pod2 port-list
+ nova --os-region-name=RegionOne list
- br-bridge is the OVS bridge name you configure on OVS_PHYSICAL_BRIDGE, eth1 is
- the device name of your VLAN network interface
-- 6 Run DevStack.
-- 7 After DevStack successfully starts, the setup is finished.
+- 18 Create, list, show and delete volume::
-How to play
-^^^^^^^^^^^
-
-All the following operations are performed in node1
-
-- 1 Check if services have been correctly registered. Run "openstack endpoint
- list" and you should get similar output as following::
-
- +----------------------------------+-----------+--------------+----------------+
- | ID | Region | Service Name | Service Type |
- +----------------------------------+-----------+--------------+----------------+
- | 1fadbddef9074f81b986131569c3741e | RegionOne | tricircle | Cascading |
- | a5c5c37613244cbab96230d9051af1a5 | RegionOne | ec2 | ec2 |
- | 809a3f7282f94c8e86f051e15988e6f5 | Pod2 | neutron | network |
- | e6ad9acc51074f1290fc9d128d236bca | Pod1 | neutron | network |
- | aee8a185fa6944b6860415a438c42c32 | RegionOne | keystone | identity |
- | 280ebc45bf9842b4b4156eb5f8f9eaa4 | RegionOne | glance | image |
- | aa54df57d7b942a1a327ed0722dba96e | Pod2 | nova_legacy | compute_legacy |
- | aa25ae2a3f5a4e4d8bc0cae2f5fbb603 | Pod2 | nova | compute |
- | 932550311ae84539987bfe9eb874dea3 | RegionOne | nova_legacy | compute_legacy |
- | f89fbeffd7e446d0a552e2a6cf7be2ec | Pod1 | nova | compute |
- | e2e19c164060456f8a1e75f8d3331f47 | Pod2 | ec2 | ec2 |
- | de698ad5c6794edd91e69f0e57113e97 | RegionOne | nova | compute |
- | 8a4b2332d2a4460ca3f740875236a967 | Pod2 | keystone | identity |
- | b3ad80035f8742f29d12df67bdc2f70c | RegionOne | neutron | network |
- +----------------------------------+-----------+--------------+----------------+
-
- "RegionOne" is the region you set in local.conf via REGION_NAME in node1, whose
- default value is "RegionOne", we use it as the region for Tricircle; "Pod1" is
- the region set via POD_REGION_NAME, new configuration option introduced by
- Tricircle, we use it as the bottom OpenStack; "Pod2" is the region you set via
- REGION_NAME in node2, we use it as another bottom OpenStack. In node2, you also
- need to set KEYSTONE_REGION_NAME the same as REGION_NAME in node1, which is
- "RegionOne" in this example. So services in node2 can interact with Keystone
- service in RegionOne.
-- 2 Create pod instances for Tricircle and bottom OpenStack::
-
- curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-
- curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-
- curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod2", "az_name": "az2"}}'
-
-- 3 Create network with AZ scheduler hints specified::
-
- curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "net1", "admin_state_up": true, "availability_zone_hints": ["az1"]}}'
- curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "net2", "admin_state_up": true, "availability_zone_hints": ["az2"]}}'
-
- Here we create two networks separately bound to Pod1 and Pod2
-- 4 Create necessary resources to boot virtual machines::
-
- nova flavor-create test 1 1024 10 1
- neutron subnet-create net1 10.0.1.0/24
- neutron subnet-create net2 10.0.2.0/24
- glance image-list
-
-- 5 Boot virtual machines::
-
- nova boot --flavor 1 --image $image_id --nic net-id=$net1_id --availability-zone az1 vm1
- nova boot --flavor 1 --image $image_id --nic net-id=$net2_id --availability-zone az2 vm2
-
-- 6 Create router and attach interface::
-
- neutron router-create router
- neutron router-interface-add router $subnet1_id
- neutron router-interface-add router $subnet2_id
-
-- 7 Launch VNC console anc check connectivity
- By now, two networks are connected by the router, the two virtual machines
- should be able to communicate with each other, we can launch a VNC console to
- check. Currently Tricircle doesn't support VNC proxy, we need to go to bottom
- OpenStack to obtain a VNC console::
-
- nova --os-region-name Pod1 get-vnc-console vm1 novnc
- nova --os-region-name Pod2 get-vnc-console vm2 novnc
-
- Login one virtual machine via VNC and you should find it can "ping" the other
- virtual machine. Default security group is applied so no need to configure
- security group rule.
-
-North-South Networking
-^^^^^^^^^^^^^^^^^^^^^^
-
-Before running DevStack in node2, you need to create another ovs bridge for
-external network and then attach port::
-
- sudo ovs-vsctl add-br br-ext
- sudo ovs-vsctl add-port br-ext eth2
-
-Below listed the operations related to north-south networking.
-
-- 1 Create external network::
-
- curl -X POST http://127.0.0.1:9696/v2.0/networks -H "Content-Type: application/json" \
- -H "X-Auth-Token: $token" \
- -d '{"network": {"name": "ext-net", "admin_state_up": true, "router:external": true, "provider:network_type": "vlan", "provider:physical_network": "extern", "availability_zone_hints": ["Pod2"]}}'
-
- Pay attention that when creating external network, we still need to pass
- "availability_zone_hints" parameter, but the value we pass is the name of pod,
- not the name of availability zone.
-
- *Currently external network needs to be created before attaching subnet to the
- router, because plugin needs to utilize external network information to setup
- bridge network when handling interface adding operation. This limitation will
- be removed later.*
-
-- 2 Create external subnet::
-
- neutron subnet-create --name ext-subnet --disable-dhcp ext-net 163.3.124.0/24
-
-- 3 Set router external gateway::
-
- neutron router-gateway-set router ext-net
-
- Now virtual machine in the subnet attached to the router should be able to
- "ping" machines in the external network. In our test, we use hypervisor tool
- to directly start a virtual machine in the external network to check the
- network connectivity.
-
-- 4 Create floating ip::
-
- neutron floatingip-create ext-net
-
-- 5 Associate floating ip::
-
- neutron floatingip-list
- neutron port-list
- neutron floatingip-associate $floatingip_id $port_id
-
- Now you should be able to access virtual machine with floating ip bound from
- the external network.
-
-Verification with script
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-A sample of admin-openrc.sh and an installation verification script can be
-found in devstack/ directory. And a demo blog with virtualbox can be found in https://wiki.openstack.org/wiki/Play_tricircle_with_virtualbox
-
-Script 'verify_cross_pod_install.sh' is to quickly verify the installation of
-the Tricircle in Cross Pod OpenStack as the contents above and save the output
-to logs.
-Before verifying the installation, some parameters should be modified to your
-own environment.
-
-- 1 The default URL is 127.0.0.1, change it if needed,
-- 2 This script create a external network 10.50.11.0/26 according to the work
- environment, change it if needed.
-- 3 This script create 2 subnets 10.0.1.0/24 and 10.0.2.0/24, Change these if
- needed.
-- 4 The default created floating-ip is attached to the VM with port 10.0.2.3
- created by the subnets, modify it according to your environment.
-
-Then do the following steps in Node1 OpenStack to verify network functions::
-
- cd tricircle/devstack/
- ./verify_cross_pod_install.sh 2>&1 | tee logs
+ cinder --os-region-name=RegionOne create --availability-zone=az2 1
+ cinder --os-region-name=RegionOne list
+ cinder --os-region-name=RegionOne show $volume_id
+ cinder --os-region-name=RegionOne delete $volume_id
+ cinder --os-region-name=RegionOne list
+- 19 Use --debug to make sure the commands are issued to the Nova API
+ gateway or the Cinder API gateway::
+ nova --debug --os-region-name=RegionOne list
+ cinder --debug --os-region-name=RegionOne list
+ The nova command should be sent to http://127.0.0.1:19998/ and the
+ cinder command to http://127.0.0.1:19997/
diff --git a/doc/source/usage.rst b/doc/source/usage.rst
index 7c407cf..f321170 100644
--- a/doc/source/usage.rst
+++ b/doc/source/usage.rst
@@ -2,6 +2,6 @@
Usage
======
-To use tricircle in a project::
+To use trio2o in a project::
- import tricircle
+ import trio2o
diff --git a/etc/api-cfg-gen.conf b/etc/api-cfg-gen.conf
index 5070824..13d39a3 100644
--- a/etc/api-cfg-gen.conf
+++ b/etc/api-cfg-gen.conf
@@ -1,9 +1,9 @@
[DEFAULT]
output_file = etc/api.conf.sample
wrap_width = 79
-namespace = tricircle.api
-namespace = tricircle.common
-namespace = tricircle.db
+namespace = trio2o.api
+namespace = trio2o.common
+namespace = trio2o.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
diff --git a/etc/cinder_apigw-cfg-gen.conf b/etc/cinder_apigw-cfg-gen.conf
index d6ff0ae..40284bc 100644
--- a/etc/cinder_apigw-cfg-gen.conf
+++ b/etc/cinder_apigw-cfg-gen.conf
@@ -1,9 +1,9 @@
[DEFAULT]
output_file = etc/cinder_apigw.conf.sample
wrap_width = 79
-namespace = tricircle.cinder_apigw
-namespace = tricircle.common
-namespace = tricircle.db
+namespace = trio2o.cinder_apigw
+namespace = trio2o.common
+namespace = trio2o.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
diff --git a/etc/nova_apigw-cfg-gen.conf b/etc/nova_apigw-cfg-gen.conf
index b5be4ed..d34b0e3 100644
--- a/etc/nova_apigw-cfg-gen.conf
+++ b/etc/nova_apigw-cfg-gen.conf
@@ -1,9 +1,9 @@
[DEFAULT]
output_file = etc/nova_apigw.conf.sample
wrap_width = 79
-namespace = tricircle.nova_apigw
-namespace = tricircle.common
-namespace = tricircle.db
+namespace = trio2o.nova_apigw
+namespace = trio2o.common
+namespace = trio2o.db
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
diff --git a/etc/tricircle_plugin-cfg-gen.conf b/etc/tricircle_plugin-cfg-gen.conf
deleted file mode 100644
index fa22422..0000000
--- a/etc/tricircle_plugin-cfg-gen.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[DEFAULT]
-output_file = etc/tricircle_plugin.conf.sample
-wrap_width = 79
-namespace = tricircle.network
diff --git a/etc/xjob-cfg-gen.conf b/etc/xjob-cfg-gen.conf
index dc1ed8a..db663d2 100644
--- a/etc/xjob-cfg-gen.conf
+++ b/etc/xjob-cfg-gen.conf
@@ -1,8 +1,8 @@
[DEFAULT]
output_file = etc/xjob.conf.sample
wrap_width = 79
-namespace = tricircle.xjob
-namespace = tricircle.common
+namespace = trio2o.xjob
+namespace = trio2o.common
namespace = oslo.log
namespace = oslo.messaging
namespace = oslo.policy
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index 43cfae6..e8c2a0a 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -55,7 +55,7 @@ source_suffix = '.rst'
master_doc = 'index'
# General information about the project.
-project = u'The Tricircle Release Notes'
+project = u'The Trio2o Release Notes'
copyright = u'2016, OpenStack Foundation'
# The version info for the project you're documenting, acts as replacement for
diff --git a/requirements.txt b/requirements.txt
index a865f55..9ff7310 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
-pbr>=1.6 # Apache-2.0
+pbr>=1.8 # Apache-2.0
Babel>=2.3.4 # BSD
Paste # MIT
@@ -10,38 +10,38 @@ Routes!=2.0,!=2.1,!=2.3.0,>=1.12.3;python_version=='2.7' # MIT
Routes!=2.0,!=2.3.0,>=1.12.3;python_version!='2.7' # MIT
debtcollector>=1.2.0 # Apache-2.0
eventlet!=0.18.3,>=0.18.2 # MIT
-pecan!=1.0.2,!=1.0.3,!=1.0.4,>=1.0.0 # BSD
+pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
greenlet>=0.3.2 # MIT
httplib2>=0.7.5 # MIT
requests>=2.10.0 # Apache-2.0
Jinja2>=2.8 # BSD License (3 clause)
-keystonemiddleware!=4.1.0,!=4.5.0,>=4.0.0 # Apache-2.0
-netaddr!=0.7.16,>=0.7.12 # BSD
+keystonemiddleware!=4.5.0,>=4.2.0 # Apache-2.0
+netaddr!=0.7.16,>=0.7.13 # BSD
netifaces>=0.10.4 # MIT
-neutron-lib>=0.4.0 # Apache-2.0
+neutron-lib>=1.0.0 # Apache-2.0
retrying!=1.3.0,>=1.2.3 # Apache-2.0
SQLAlchemy<1.1.0,>=1.0.10 # MIT
-WebOb>=1.2.3 # MIT
+WebOb>=1.6.0 # MIT
python-cinderclient!=1.7.0,!=1.7.1,>=1.6.0 # Apache-2.0
-python-glanceclient!=2.4.0,>=2.3.0 # Apache-2.0
-python-keystoneclient!=2.1.0,>=2.0.0 # Apache-2.0
+python-glanceclient>=2.5.0 # Apache-2.0
+python-keystoneclient>=3.6.0 # Apache-2.0
python-neutronclient>=5.1.0 # Apache-2.0
python-novaclient!=2.33.0,>=2.29.0 # Apache-2.0
alembic>=0.8.4 # MIT
six>=1.9.0 # MIT
-stevedore>=1.16.0 # Apache-2.0
+stevedore>=1.17.1 # Apache-2.0
oslo.concurrency>=3.8.0 # Apache-2.0
-oslo.config>=3.14.0 # Apache-2.0
+oslo.config!=3.18.0,>=3.14.0 # Apache-2.0
oslo.context>=2.9.0 # Apache-2.0
-oslo.db>=4.10.0 # Apache-2.0
+oslo.db!=4.13.1,!=4.13.2,>=4.11.0 # Apache-2.0
oslo.i18n>=2.1.0 # Apache-2.0
-oslo.log>=1.14.0 # Apache-2.0
+oslo.log>=3.11.0 # Apache-2.0
oslo.messaging>=5.2.0 # Apache-2.0
oslo.middleware>=3.0.0 # Apache-2.0
-oslo.policy>=1.9.0 # Apache-2.0
+oslo.policy>=1.15.0 # Apache-2.0
oslo.rootwrap>=5.0.0 # Apache-2.0
oslo.serialization>=1.10.0 # Apache-2.0
oslo.service>=1.10.0 # Apache-2.0
-oslo.utils>=3.16.0 # Apache-2.0
+oslo.utils>=3.18.0 # Apache-2.0
oslo.versionedobjects>=1.13.0 # Apache-2.0
sqlalchemy-migrate>=0.9.6 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 07e5e50..5630215 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,10 +1,10 @@
[metadata]
-name = tricircle
-summary = the Tricircle provides an OpenStack API gateway and networking automation to allow multiple OpenStack instances, spanning in one site or multiple sites or in hybrid cloud, to be managed as a single OpenStack cloud
+name = trio2o
+summary = the Trio2o provides an OpenStack API gateway to allow multiple OpenStack instances, spanning in one site or multiple sites or in hybrid cloud, to be managed as a single OpenStack cloud
description-file = README.rst
-author = OpenStack
+author = OpenStack Trio2o
author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
+home-page = https://wiki.openstack.org/wiki/Trio2o
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -20,7 +20,7 @@ classifier =
[files]
packages =
- tricircle
+ trio2o
[build_sphinx]
source-dir = doc/source
@@ -31,34 +31,29 @@ all_files = 1
upload-dir = doc/build/html
[compile_catalog]
-directory = tricircle/locale
-domain = tricircle
+directory = trio2o/locale
+domain = trio2o
[update_catalog]
-domain = tricircle
-output_dir = tricircle/locale
-input_file = tricircle/locale/tricircle.pot
+domain = trio2o
+output_dir = trio2o/locale
+input_file = trio2o/locale/trio2o.pot
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
-output_file = tricircle/locale/tricircle.pot
+output_file = trio2o/locale/trio2o.pot
[entry_points]
oslo.config.opts =
- tricircle.api = tricircle.api.opts:list_opts
- tricircle.common = tricircle.common.opts:list_opts
- tricircle.db = tricircle.db.opts:list_opts
- tricircle.network = tricircle.network.opts:list_opts
+ trio2o.api = trio2o.api.opts:list_opts
+ trio2o.common = trio2o.common.opts:list_opts
+ trio2o.db = trio2o.db.opts:list_opts
- tricircle.nova_apigw = tricircle.nova_apigw.opts:list_opts
- tricircle.cinder_apigw = tricircle.cinder_apigw.opts:list_opts
- tricircle.xjob = tricircle.xjob.opts:list_opts
+ trio2o.nova_apigw = trio2o.nova_apigw.opts:list_opts
+ trio2o.cinder_apigw = trio2o.cinder_apigw.opts:list_opts
+ trio2o.xjob = trio2o.xjob.opts:list_opts
tempest.test_plugins =
- tricircle_tests = tricircle.tempestplugin.plugin:TricircleTempestPlugin
-
-tricircle.network.type_drivers =
- local = tricircle.network.drivers.type_local:LocalTypeDriver
- shared_vlan = tricircle.network.drivers.type_shared_vlan:SharedVLANTypeDriver
+ trio2o_tests = trio2o.tempestplugin.plugin:Trio2oTempestPlugin
diff --git a/specs/cross-pod-l2-networking.rst b/specs/newton/cross-pod-l2-networking.rst
similarity index 100%
rename from specs/cross-pod-l2-networking.rst
rename to specs/newton/cross-pod-l2-networking.rst
diff --git a/specs/dynamic-pod-binding.rst b/specs/newton/dynamic-pod-binding.rst
similarity index 100%
rename from specs/dynamic-pod-binding.rst
rename to specs/newton/dynamic-pod-binding.rst
diff --git a/specs/ocata/dynamic-pod-binding.rst b/specs/ocata/dynamic-pod-binding.rst
new file mode 100644
index 0000000..6c4c73e
--- /dev/null
+++ b/specs/ocata/dynamic-pod-binding.rst
@@ -0,0 +1,236 @@
+=================================
+Dynamic Pod Binding in Trio2o
+=================================
+
+Background
+===========
+
+Most public cloud infrastructure is built with Availability Zones (AZs).
+Each AZ consists of one or more discrete data centers, each with high
+bandwidth and low latency network connections, separate power and facilities.
+These AZs offer cloud tenants the ability to operate production
+applications and databases; deployed into multiple AZs, such applications
+are more highly available, fault tolerant and scalable than in a single
+data center.
+
+In production clouds, each AZ is built from modularized OpenStack
+deployments, and each OpenStack deployment is one pod. Moreover, one AZ can
+include multiple pods, and the pods are classified into different
+categories. For example, servers in one pod may be only for general
+purposes, while other pods may be built for heavy-load CAD modeling with
+GPUs. So pods in one AZ can be divided into different groups: different pod
+groups serve different purposes, and the VMs' cost and performance also
+differ.
+
+The concept "pod" is created for the Trio2o to facilitate managing
+OpenStack instances among AZs, which therefore is transparent to cloud
+tenants. The Trio2o maintains and manages a pod binding table which
+records the mapping relationship between a cloud tenant and pods. When the
+cloud tenant creates a VM or a volume, the Trio2o tries to assign a pod
+based on the pod binding table.
+
+Motivation
+===========
+
+In a resource allocation scenario, a tenant may create a VM in one pod and a
+new volume in another pod. If the tenant then attempts to attach the volume
+to the VM, the operation will fail. In other words, the volume should be in
+the same pod as the VM; otherwise the attachment cannot be completed. Hence,
+the Trio2o needs to ensure the pod binding so as to guarantee that the VM
+and the volume are created in one pod.
+
+In a capacity expansion scenario, when resources in one pod are exhausted,
+a new pod of the same type should be added to the AZ. Therefore, new
+resources of this type should be provisioned in the newly added pod, which
+requires a dynamic change of pod binding. The pod binding could be changed
+dynamically by the Trio2o, or by the admin through the admin API for
+maintenance purposes. For example, during a maintenance (upgrade, repair)
+window, all new provisioning requests should be forwarded to the running
+pod, not the one under maintenance.
+
+Solution: dynamic pod binding
+==============================
+
+Capacity expansion inside one pod is quite a headache: you have to
+estimate, calculate, monitor, simulate, test, and do online grey expansion
+for controller nodes and network nodes whenever you add new machines to the
+pod. It is a big challenge as more and more resources are added to one pod,
+and eventually you will reach the limits of a single OpenStack instance. If
+a pod's resources are exhausted or reach the limit for provisioning new
+resources, the Trio2o needs to bind the tenant to a new pod instead of
+expanding the current pod without limit. The Trio2o needs to select a proper
+pod and keep the binding for a duration; during this duration, VMs and
+volumes for one tenant will be created in the same pod.
+
+For example, suppose we have two groups of pods, and each group has 3 pods,
+i.e.,
+
+GroupA (Pod1, Pod2, Pod3) for general purpose VMs,
+
+GroupB (Pod4, Pod5, Pod6) for CAD modeling.
+
+Tenant1 is bound to Pod1 and Pod4 during the first phase, which lasts
+several months. In the first phase, we can simply add a weight to each pod,
+for example Pod1 with weight 1 and Pod2 with weight 2. This could be done by
+adding one new field in the pod table, or with no field at all, just
+ordering the pods by the order in which they were created in the Trio2o. In
+this case, we use the pod creation time as the weight.
+
+If the tenant wants to allocate a VM/volume for general purposes, Pod1
+should be selected. This can be implemented with flavor or volume type
+metadata. For a general VM/volume, there is no special tag in the flavor or
+volume type metadata.
+
+If the tenant wants to allocate a VM/volume for CAD modeling, Pod4 should be
+selected. For a CAD modeling VM/volume, a special tag "resource: CAD Modeling"
+in the flavor or volume type metadata determines the binding.
+
+When it is detected that there are no more resources in Pod1 or Pod4, the
+Trio2o queries the pod table, based on the resource_affinity_tag, for
+available pods which provision the specific type of resource. The field
+resource_affinity_tag is a key-value pair. A pod will be selected when there
+is a matching key-value pair in the flavor extra-spec or volume extra-spec.
+A tenant will be bound to one pod in one group of pods with the same
+resource_affinity_tag. In this case, the Trio2o obtains Pod2 and Pod3 for
+general purposes, as well as Pod5 and Pod6 for CAD purposes. The Trio2o then
+needs to change the binding; for example, tenant1 needs to be bound to Pod2
+and Pod5.
+
+Implementation
+===============
+
+Measurement
+-------------
+
+To get information about the resource utilization of pods, the Trio2o needs
+to conduct some measurements on the pods. The statistics task should be done
+in the bottom pod.
+
+For resource usage, Nova cells currently provide an interface to retrieve
+usage per cell [1]. OpenStack provides details of the capacity of a cell,
+including disk and RAM, via the API for showing cell capacities [1].
+
+If OpenStack is not running in cells mode, we can ask Nova to provide an
+interface to show usage details per AZ. Moreover, an API for usage queries
+at the host level is provided for admins [3], through which we can obtain
+details of a host, including CPU, memory, disk, and so on.
+
+Cinder also provides an interface to retrieve backend pool usage,
+including update time, total capacity, free capacity and so on [2].
+
+The Trio2o needs to have a task to collect the usage from the bottom on a
+daily basis, to evaluate whether the threshold is reached or not. A threshold
+or headroom could be configured for each pod so that resources are never
+driven to 100% exhaustion.
+
+On top there should be no heavy processing, so aggregating the summary
+information from the bottom can be done in the Trio2o. After collecting the
+details, the Trio2o can judge whether a pod has reached its limit.
+
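+A minimal sketch of such a threshold check is shown below; the layout of the
+collected usage dict and the default 80% headroom are assumptions for
+illustration only::
+
+    # Illustrative threshold check; the usage dict layout and the default
+    # headroom are assumptions, not the real collected data format.
+    def pod_reached_limit(usage, threshold=0.8):
+        """usage is assumed to look like
+        {'ram': {'total': 1024, 'used': 900}, 'disk': {...}, 'vcpus': {...}}.
+        Return True if any resource exceeds the configured threshold."""
+        for stats in usage.values():
+            total = stats.get('total') or 0
+            if total and float(stats.get('used', 0)) / total >= threshold:
+                return True
+        return False
+
+    print(pod_reached_limit({'ram': {'total': 1024, 'used': 900}}))  # True
+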
+Trio2o
+----------
+
+The Trio2o needs a framework to support different binding policies (filters).
+
+Each pod is one OpenStack instance, including controller nodes and compute
+nodes. E.g.,
+
+::
+
+ +-> controller(s) - pod1 <--> compute nodes <---+
+ |
+ The trio2o +-> controller(s) - pod2 <--> compute nodes <---+ resource migration, if necessary
+ (resource controller) .... |
+ +-> controller(s) - pod{N} <--> compute nodes <-+
+
+
+The Trio2o selects a pod to decide to which controller the requests should
+be forwarded. Then the controllers in the selected pod will do their own
+scheduling.
+
+The simplest binding filter is as follows: line up all available pods in a
+list and always select the first one. When all the resources in the first pod
+have been allocated, remove it from the list. This is quite like how a
+production cloud is built: at first, only a few pods are in the list, and
+then more and more pods are added when there are not enough resources in the
+current cloud. For example,
+
+List1 for general pool: Pod1 <- Pod2 <- Pod3
+List2 for CAD modeling pool: Pod4 <- Pod5 <- Pod6
+
+If Pod1's resources are exhausted, Pod1 is removed from List1, which is
+changed to: Pod2 <- Pod3.
+If Pod4's resources are exhausted, Pod4 is removed from List2, which is
+changed to: Pod5 <- Pod6.
+
+If the tenant wants to allocate resources for a general VM, the Trio2o
+selects Pod2. If the tenant wants to allocate resources for a CAD modeling
+VM, the Trio2o selects Pod5.
+
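+As an illustration only, a minimal Python sketch of this first-available-pod
+policy is shown below; the pod dictionaries and the "exhausted" flag are
+assumptions made for the example, not existing Trio2o code::
+
+    # Minimal sketch of the "always pick the first available pod" policy.
+    # The pod dicts and the "exhausted" flag are illustrative assumptions.
+    def select_pod(pod_list):
+        """Return the first pod that still has free resources, or None."""
+        for pod in pod_list:
+            if not pod.get('exhausted', False):
+                return pod
+        return None
+
+    general_pool = [{'pod_name': 'Pod1', 'exhausted': True},
+                    {'pod_name': 'Pod2', 'exhausted': False},
+                    {'pod_name': 'Pod3', 'exhausted': False}]
+    cad_pool = [{'pod_name': 'Pod4', 'exhausted': True},
+                {'pod_name': 'Pod5', 'exhausted': False},
+                {'pod_name': 'Pod6', 'exhausted': False}]
+
+    print(select_pod(general_pool)['pod_name'])  # Pod2
+    print(select_pod(cad_pool)['pod_name'])      # Pod5
+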
+Filtering
+-------------
+
+For the strategy of selecting pods, we need a series of filters. Before
+dynamic pod binding is implemented, the binding criteria are hard coded to
+select the first pod in the AZ. Hence, we need to design a series of filter
+algorithms. Firstly, we plan to design an ALLPodsFilter which does no
+filtering and passes all the available pods. Secondly, we plan to design an
+AvailabilityZoneFilter which passes the pods matching the specified
+availability zone. Thirdly, we plan to design a ResourceAffinityFilter which
+passes the pods matching the specified resource type. Based on the
+resource_affinity_tag, the Trio2o can be aware of which type of resource the
+tenant wants to provision. In the future, we can add more filters, which
+requires adding more information to the pod table.
+
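+To make the intended filter chain concrete, here is a brief,
+non-authoritative sketch of the three filters named above; the method
+signature and the pod/request dictionaries are assumptions rather than an
+existing Trio2o interface::
+
+    # Illustrative filter sketch; data structures are assumptions.
+    class ALLPodsFilter(object):
+        """Pass every available pod without filtering."""
+        def filter_pods(self, pods, request):
+            return list(pods)
+
+    class AvailabilityZoneFilter(object):
+        """Pass only the pods that belong to the requested AZ."""
+        def filter_pods(self, pods, request):
+            az = request.get('availability_zone')
+            return [p for p in pods if not az or p.get('az_name') == az]
+
+    class ResourceAffinityFilter(object):
+        """Pass only the pods whose resource_affinity_tag matches the
+        key-value pairs from the flavor or volume type extra-spec."""
+        def filter_pods(self, pods, request):
+            wanted = request.get('extra_specs', {})
+            return [p for p in pods
+                    if all(p.get('resource_affinity_tag', {}).get(k) == v
+                           for k, v in wanted.items())]
+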
+Weighting
+-------------
+
+After filtering all the pods, the Trio2o obtains the available pods for a
+tenant. The Trio2o needs to select the most suitable pod for the tenant.
+Hence, we need to define a weight function to calculate the corresponding
+weight of each pod. Based on the weights, the Trio2o selects the pod which
+has the maximum weight value. When calculating the weight of a pod, we need
+to design a series of weighers. We first take the pod creation time into
+consideration when designing the weight function. The second weigher is the
+idle capacity, so as to select the pod which has the most idle capacity.
+Other metrics
+will be added in the future, e.g., cost.
+
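+The weighting step could then look roughly like the sketch below; the
+normalization, the default weights and the "idle_capacity" field are
+assumptions used only to illustrate how creation time and idle capacity
+might be combined::
+
+    # Illustrative weigher sketch; field names and weights are assumptions.
+    import time
+
+    def weigh_pod(pod, now=None, time_weight=0.5, capacity_weight=0.5):
+        """Return a score in [0, 1]; older pods and pods with more idle
+        capacity score higher."""
+        now = now or time.time()
+        age = now - pod.get('created_at', now)           # seconds since creation
+        age_score = min(age / (365 * 24 * 3600.0), 1.0)  # cap at one year
+        idle_score = pod.get('idle_capacity', 0.0)       # assumed ratio in [0, 1]
+        return time_weight * age_score + capacity_weight * idle_score
+
+    def select_best_pod(pods):
+        return max(pods, key=weigh_pod) if pods else None
+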
+Data Model Impact
+==================
+
+Firstly, we need to add a column "resource_affinity_tag" to the pod table,
+which is used to store the key-value pair to match against the flavor
+extra-spec and volume extra-spec.
+
+Secondly, in the pod binding table, we need to add fields for the binding
+start time and binding end time, so that the history of the binding
+relationship can be stored.
+
+Thirdly, we need a table to store the usage of each pod for Cinder/Nova.
+We plan to use a JSON object to store the usage information, so even if
+the usage structure is changed, we don't need to update the table. If the
+usage value is null, that means the usage has not been initialized yet.
+As mentioned above, the usage could be refreshed on a daily basis. If it is
+not initialized yet, it means there are still plenty of resources available,
+and the pod can be scheduled as if it has not reached the usage threshold.
+
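+As a rough illustration of these additions (not a reviewed schema), the new
+fields could be declared with SQLAlchemy along the following lines; the
+table and column names are assumptions::
+
+    # Rough illustration only; table and column names are assumptions,
+    # not the reviewed Trio2o schema.
+    from sqlalchemy import Column, DateTime, String, Text
+    from sqlalchemy.ext.declarative import declarative_base
+
+    Base = declarative_base()
+
+    class Pod(Base):
+        __tablename__ = 'pod_example'
+        pod_id = Column(String(36), primary_key=True)
+        resource_affinity_tag = Column(String(255))  # key-value pair to match extra-specs
+
+    class PodBinding(Base):
+        __tablename__ = 'pod_binding_example'
+        id = Column(String(36), primary_key=True)
+        tenant_id = Column(String(36), nullable=False)
+        pod_id = Column(String(36), nullable=False)
+        binding_start_time = Column(DateTime)
+        binding_end_time = Column(DateTime)
+
+    class PodUsage(Base):
+        __tablename__ = 'pod_usage_example'
+        pod_id = Column(String(36), primary_key=True)
+        usage = Column(Text)  # JSON blob; null means usage not yet initialized
+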
+Dependencies
+=============
+
+None
+
+
+Testing
+========
+
+None
+
+
+Documentation Impact
+=====================
+
+None
+
+
+Reference
+==========
+
+[1] http://developer.openstack.org/api-ref-compute-v2.1.html#showCellCapacities
+
+[2] http://developer.openstack.org/api-ref-blockstorage-v2.html#os-vol-pool-v2
+
+[3] http://developer.openstack.org/api-ref-compute-v2.1.html#showinfo
diff --git a/test-requirements.txt b/test-requirements.txt
index ec7511c..cac56d7 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,21 +3,21 @@
# process, which may cause wedges in the gate later.
hacking<0.11,>=0.10.2
-cliff!=1.16.0,!=1.17.0,>=1.15.0 # Apache-2.0
-coverage>=3.6 # Apache-2.0
+cliff>=2.2.0 # Apache-2.0
+coverage>=4.0 # Apache-2.0
fixtures>=3.0.0 # Apache-2.0/BSD
mock>=2.0 # BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
-requests-mock>=1.0 # Apache-2.0
-sphinx!=1.3b1,<1.3,>=1.2.1 # BSD
-oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
+requests-mock>=1.1 # Apache-2.0
+sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
+oslosphinx>=4.7.0 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
testresources>=0.2.4 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
WebTest>=2.0 # MIT
oslotest>=1.10.0 # Apache-2.0
-os-testr>=0.7.0 # Apache-2.0
+os-testr>=0.8.0 # Apache-2.0
tempest-lib>=0.14.0 # Apache-2.0
ddt>=1.0.1 # MIT
pylint==1.4.5 # GPLv2
diff --git a/tox.ini b/tox.ini
index 2275ee2..73bec9e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -10,10 +10,9 @@ install_command = pip install -U --force-reinstall {opts} {packages}
setenv =
VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
- TRICIRCLE_TEST_DIRECTORY=tricircle/tests
+ TRIO2O_TEST_DIRECTORY=trio2o/tests
deps =
-r{toxinidir}/test-requirements.txt
- -egit+https://git.openstack.org/openstack/neutron@master#egg=neutron
commands = python setup.py testr --slowest --testr-args='{posargs}'
whitelist_externals = rm
@@ -31,7 +30,6 @@ commands = oslo-config-generator --config-file=etc/api-cfg-gen.conf
oslo-config-generator --config-file=etc/nova_apigw-cfg-gen.conf
oslo-config-generator --config-file=etc/cinder_apigw-cfg-gen.conf
oslo-config-generator --config-file=etc/xjob-cfg-gen.conf
- oslo-config-generator --config-file=etc/tricircle_plugin-cfg-gen.conf
[testenv:docs]
commands = python setup.py build_sphinx
diff --git a/tricircle/network/drivers/type_local.py b/tricircle/network/drivers/type_local.py
deleted file mode 100644
index 6d07ddb..0000000
--- a/tricircle/network/drivers/type_local.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.plugins.ml2 import driver_api
-
-from tricircle.common import constants
-
-
-class LocalTypeDriver(driver_api.TypeDriver):
- def get_type(self):
- return constants.NT_LOCAL
-
- def initialize(self):
- pass
-
- def is_partial_segment(self, segment):
- return False
-
- def validate_provider_segment(self, segment):
- pass
-
- def reserve_provider_segment(self, session, segment):
- return segment
-
- def allocate_tenant_segment(self, session):
- return {driver_api.NETWORK_TYPE: constants.NT_LOCAL}
-
- def release_segment(self, session, segment):
- pass
-
- def get_mtu(self, physical):
- pass
diff --git a/tricircle/network/drivers/type_shared_vlan.py b/tricircle/network/drivers/type_shared_vlan.py
deleted file mode 100644
index 12761f7..0000000
--- a/tricircle/network/drivers/type_shared_vlan.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import sys
-
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.plugins.common import utils as plugin_utils
-from neutron.plugins.ml2 import driver_api
-from neutron.plugins.ml2.drivers import type_vlan
-
-from tricircle.common import constants
-from tricircle.common.i18n import _LE
-from tricircle.common.i18n import _LI
-
-LOG = log.getLogger(__name__)
-
-
-class SharedVLANTypeDriver(type_vlan.VlanTypeDriver):
- def __init__(self):
- super(SharedVLANTypeDriver, self).__init__()
-
- def _parse_network_vlan_ranges(self):
- try:
- self.network_vlan_ranges = plugin_utils.parse_network_vlan_ranges(
- cfg.CONF.tricircle.network_vlan_ranges)
- except Exception:
- LOG.exception(_LE('Failed to parse network_vlan_ranges. '
- 'Service terminated!'))
- sys.exit(1)
- LOG.info(_LI('Network VLAN ranges: %s'), self.network_vlan_ranges)
-
- def get_type(self):
- return constants.NT_SHARED_VLAN
-
- def reserve_provider_segment(self, session, segment):
- res = super(SharedVLANTypeDriver,
- self).reserve_provider_segment(session, segment)
- res[driver_api.NETWORK_TYPE] = constants.NT_SHARED_VLAN
- return res
-
- def allocate_tenant_segment(self, session):
- res = super(SharedVLANTypeDriver,
- self).allocate_tenant_segment(session)
- res[driver_api.NETWORK_TYPE] = constants.NT_SHARED_VLAN
- return res
-
- def get_mtu(self, physical):
- pass
diff --git a/tricircle/network/exceptions.py b/tricircle/network/exceptions.py
deleted file mode 100644
index 11ae2e9..0000000
--- a/tricircle/network/exceptions.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron_lib import exceptions
-
-from tricircle.common.i18n import _
-
-
-class RemoteGroupNotSupported(exceptions.InvalidInput):
- message = _('Remote group not supported by Tricircle plugin')
-
-
-class DefaultGroupUpdateNotSupported(exceptions.InvalidInput):
- message = _('Default group update not supported by Tricircle plugin')
-
-
-class BottomPodOperationFailure(exceptions.NeutronException):
- message = _('Operation for %(resource)s on bottom pod %(pod_name)s fails')
diff --git a/tricircle/network/helper.py b/tricircle/network/helper.py
deleted file mode 100644
index 0477658..0000000
--- a/tricircle/network/helper.py
+++ /dev/null
@@ -1,555 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-
-from neutron_lib import constants
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from tricircle.common import client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.common.lock_handle as t_lock
-from tricircle.common import utils
-import tricircle.db.api as db_api
-import tricircle.network.exceptions as t_network_exc
-
-
-# manually define these constants to avoid depending on neutron repos
-# neutron.extensions.availability_zone.AZ_HINTS
-AZ_HINTS = 'availability_zone_hints'
-EXTERNAL = 'router:external' # neutron.extensions.external_net.EXTERNAL
-TYPE_VLAN = 'vlan' # neutron.plugins.common.constants.TYPE_VLAN
-
-
-class NetworkHelper(object):
- def __init__(self, call_obj=None):
- self.clients = {}
- self.call_obj = call_obj
-
- @staticmethod
- def _transfer_network_type(network_type):
- network_type_map = {t_constants.NT_SHARED_VLAN: TYPE_VLAN}
- return network_type_map.get(network_type, network_type)
-
- def _get_client(self, pod_name=None):
- if not pod_name:
- if t_constants.TOP not in self.clients:
- self.clients[t_constants.TOP] = client.Client()
- return self.clients[t_constants.TOP]
- if pod_name not in self.clients:
- self.clients[pod_name] = client.Client(pod_name)
- return self.clients[pod_name]
-
- # operate top resource
- def _prepare_top_element_by_call(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
- return getattr(super(self.call_obj.__class__, self.call_obj),
- 'get_%ss' % _type_)(q_ctx_,
- filters={'name': [ele_['id']]})
-
- def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
- if _type_ == t_constants.RT_NETWORK:
- # for network, we call TricirclePlugin's own create_network to
- # handle network segment
- return self.call_obj.create_network(q_ctx_, body_)
- else:
- return getattr(super(self.call_obj.__class__, self.call_obj),
- 'create_%s' % _type_)(q_ctx_, body_)
-
- return t_lock.get_or_create_element(
- t_ctx, q_ctx,
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- def _prepare_top_element_by_client(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- def list_resources(t_ctx_, q_ctx_, pod_, ele_, _type_):
- client = self._get_client()
- return client.list_resources(_type_, t_ctx_,
- [{'key': 'name', 'comparator': 'eq',
- 'value': ele_['id']}])
-
- def create_resources(t_ctx_, q_ctx_, pod_, body_, _type_):
- client = self._get_client()
- return client.create_resources(_type_, t_ctx_, body_)
-
- assert _type == 'port'
- # currently only top port is possible to be created via client, other
- # top resources should be created directly by plugin
- return t_lock.get_or_create_element(
- t_ctx, q_ctx,
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- def prepare_top_element(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- """Get or create shared top networking resource
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of top pod
- :param ele: dict with "id" as key and distinctive identifier as value
- :param _type: type of the resource
- :param body: request body to create resource
- :return: boolean value indicating whether the resource is newly
- created or already exists and id of the resource
- """
- if self.call_obj:
- return self._prepare_top_element_by_call(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
- else:
- return self._prepare_top_element_by_client(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
- t_net_id, b_router_id, b_port_id, is_ew):
- """Get or create top bridge interface
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of top pod
- :param t_net_id: top bridge network id
- :param b_router_id: bottom router id
- :param b_port_id: needed when creating bridge interface for south-
- north network, id of the internal port bound to floating ip
- :param is_ew: create the bridge interface for east-west network or
- south-north network
- :return: bridge interface id
- """
- if is_ew:
- port_name = t_constants.ew_bridge_port_name % (project_id,
- b_router_id)
- else:
- port_name = t_constants.ns_bridge_port_name % (project_id,
- b_router_id,
- b_port_id)
- port_ele = {'id': port_name}
- port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': port_name,
- 'network_id': t_net_id,
- 'device_id': '',
- 'device_owner': ''
- }
- }
- if self.call_obj:
- port_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
- _, port_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, port_ele, 'port', port_body)
- return port_id
-
- # operate bottom resource
- def prepare_bottom_element(self, t_ctx,
- project_id, pod, ele, _type, body):
- """Get or create bottom networking resource based on top resource
-
- :param t_ctx: tricircle context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param ele: dict of top resource
- :param _type: type of the resource
- :param body: request body to create resource
- :return: boolean value indicating whether the resource is newly
- created or already exists and id of the resource
- """
- def list_resources(t_ctx_, q_ctx, pod_, ele_, _type_):
- client = self._get_client(pod_['pod_name'])
- if _type_ == t_constants.RT_NETWORK:
- value = utils.get_bottom_network_name(ele_)
- else:
- value = ele_['id']
- return client.list_resources(_type_, t_ctx_,
- [{'key': 'name', 'comparator': 'eq',
- 'value': value}])
-
- def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
- client = self._get_client(pod_['pod_name'])
- return client.create_resources(_type_, t_ctx_, body_)
-
- return t_lock.get_or_create_element(
- t_ctx, None, # we don't need neutron context, so pass None
- project_id, pod, ele, _type, body,
- list_resources, create_resources)
-
- @staticmethod
- def get_create_network_body(project_id, network):
- """Get request body to create bottom network
-
- :param project_id: project id
- :param network: top network dict
- :return: request body to create bottom network
- """
- body = {
- 'network': {
- 'tenant_id': project_id,
- 'name': utils.get_bottom_network_name(network),
- 'admin_state_up': True
- }
- }
- network_type = network.get('provider:network_type')
- if network_type == t_constants.NT_SHARED_VLAN:
- body['network']['provider:network_type'] = 'vlan'
- body['network']['provider:physical_network'] = network[
- 'provider:physical_network']
- body['network']['provider:segmentation_id'] = network[
- 'provider:segmentation_id']
- return body
-
- @staticmethod
- def get_create_subnet_body(project_id, t_subnet, b_net_id, gateway_ip):
- """Get request body to create bottom subnet
-
- :param project_id: project id
- :param t_subnet: top subnet dict
- :param b_net_id: bottom network id
- :param gateway_ip: bottom gateway ip
- :return: request body to create bottom subnet
- """
- pools = t_subnet['allocation_pools']
- new_pools = []
- g_ip = netaddr.IPAddress(gateway_ip)
- ip_found = False
- for pool in pools:
- if ip_found:
- new_pools.append({'start': pool['start'],
- 'end': pool['end']})
- continue
- ip_range = netaddr.IPRange(pool['start'], pool['end'])
- ip_num = len(ip_range)
- for i, ip in enumerate(ip_range):
- if g_ip == ip:
- ip_found = True
- if i > 0:
- new_pools.append({'start': ip_range[0].format(),
- 'end': ip_range[i - 1].format()})
- if i < ip_num - 1:
- new_pools.append(
- {'start': ip_range[i + 1].format(),
- 'end': ip_range[ip_num - 1].format()})
- body = {
- 'subnet': {
- 'network_id': b_net_id,
- 'name': t_subnet['id'],
- 'ip_version': t_subnet['ip_version'],
- 'cidr': t_subnet['cidr'],
- 'gateway_ip': gateway_ip,
- 'allocation_pools': new_pools,
- 'enable_dhcp': False,
- 'tenant_id': project_id
- }
- }
- return body
-
- @staticmethod
- def get_create_port_body(project_id, t_port, subnet_map, b_net_id,
- b_security_group_ids=None):
- """Get request body to create bottom port
-
- :param project_id: project id
- :param t_port: top port dict
- :param subnet_map: dict with top subnet id as key and bottom subnet
- id as value
- :param b_net_id: bottom network id
- :param security_group_ids: list of bottom security group id
- :return: request body to create bottom port
- """
- b_fixed_ips = []
- for ip in t_port['fixed_ips']:
- b_ip = {'subnet_id': subnet_map[ip['subnet_id']],
- 'ip_address': ip['ip_address']}
- b_fixed_ips.append(b_ip)
- body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_port['id'],
- 'network_id': b_net_id,
- 'mac_address': t_port['mac_address'],
- 'fixed_ips': b_fixed_ips
- }
- }
- if b_security_group_ids:
- body['port']['security_groups'] = b_security_group_ids
- return body
-
- def get_create_interface_body(self, project_id, t_net_id, b_pod_id,
- t_subnet_id):
- """Get request body to create top interface
-
- :param project_id: project id
- :param t_net_id: top network id
- :param b_pod_id: bottom pod id
- :param t_subnet_id: top subnet id
- :return: request body to create top interface port
- """
- t_interface_name = t_constants.interface_port_name % (b_pod_id,
- t_subnet_id)
- t_interface_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_interface_name,
- 'network_id': t_net_id,
- 'device_id': '',
- 'device_owner': 'network:router_interface',
- }
- }
- if self.call_obj:
- t_interface_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
- return t_interface_body
-
- def prepare_bottom_network_subnets(self, t_ctx, q_ctx, project_id, pod,
- t_net, t_subnets):
- """Get or create bottom network, subnet and dhcp port
-
- :param t_ctx: tricircle context
- :param q_ctx: neutron context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param t_net: dict of top network
- :param t_subnets: list of top subnet dict
- :return: bottom network id and a dict with top subnet id as key,
- bottom subnet id as value
- """
- # network
- net_body = self.get_create_network_body(project_id, t_net)
- if net_body['network'].get('provider:network_type'):
- # if network type specified, we need to switch to admin account
- admin_context = t_context.get_admin_context()
-
- _, b_net_id = self.prepare_bottom_element(
- admin_context, project_id, pod, t_net, t_constants.RT_NETWORK,
- net_body)
- else:
- _, b_net_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_net, t_constants.RT_NETWORK,
- net_body)
-
- # subnet
- subnet_map = {}
- subnet_dhcp_map = {}
-
- for subnet in t_subnets:
- # gateway
- t_interface_name = t_constants.interface_port_name % (
- pod['pod_id'], subnet['id'])
-
- t_interface_body = self.get_create_interface_body(
- project_id, t_net['id'], pod['pod_id'], subnet['id'])
-
- _, t_interface_id = self.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, {'id': t_interface_name},
- t_constants.RT_PORT, t_interface_body)
- t_interface = self._get_top_element(
- t_ctx, q_ctx, t_constants.RT_PORT, t_interface_id)
- gateway_ip = t_interface['fixed_ips'][0]['ip_address']
-
- subnet_body = self.get_create_subnet_body(
- project_id, subnet, b_net_id, gateway_ip)
- _, b_subnet_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, subnet, t_constants.RT_SUBNET,
- subnet_body)
- subnet_map[subnet['id']] = b_subnet_id
- subnet_dhcp_map[subnet['id']] = subnet['enable_dhcp']
-
- # dhcp port
- for t_subnet_id, b_subnet_id in subnet_map.iteritems():
- if not subnet_dhcp_map[t_subnet_id]:
- continue
- self.prepare_dhcp_port(t_ctx, project_id, pod, t_net['id'],
- t_subnet_id, b_net_id, b_subnet_id)
- b_client = self._get_client(pod['pod_name'])
- b_client.update_subnets(t_ctx, b_subnet_id,
- {'subnet': {'enable_dhcp': True}})
-
- return b_net_id, subnet_map
-
- def get_bottom_bridge_elements(self, t_ctx, project_id,
- pod, t_net, is_external, t_subnet, t_port):
- """Get or create bottom bridge port
-
- :param t_ctx: tricircle context
- :param project_id: project id
- :param pod: dict of bottom pod
- :param t_net: dict of top bridge network
- :param is_external: whether the bottom network should be created as
- an external network, this is True for south-north case
- :param t_subnet: dict of top bridge subnet
- :param t_port: dict of top bridge port
- :return: tuple (boolean value indicating whether the resource is newly
- created or already exists, bottom port id, bottom subnet id,
- bottom network id)
- """
- net_body = {'network': {
- 'tenant_id': project_id,
- 'name': t_net['id'],
- 'provider:network_type': self._transfer_network_type(
- t_net['provider:network_type']),
- 'provider:physical_network': t_net['provider:physical_network'],
- 'provider:segmentation_id': t_net['provider:segmentation_id'],
- 'admin_state_up': True}}
- if is_external:
- net_body['network'][EXTERNAL] = True
- _, b_net_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_net, 'network', net_body)
-
- subnet_body = {'subnet': {'network_id': b_net_id,
- 'name': t_subnet['id'],
- 'ip_version': 4,
- 'cidr': t_subnet['cidr'],
- 'enable_dhcp': False,
- 'tenant_id': project_id}}
- # In the pod hosting the external network, where the ns bridge network
- # is used as an internal network, we need to allocate ip addresses
- # from .3 onwards, because .2 is used by the router gateway port in the
- # pod hosting the servers, where the ns bridge network is used as an
- # external network.
- # if t_subnet['name'].startswith('ns_bridge_') and not is_external:
- # prefix = t_subnet['cidr'][:t_subnet['cidr'].rindex('.')]
- # subnet_body['subnet']['allocation_pools'] = [
- # {'start': prefix + '.3', 'end': prefix + '.254'}]
- _, b_subnet_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_subnet, 'subnet', subnet_body)
-
- if t_port:
- port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': t_port['id'],
- 'network_id': b_net_id,
- 'fixed_ips': [
- {'subnet_id': b_subnet_id,
- 'ip_address': t_port['fixed_ips'][0]['ip_address']}]
- }
- }
- is_new, b_port_id = self.prepare_bottom_element(
- t_ctx, project_id, pod, t_port, 'port', port_body)
-
- return is_new, b_port_id, b_subnet_id, b_net_id
- else:
- return None, None, b_subnet_id, b_net_id
-
- @staticmethod
- def _get_create_dhcp_port_body(project_id, port, b_subnet_id,
- b_net_id):
- body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': port['id'],
- 'network_id': b_net_id,
- 'fixed_ips': [
- {'subnet_id': b_subnet_id,
- 'ip_address': port['fixed_ips'][0]['ip_address']}
- ],
- 'mac_address': port['mac_address'],
- 'binding:profile': {},
- 'device_id': 'reserved_dhcp_port',
- 'device_owner': 'network:dhcp',
- }
- }
- return body
-
- def prepare_dhcp_port(self, ctx, project_id, b_pod, t_net_id, t_subnet_id,
- b_net_id, b_subnet_id):
- """Create top dhcp port and map it to bottom dhcp port
-
- :param ctx: tricircle context
- :param project_id: project id
- :param b_pod: dict of bottom pod
- :param t_net_id: top network id
- :param t_subnet_id: top subnet id
- :param b_net_id: bottom network id
- :param b_subnet_id: bottom subnet id
- :return: None
- """
- t_client = self._get_client()
-
- t_dhcp_name = t_constants.dhcp_port_name % t_subnet_id
- t_dhcp_port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'network_id': t_net_id,
- 'name': t_dhcp_name,
- 'binding:profile': {},
- 'device_id': 'reserved_dhcp_port',
- 'device_owner': 'network:dhcp',
- }
- }
- if self.call_obj:
- t_dhcp_port_body['port'].update(
- {'mac_address': constants.ATTR_NOT_SPECIFIED,
- 'fixed_ips': constants.ATTR_NOT_SPECIFIED})
-
- # NOTE(zhiyuan) for one subnet present in different pods, we just
- # create one top dhcp port. though the dhcp ports in different pods
- # will have the same IP, each dnsmasq daemon only takes care of the
- # VM IPs in its own pod, so VMs will not receive incorrect dhcp
- # responses
- _, t_dhcp_port_id = self.prepare_top_element(
- ctx, None, project_id, db_api.get_top_pod(ctx),
- {'id': t_dhcp_name}, t_constants.RT_PORT, t_dhcp_port_body)
- t_dhcp_port = t_client.get_ports(ctx, t_dhcp_port_id)
- dhcp_port_body = self._get_create_dhcp_port_body(
- project_id, t_dhcp_port, b_subnet_id, b_net_id)
- self.prepare_bottom_element(ctx, project_id, b_pod, t_dhcp_port,
- t_constants.RT_PORT, dhcp_port_body)
-
- @staticmethod
- def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
- fip_address, port_id):
- try:
- client.create_floatingips(
- t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
- 'floating_ip_address': fip_address,
- 'port_id': port_id}})
- except q_cli_exceptions.IpAddressInUseClient:
- fips = client.list_floatingips(t_ctx,
- [{'key': 'floating_ip_address',
- 'comparator': 'eq',
- 'value': fip_address}])
- if not fips:
- # this is rare case that we got IpAddressInUseClient exception
- # a second ago but now the floating ip is missing
- raise t_network_exc.BottomPodOperationFailure(
- resource='floating ip', pod_name=pod['pod_name'])
- associated_port_id = fips[0].get('port_id')
- if associated_port_id == port_id:
- # the internal port associated with the existing fip is what
- # we expect, just ignore this exception
- pass
- elif not associated_port_id:
- # the existing fip is not associated with any internal port,
- # update the fip to add association
- client.update_floatingips(t_ctx, fips[0]['id'],
- {'floatingip': {'port_id': port_id}})
- else:
- raise
-
- def _get_top_element(self, t_ctx, q_ctx, _type, _id):
- if self.call_obj:
- return getattr(self.call_obj, 'get_%s' % _type)(q_ctx, _id)
- else:
- return getattr(self._get_client(), 'get_%ss' % _type)(t_ctx, _id)
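The list/create callbacks shown at the top of this file all feed the same get-or-create pattern: look the element up by a deterministic name first, and only create it when nothing is found, so the prepare_* helpers stay idempotent. A minimal self-contained sketch of that pattern (the names below are illustrative, not the tricircle.common.lock_handle API):

    def get_or_create_element(list_cb, create_cb, body):
        """Return (is_new, element_id), creating the element only if absent."""
        existing = list_cb()
        if existing:
            return False, existing[0]['id']
        return True, create_cb(body)['id']

    # toy callbacks standing in for the Neutron client calls
    store = []
    list_cb = lambda: [e for e in store if e['name'] == 'net-A']
    create_cb = lambda body: (store.append(dict(body, id='bottom-uuid'))
                              or store[-1])
    print(get_or_create_element(list_cb, create_cb, {'name': 'net-A'}))
    # first call -> (True, 'bottom-uuid'); a second call -> (False, 'bottom-uuid')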
diff --git a/tricircle/network/managers.py b/tricircle/network/managers.py
deleted file mode 100644
index afbe447..0000000
--- a/tricircle/network/managers.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_config import cfg
-from oslo_log import log
-
-from neutron.api.v2 import attributes
-from neutron.extensions import external_net
-from neutron.plugins.ml2 import managers
-
-from tricircle.common.i18n import _LE
-from tricircle.common.i18n import _LI
-
-LOG = log.getLogger(__name__)
-
-
-class TricircleTypeManager(managers.TypeManager):
-
- def __init__(self):
- self.drivers = {}
-
- # NOTE(zhiyuan) here we call __init__ of super class's super class,
- # which is NamedExtensionManager's __init__ to bypass initialization
- # process of ml2 type manager
- super(managers.TypeManager, self).__init__(
- 'tricircle.network.type_drivers',
- cfg.CONF.tricircle.type_drivers,
- invoke_on_load=True)
- LOG.info(_LI('Loaded type driver names: %s'), self.names())
-
- self._register_types()
- self._check_tenant_network_types(
- cfg.CONF.tricircle.tenant_network_types)
- self._check_bridge_network_type(
- cfg.CONF.tricircle.bridge_network_type)
-
- def _check_bridge_network_type(self, bridge_network_type):
- if not bridge_network_type:
- return
- if bridge_network_type == 'local':
- LOG.error(_LE("Local is not a valid bridge network type. "
- "Service terminated!"), bridge_network_type)
- raise SystemExit(1)
-
- type_set = set(self.tenant_network_types)
- if bridge_network_type not in type_set:
- LOG.error(_LE("Bridge network type %s is not registered. "
- "Service terminated!"), bridge_network_type)
- raise SystemExit(1)
-
- def _register_types(self):
- for ext in self:
- network_type = ext.obj.get_type()
- if network_type not in self.drivers:
- self.drivers[network_type] = ext
-
- @staticmethod
- def _is_external_network(network):
- external = network.get(external_net.EXTERNAL)
- external_set = attributes.is_attr_set(external)
- if not external_set or not external:
- return False
- else:
- return True
-
- def create_network_segments(self, context, network, tenant_id):
- # NOTE(zhiyuan) before we figure out how to deal with external network
- # segment allocation, skip segment creation for external network
- if self._is_external_network(network):
- return
- segments = self._process_provider_create(network)
- session = context.session
- with session.begin(subtransactions=True):
- network_id = network['id']
- if segments:
- for segment_index, segment in enumerate(segments):
- segment = self.reserve_provider_segment(
- session, segment)
- self._add_network_segment(context, network_id, segment,
- segment_index)
- else:
- segment = self._allocate_tenant_net_segment(session)
- self._add_network_segment(context, network_id, segment)
-
- def extend_networks_dict_provider(self, context, networks):
- internal_networks = []
- for network in networks:
- # NOTE(zhiyuan) before we figure out how to deal with external
- # network segment allocation, skip external network since it does
- # not have segment information
- if not self._is_external_network(network):
- internal_networks.append(network)
- if internal_networks:
- super(TricircleTypeManager,
- self).extend_networks_dict_provider(context,
- internal_networks)
diff --git a/tricircle/network/plugin.py b/tricircle/network/plugin.py
deleted file mode 100644
index 1238a2c..0000000
--- a/tricircle/network/plugin.py
+++ /dev/null
@@ -1,1173 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import copy
-
-from oslo_config import cfg
-import oslo_log.helpers as log_helpers
-from oslo_log import log
-
-from neutron.api.v2 import attributes
-from neutron.common import exceptions
-from neutron.db import common_db_mixin
-from neutron.db import db_base_plugin_v2
-from neutron.db import external_net_db
-from neutron.db import extradhcpopt_db
-# NOTE(zhiyuan) though not used, this import cannot be removed because Router
-# relies on one table defined in l3_agentschedulers_db
-from neutron.db import l3_agentschedulers_db # noqa
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.db import portbindings_db
-from neutron.db import sqlalchemyutils
-from neutron.extensions import availability_zone as az_ext
-from neutron.extensions import external_net
-from neutron.extensions import l3
-from neutron.extensions import providernet as provider
-from neutron_lib import constants
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from sqlalchemy import sql
-
-from tricircle.common import az_ag
-import tricircle.common.client as t_client
-import tricircle.common.constants as t_constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common.i18n import _LI
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.exceptions as t_network_exc
-from tricircle.network import helper
-from tricircle.network import managers
-from tricircle.network import security_groups
-
-
-tricircle_opts = [
- cfg.ListOpt('type_drivers',
- default=['local'],
- help=_('List of network type driver entry points to be loaded '
- 'from the tricircle.network.type_drivers namespace.')),
- cfg.ListOpt('tenant_network_types',
- default=['local'],
- help=_('Ordered list of network_types to allocate as tenant '
- 'networks. The default value "local" is useful for '
- 'single pod connectivity.')),
- cfg.ListOpt('network_vlan_ranges',
- default=[],
- help=_('List of <physical_network>:<vlan_min>:<vlan_max> or '
- '<physical_network> specifying physical_network names '
- 'usable for VLAN provider and tenant networks, as '
- 'well as ranges of VLAN tags on each available for '
- 'allocation to tenant networks.')),
- cfg.StrOpt('bridge_network_type',
- default='',
- help=_('Type of the l3 bridge network. This type should be enabled '
- 'in tenant_network_types and must not be the local type.'))
-]
-
-tricircle_opt_group = cfg.OptGroup('tricircle')
-cfg.CONF.register_group(tricircle_opt_group)
-cfg.CONF.register_opts(tricircle_opts, group=tricircle_opt_group)
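As an illustration only, these options could be set programmatically (for example in a test), assuming the group and options above have already been registered; the values are examples, and 'shared_vlan' is used here simply as a non-local type name:

    from oslo_config import cfg

    cfg.CONF.set_override('type_drivers', ['local', 'shared_vlan'],
                          group='tricircle')
    cfg.CONF.set_override('tenant_network_types', ['local', 'shared_vlan'],
                          group='tricircle')
    cfg.CONF.set_override('network_vlan_ranges', ['bridge:2001:3000'],
                          group='tricircle')
    # must name a non-local member of tenant_network_types, otherwise
    # TricircleTypeManager exits during its bridge type check
    cfg.CONF.set_override('bridge_network_type', 'shared_vlan',
                          group='tricircle')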
-
-LOG = log.getLogger(__name__)
-
-NON_VM_PORT_TYPES = [constants.DEVICE_OWNER_ROUTER_INTF,
- constants.DEVICE_OWNER_ROUTER_GW,
- constants.DEVICE_OWNER_DHCP]
-
-
-class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
- security_groups.TricircleSecurityGroupMixin,
- external_net_db.External_net_db_mixin,
- portbindings_db.PortBindingMixin,
- extradhcpopt_db.ExtraDhcpOptMixin,
- l3_db.L3_NAT_dbonly_mixin):
-
- __native_bulk_support = True
- __native_pagination_support = True
- __native_sorting_support = True
-
- # NOTE(zhiyuan) we don't support the "agent" and "availability_zone"
- # extensions and have no need to, but "network_availability_zone"
- # depends on these two extensions, so we need to register them
- supported_extension_aliases = ["agent",
- "quotas",
- "extra_dhcp_opt",
- "binding",
- "security-group",
- "external-net",
- "availability_zone",
- "provider",
- "network_availability_zone",
- "router"]
-
- def __init__(self):
- super(TricirclePlugin, self).__init__()
- LOG.info(_LI("Starting Tricircle Neutron Plugin"))
- self.clients = {}
- self.xjob_handler = xrpcapi.XJobAPI()
- self._setup_rpc()
- self.type_manager = managers.TricircleTypeManager()
- self.type_manager.initialize()
- self.helper = helper.NetworkHelper(self)
-
- def _setup_rpc(self):
- self.endpoints = []
-
- def _get_client(self, pod_name):
- if pod_name not in self.clients:
- self.clients[pod_name] = t_client.Client(pod_name)
- return self.clients[pod_name]
-
- @log_helpers.log_method_call
- def start_rpc_listeners(self):
- return []
- # NOTE(zhiyuan) use later
- # self.topic = topics.PLUGIN
- # self.conn = n_rpc.create_connection(new=True)
- # self.conn.create_consumer(self.topic, self.endpoints, fanout=False)
- # return self.conn.consume_in_threads()
-
- @staticmethod
- def _validate_availability_zones(context, az_list, external):
- if not az_list:
- return
- t_ctx = t_context.get_context_from_neutron_context(context)
- with context.session.begin():
- pods = core.query_resource(t_ctx, models.Pod, [], [])
- az_set = set(az_list)
- if external:
- known_az_set = set([pod['pod_name'] for pod in pods])
- else:
- known_az_set = set([pod['az_name'] for pod in pods])
- diff = az_set - known_az_set
- if diff:
- if external:
- raise t_exceptions.PodNotFound(pod_name=diff.pop())
- else:
- raise az_ext.AvailabilityZoneNotFound(
- availability_zone=diff.pop())
-
- @staticmethod
- def _extend_availability_zone(net_res, net_db):
- net_res[az_ext.AZ_HINTS] = az_ext.convert_az_string_to_list(
- net_db[az_ext.AZ_HINTS])
-
- common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
- attributes.NETWORKS, ['_extend_availability_zone'])
-
- @staticmethod
- def _ensure_az_set_for_external_network(context, req_data):
- external = req_data.get(external_net.EXTERNAL)
- external_set = attributes.is_attr_set(external)
- if not external_set or not external:
- return False
- if az_ext.AZ_HINTS in req_data and req_data[az_ext.AZ_HINTS]:
- return True
- t_ctx = t_context.get_context_from_neutron_context(context)
- pod, pod_az = az_ag.get_pod_by_az_tenant(
- t_ctx,
- az_name='',
- tenant_id=req_data['tenant_id'])
- if pod:
- req_data[az_ext.AZ_HINTS] = [pod['pod_name']]
- return True
- raise t_exceptions.ExternalNetPodNotSpecify()
-
- def _create_bottom_external_network(self, context, net, top_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- # use the first pod
- pod_name = net[az_ext.AZ_HINTS][0]
- pod = db_api.get_pod_by_name(t_ctx, pod_name)
- body = {
- 'network': {
- 'name': top_id,
- 'tenant_id': net['tenant_id'],
- 'admin_state_up': True,
- external_net.EXTERNAL: True
- }
- }
- provider_attrs = ('provider:network_type', 'provider:segmentation_id',
- 'provider:physical_network')
- for provider_attr in provider_attrs:
- if attributes.is_attr_set(net.get(provider_attr)):
- body['network'][provider_attr] = net[provider_attr]
-
- self._prepare_bottom_element(
- t_ctx, net['tenant_id'], pod, {'id': top_id},
- t_constants.RT_NETWORK, body)
-
- def _create_bottom_external_subnet(self, context, subnet, net, top_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- pod_name = net[az_ext.AZ_HINTS][0]
- pod = db_api.get_pod_by_name(t_ctx, pod_name)
- b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, net['id'], pod_name, t_constants.RT_NETWORK)
- body = {
- 'subnet': {
- 'name': top_id,
- 'network_id': b_net_id,
- 'tenant_id': subnet['tenant_id']
- }
- }
- attrs = ('ip_version', 'cidr', 'gateway_ip', 'allocation_pools',
- 'enable_dhcp')
- for attr in attrs:
- if attributes.is_attr_set(subnet.get(attr)):
- body['subnet'][attr] = subnet[attr]
- self._prepare_bottom_element(
- t_ctx, subnet['tenant_id'], pod, {'id': top_id},
- t_constants.RT_SUBNET, body)
-
- @property
- def _core_plugin(self):
- return self
-
- def create_network(self, context, network):
- net_data = network[attributes.NETWORK]
- tenant_id = net_data['tenant_id']
- is_external = self._ensure_az_set_for_external_network(context,
- net_data)
- if az_ext.AZ_HINTS in net_data:
- self._validate_availability_zones(context,
- net_data[az_ext.AZ_HINTS],
- is_external)
- with context.session.begin(subtransactions=True):
- res = super(TricirclePlugin, self).create_network(context, network)
- net_data['id'] = res['id']
- self.type_manager.create_network_segments(context, net_data,
- tenant_id)
- self.type_manager.extend_network_dict_provider(context, res)
- if az_ext.AZ_HINTS in net_data:
- az_hints = az_ext.convert_az_list_to_string(
- net_data[az_ext.AZ_HINTS])
- update_res = super(TricirclePlugin, self).update_network(
- context, res['id'],
- {'network': {az_ext.AZ_HINTS: az_hints}})
- res[az_ext.AZ_HINTS] = update_res[az_ext.AZ_HINTS]
- self._process_l3_create(context, res, net_data)
- # put inside a session so that when bottom operations fail the db
- # can roll back
- if is_external:
- self._create_bottom_external_network(
- context, net_data, res['id'])
- return res
-
- def delete_network(self, context, network_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, network_id, t_constants.RT_NETWORK)
- for mapping in mappings:
- pod_name = mapping[0]['pod_name']
- bottom_network_id = mapping[1]
- self._get_client(pod_name).delete_networks(
- t_ctx, bottom_network_id)
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': network_id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': mapping[0]['pod_id']}])
- except Exception:
- raise
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id',
- 'comparator': 'eq',
- 'value': network_id}])
-
- session = context.session
- with session.begin(subtransactions=True):
- self.type_manager.release_network_segments(session, network_id)
- super(TricirclePlugin, self).delete_network(context, network_id)
-
- def update_network(self, context, network_id, network):
- net_data = network[attributes.NETWORK]
- provider._raise_if_updates_provider_attributes(net_data)
-
- net = super(TricirclePlugin, self).update_network(
- context, network_id, network)
- self.type_manager.extend_network_dict_provider(context, net)
- return net
-
- def get_network(self, context, network_id, fields=None):
- net = super(TricirclePlugin, self).get_network(context, network_id,
- fields)
- self.type_manager.extend_network_dict_provider(context, net)
- return net
-
- def get_networks(self, context, filters=None, fields=None,
- sorts=None, limit=None, marker=None, page_reverse=False):
- nets = super(TricirclePlugin,
- self).get_networks(context, filters, None, sorts,
- limit, marker, page_reverse)
- self.type_manager.extend_networks_dict_provider(context, nets)
- return nets
-
- def create_subnet(self, context, subnet):
- subnet_data = subnet['subnet']
- network = self.get_network(context, subnet_data['network_id'])
- with context.session.begin(subtransactions=True):
- res = super(TricirclePlugin, self).create_subnet(context, subnet)
- # put inside a session so that when bottom operations fail the db
- # can roll back
- if network.get(external_net.EXTERNAL):
- self._create_bottom_external_subnet(
- context, res, network, res['id'])
- return res
-
- def delete_subnet(self, context, subnet_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- try:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, subnet_id, t_constants.RT_SUBNET)
- for mapping in mappings:
- pod_name = mapping[0]['pod_name']
- bottom_subnet_id = mapping[1]
- self._get_client(pod_name).delete_subnets(
- t_ctx, bottom_subnet_id)
- with t_ctx.session.begin():
- core.delete_resources(
- t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id', 'comparator': 'eq',
- 'value': subnet_id},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': mapping[0]['pod_id']}])
- except Exception:
- raise
- super(TricirclePlugin, self).delete_subnet(context, subnet_id)
-
- def update_subnet(self, context, subnet_id, subnet):
- return super(TricirclePlugin, self).update_subnet(
- context, subnet_id, subnet)
-
- def create_port(self, context, port):
- return super(TricirclePlugin, self).create_port(context, port)
-
- def update_port(self, context, port_id, port):
- # TODO(zhiyuan) handle bottom port update
- # be careful that l3_db will call update_port to update device_id of
- # router interface, we cannot directly update bottom port in this case,
- # otherwise we will fail when attaching bottom port to bottom router
- # because its device_id is not empty
- return super(TricirclePlugin, self).update_port(context, port_id, port)
-
- def delete_port(self, context, port_id, l3_port_check=True):
- t_ctx = t_context.get_context_from_neutron_context(context)
- port = super(TricirclePlugin, self).get_port(context, port_id)
- # NOTE(zhiyuan) for non-VM ports like router interfaces and dhcp
- # ports, we just remove the records in the top pod and leave the
- # deletion of bottom ports and routing entries to xjob
- if port.get('device_owner') not in NON_VM_PORT_TYPES:
- try:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_id, t_constants.RT_PORT)
- if mappings:
- pod_name = mappings[0][0]['pod_name']
- bottom_port_id = mappings[0][1]
- self._get_client(pod_name).delete_ports(
- t_ctx, bottom_port_id)
- except Exception:
- raise
- with t_ctx.session.begin():
- core.delete_resources(t_ctx, models.ResourceRouting,
- filters=[{'key': 'top_id',
- 'comparator': 'eq',
- 'value': port_id}])
- super(TricirclePlugin, self).delete_port(context, port_id)
-
- def get_port(self, context, port_id, fields=None):
- t_ctx = t_context.get_context_from_neutron_context(context)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, port_id, t_constants.RT_PORT)
- if mappings:
- pod_name = mappings[0][0]['pod_name']
- bottom_port_id = mappings[0][1]
- port = self._get_client(pod_name).get_ports(
- t_ctx, bottom_port_id)
- # TODO(zhiyuan) handle the case that bottom port does not exist
- port['id'] = port_id
- if fields:
- port = dict(
- [(k, v) for k, v in port.iteritems() if k in fields])
- if 'network_id' not in port and 'fixed_ips' not in port:
- return port
-
- bottom_top_map = {}
- with t_ctx.session.begin():
- for resource in (t_constants.RT_SUBNET, t_constants.RT_NETWORK,
- t_constants.RT_ROUTER):
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource}]
- routes = core.query_resource(
- t_ctx, models.ResourceRouting, route_filters, [])
- for route in routes:
- if route['bottom_id']:
- bottom_top_map[
- route['bottom_id']] = route['top_id']
- self._map_port_from_bottom_to_top(port, bottom_top_map)
- return port
- else:
- return super(TricirclePlugin, self).get_port(context,
- port_id, fields)
-
- @staticmethod
- def _apply_ports_filters(query, model, filters):
- if not filters:
- return query
- for key, value in filters.iteritems():
- column = getattr(model, key, None)
- if column is not None:
- if not value:
- query = query.filter(sql.false())
- return query
- query = query.filter(column.in_(value))
- return query
-
- def _get_ports_from_db_with_number(self, context,
- number, last_port_id, top_bottom_map,
- filters=None):
- query = context.session.query(models_v2.Port)
- # set the step to twice the requested number to have a better chance
- # of obtaining all the ports we need
- search_step = number * 2
- if search_step < 100:
- search_step = 100
- query = self._apply_ports_filters(query, models_v2.Port, filters)
- query = sqlalchemyutils.paginate_query(
- query, models_v2.Port, search_step, [('id', False)],
- # create a dummy port object
- marker_obj=models_v2.Port(
- id=last_port_id) if last_port_id else None)
- total = 0
- ret = []
- for port in query:
- total += 1
- if port['id'] not in top_bottom_map:
- ret.append(self._make_port_dict(port))
- if len(ret) == number:
- return ret
- # NOTE(zhiyuan) we have traversed all the ports
- if total < search_step:
- return ret
- else:
- ret.extend(self._get_ports_from_db_with_number(
- context, number - len(ret), ret[-1]['id'], top_bottom_map,
- filters))
- return ret
-
- def _get_ports_from_top_with_number(self, context,
- number, last_port_id, top_bottom_map,
- filters=None):
- with context.session.begin():
- ret = self._get_ports_from_db_with_number(
- context, number, last_port_id, top_bottom_map, filters)
- return {'ports': ret}
-
- def _get_ports_from_top(self, context, top_bottom_map, filters=None):
- with context.session.begin():
- ret = []
- query = context.session.query(models_v2.Port)
- query = self._apply_ports_filters(query, models_v2.Port, filters)
- for port in query:
- if port['id'] not in top_bottom_map:
- ret.append(self._make_port_dict(port))
- return ret
-
- @staticmethod
- def _map_port_from_bottom_to_top(port, bottom_top_map):
- if 'network_id' in port and port['network_id'] in bottom_top_map:
- port['network_id'] = bottom_top_map[port['network_id']]
- if 'fixed_ips' in port:
- for ip in port['fixed_ips']:
- if ip['subnet_id'] in bottom_top_map:
- ip['subnet_id'] = bottom_top_map[ip['subnet_id']]
- if 'device_id' in port and port['device_id'] in bottom_top_map:
- port['device_id'] = bottom_top_map[port['device_id']]
-
- @staticmethod
- def _map_ports_from_bottom_to_top(ports, bottom_top_map):
- # TODO(zhiyuan) judge if it's fine to remove unmapped port
- port_list = []
- for port in ports:
- if port['id'] not in bottom_top_map:
- continue
- if port.get('device_owner') in NON_VM_PORT_TYPES:
- continue
- port['id'] = bottom_top_map[port['id']]
- TricirclePlugin._map_port_from_bottom_to_top(port, bottom_top_map)
- port_list.append(port)
- return port_list
-
- @staticmethod
- def _get_map_filter_ids(key, value, pod_id, top_bottom_map):
- if key in ('id', 'network_id', 'device_id'):
- id_list = []
- for _id in value:
- key = '%s_%s' % (pod_id, _id)
- if _id in top_bottom_map:
- id_list.append(top_bottom_map[_id])
- elif key in top_bottom_map:
- id_list.append(top_bottom_map[key])
- else:
- id_list.append(_id)
- return id_list
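An illustration (with made-up ids) of the mapping convention this helper relies on: port ids are keyed directly in top_bottom_map, while other resources are keyed per pod as '<pod_id>_<top_id>', because one top network, subnet or router may map to a different bottom resource in each pod:

    top_bottom_map = {
        'top-port-1': 'bottom-port-1',          # port: keyed by top id alone
        'pod-a_top-net-1': 'bottom-net-a',      # network: keyed per pod
        'pod-b_top-net-1': 'bottom-net-b',
    }

    def to_bottom(top_id, pod_id):
        # same lookup order as _get_map_filter_ids above: direct key first,
        # then the pod-scoped key, then fall back to the top id itself
        return top_bottom_map.get(
            top_id,
            top_bottom_map.get('%s_%s' % (pod_id, top_id), top_id))

    assert to_bottom('top-port-1', 'pod-a') == 'bottom-port-1'
    assert to_bottom('top-net-1', 'pod-a') == 'bottom-net-a'
    assert to_bottom('top-net-1', 'pod-b') == 'bottom-net-b'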
-
- def _get_ports_from_pod_with_number(self, context,
- current_pod, number, last_port_id,
- bottom_top_map, top_bottom_map,
- filters=None):
- # NOTE(zhiyuan) last_port_id is a top id, and the ids in the returned
- # port dicts are top ids as well. when interacting with a bottom pod,
- # we need to map top to bottom in the request and bottom to top in
- # the response
-
- t_ctx = t_context.get_context_from_neutron_context(context)
- q_client = self._get_client(
- current_pod['pod_name']).get_native_client('port', t_ctx)
- params = {'limit': number}
- if filters:
- _filters = dict(filters)
- for key, value in _filters.iteritems():
- id_list = self._get_map_filter_ids(
- key, value, current_pod['pod_id'], top_bottom_map)
- if id_list:
- _filters[key] = id_list
- params.update(_filters)
- if last_port_id:
- # map top id to bottom id in request
- params['marker'] = top_bottom_map[last_port_id]
- res = q_client.get(q_client.ports_path, params=params)
- # map bottom id to top id in client response
- mapped_port_list = self._map_ports_from_bottom_to_top(res['ports'],
- bottom_top_map)
- del res['ports']
- res['ports'] = mapped_port_list
-
- if len(res['ports']) == number:
- return res
- else:
- next_pod = db_api.get_next_bottom_pod(
- t_ctx, current_pod_id=current_pod['pod_id'])
- if not next_pod:
- # _get_ports_from_top_with_number uses top id, no need to map
- next_res = self._get_ports_from_top_with_number(
- context, number - len(res['ports']), '', top_bottom_map,
- filters)
- next_res['ports'].extend(res['ports'])
- return next_res
- else:
- # _get_ports_from_pod_with_number itself returns top id, no
- # need to map
- next_res = self._get_ports_from_pod_with_number(
- context, next_pod, number - len(res['ports']), '',
- bottom_top_map, top_bottom_map, filters)
- next_res['ports'].extend(res['ports'])
- return next_res
-
- def get_ports(self, context, filters=None, fields=None, sorts=None,
- limit=None, marker=None, page_reverse=False):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- non_vm_ports = super(TricirclePlugin, self).get_ports(
- context, {'device_owner': NON_VM_PORT_TYPES}, ['id'])
- non_vm_port_ids = set([port['id'] for port in non_vm_ports])
-
- with t_ctx.session.begin():
- bottom_top_map = {}
- top_bottom_map = {}
- for resource in (t_constants.RT_PORT, t_constants.RT_SUBNET,
- t_constants.RT_NETWORK, t_constants.RT_ROUTER):
- route_filters = [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': resource}]
- routes = core.query_resource(t_ctx, models.ResourceRouting,
- route_filters, [])
-
- for route in routes:
- if route['top_id'] in non_vm_port_ids:
- continue
- if route['bottom_id']:
- bottom_top_map[route['bottom_id']] = route['top_id']
- if route['resource_type'] == t_constants.RT_PORT:
- key = route['top_id']
- else:
- # for non port resource, one top resource is
- # possible to be mapped to more than one bottom
- # resource
- key = '%s_%s' % (route['pod_id'], route['top_id'])
- top_bottom_map[key] = route['bottom_id']
-
- if limit:
- if marker:
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, marker, t_constants.RT_PORT)
- # NOTE(zhiyuan) if mapping exists, we retrieve port information
- # from bottom, otherwise from top
- if mappings:
- pod_id = mappings[0][0]['pod_id']
- current_pod = db_api.get_pod(t_ctx, pod_id)
- res = self._get_ports_from_pod_with_number(
- context, current_pod, limit, marker,
- bottom_top_map, top_bottom_map, filters)
- else:
- res = self._get_ports_from_top_with_number(
- context, limit, marker, top_bottom_map, filters)
-
- else:
- current_pod = db_api.get_next_bottom_pod(t_ctx)
- # only top pod registered
- if current_pod:
- res = self._get_ports_from_pod_with_number(
- context, current_pod, limit, '',
- bottom_top_map, top_bottom_map, filters)
- else:
- res = self._get_ports_from_top_with_number(
- context, limit, marker, top_bottom_map, filters)
-
- # NOTE(zhiyuan) we can safely return ports, neutron controller will
- # generate links for us so we do not need to worry about it.
- #
- # _get_ports_from_pod_with_number already traverses all the pods
- # to try to get ports equal to limit, so pod is transparent for
- # controller.
- return res['ports']
- else:
- ret = []
- pods = db_api.list_pods(t_ctx)
- for pod in pods:
- if not pod['az_name']:
- continue
- _filters = []
- if filters:
- for key, value in filters.iteritems():
- id_list = self._get_map_filter_ids(
- key, value, pod['pod_id'], top_bottom_map)
- if id_list:
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': id_list})
- else:
- _filters.append({'key': key,
- 'comparator': 'eq',
- 'value': value})
- client = self._get_client(pod['pod_name'])
- ret.extend(client.list_ports(t_ctx, filters=_filters))
- ret = self._map_ports_from_bottom_to_top(ret, bottom_top_map)
- ret.extend(self._get_ports_from_top(context, top_bottom_map,
- filters))
- return ret
-
- def create_router(self, context, router):
- return super(TricirclePlugin, self).create_router(context, router)
-
- def delete_router(self, context, _id):
- super(TricirclePlugin, self).delete_router(context, _id)
-
- def _prepare_top_element(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- return self.helper.prepare_top_element(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def _prepare_bottom_element(self, t_ctx,
- project_id, pod, ele, _type, body):
- return self.helper.prepare_bottom_element(
- t_ctx, project_id, pod, ele, _type, body)
-
- def _get_bridge_subnet_pool_id(self, t_ctx, q_ctx, project_id, pod, is_ew):
- if is_ew:
- pool_name = t_constants.ew_bridge_subnet_pool_name
- pool_cidr = '100.0.0.0/9'
- else:
- pool_name = t_constants.ns_bridge_subnet_pool_name
- pool_cidr = '100.128.0.0/9'
- pool_ele = {'id': pool_name}
- body = {'subnetpool': {'tenant_id': project_id,
- 'name': pool_name,
- 'shared': True,
- 'is_default': False,
- 'prefixes': [pool_cidr]}}
-
- is_admin = q_ctx.is_admin
- q_ctx.is_admin = True
- _, pool_id = self._prepare_top_element(t_ctx, q_ctx, project_id, pod,
- pool_ele, 'subnetpool', body)
- q_ctx.is_admin = is_admin
-
- return pool_id
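As a quick standalone sanity check of the CIDR layout above (using netaddr): the east-west pool 100.0.0.0/9 and the north-south pool 100.128.0.0/9 are the two non-overlapping halves of 100.0.0.0/8, so E-W and N-S bridge subnets can never collide:

    import netaddr

    ew_pool = netaddr.IPNetwork('100.0.0.0/9')
    ns_pool = netaddr.IPNetwork('100.128.0.0/9')

    assert ew_pool.last < ns_pool.first                 # no overlap
    assert ew_pool.size + ns_pool.size == netaddr.IPNetwork('100.0.0.0/8').size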
-
- def _get_bridge_network_subnet(self, t_ctx, q_ctx, project_id, pod,
- pool_id, is_ew):
- if is_ew:
- net_name = t_constants.ew_bridge_net_name % project_id
- net_ele = {'id': net_name}
- subnet_name = t_constants.ew_bridge_subnet_name % project_id
- subnet_ele = {'id': subnet_name}
- else:
- net_name = t_constants.ns_bridge_net_name % project_id
- net_ele = {'id': net_name}
- subnet_name = t_constants.ns_bridge_subnet_name % project_id
- subnet_ele = {'id': subnet_name}
-
- is_admin = q_ctx.is_admin
- q_ctx.is_admin = True
-
- net_body = {'network': {
- 'tenant_id': project_id,
- 'name': net_name,
- 'shared': False,
- 'admin_state_up': True,
- provider.NETWORK_TYPE: cfg.CONF.tricircle.bridge_network_type}}
- _, net_id = self._prepare_top_element(
- t_ctx, q_ctx, project_id, pod, net_ele, 'network', net_body)
-
- subnet_body = {
- 'subnet': {
- 'network_id': net_id,
- 'name': subnet_name,
- 'prefixlen': 24,
- 'ip_version': 4,
- 'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
- 'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
- 'host_routes': attributes.ATTR_NOT_SPECIFIED,
- 'cidr': attributes.ATTR_NOT_SPECIFIED,
- 'subnetpool_id': pool_id,
- 'enable_dhcp': False,
- 'tenant_id': project_id
- }
- }
- _, subnet_id = self._prepare_top_element(
- t_ctx, q_ctx,
- project_id, pod, subnet_ele, 'subnet', subnet_body)
-
- q_ctx.is_admin = is_admin
-
- net = self.get_network(q_ctx, net_id)
- subnet = self.get_subnet(q_ctx, subnet_id)
-
- return net, subnet
-
- def _get_bridge_interface(self, t_ctx, q_ctx, project_id, pod,
- t_net_id, b_router_id, b_port_id, is_ew):
- port_id = self.helper.get_bridge_interface(t_ctx, q_ctx, project_id,
- pod, t_net_id, b_router_id,
- b_port_id, is_ew)
- return super(TricirclePlugin, self).get_port(q_ctx, port_id)
-
- def _get_bottom_bridge_elements(self, q_ctx, project_id,
- pod, t_net, is_external, t_subnet, t_port):
- t_ctx = t_context.get_context_from_neutron_context(q_ctx)
- return self.helper.get_bottom_bridge_elements(
- t_ctx, project_id, pod, t_net, is_external, t_subnet, t_port)
-
- def _get_net_pods_by_interface_info(self, t_ctx, q_ctx, add_by_port,
- interface_info):
- if add_by_port:
- port = self.get_port(q_ctx, interface_info['port_id'])
- net_id = port['network_id']
- else:
- subnet = self.get_subnet(q_ctx, interface_info['subnet_id'])
- net_id = subnet['network_id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, net_id, t_constants.RT_NETWORK)
- return net_id, [mapping[0] for mapping in mappings]
-
- # NOTE(zhiyuan) the original implementation in l3_db uses the port returned
- # by get_port in the core plugin for the check; change it to the base
- # plugin, since only top port information should be checked.
- def _check_router_port(self, context, port_id, device_id):
- port = super(TricirclePlugin, self).get_port(context, port_id)
- if port['device_id'] != device_id:
- raise exceptions.PortInUse(net_id=port['network_id'],
- port_id=port['id'],
- device_id=port['device_id'])
- if not port['fixed_ips']:
- msg = _('Router port must have at least one fixed IP')
- raise exceptions.BadRequest(resource='router', msg=msg)
- return port
-
- def _add_router_gateway(self, context, router_id, router_data):
- # get top external network information
- ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id')
- t_ctx = t_context.get_context_from_neutron_context(context)
- network = self.get_network(context, ext_net_id)
-
- # when creating external network in top pod, pod name is passed via
- # az hint parameter, so tricircle plugin knows where to create the
- # corresponding bottom external network. here we get bottom external
- # network ID from resource routing table.
- if not network.get(az_ext.AZ_HINTS):
- raise t_exceptions.ExternalNetPodNotSpecify()
- pod_name = network[az_ext.AZ_HINTS][0]
- pod = db_api.get_pod_by_name(t_ctx, pod_name)
- b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, ext_net_id, pod_name, t_constants.RT_NETWORK)
-
- # create corresponding bottom router in the pod where external network
- # is located.
- t_router = self._get_router(context, router_id)
-
- # TODO(zhiyuan) decide router is distributed or not from pod table
- # currently "distributed" is set to False, should add a metadata field
- # to pod table, and decide distributed or not from the metadata later
- body = {'router': {'name': router_id,
- 'distributed': False}}
- _, b_router_id = self._prepare_bottom_element(
- t_ctx, t_router['tenant_id'], pod, t_router,
- t_constants.RT_ROUTER, body)
-
- # both router and external network in bottom pod are ready, attach
- # external network to router in bottom pod.
- b_client = self._get_client(pod_name)
- t_info = router_data[l3.EXTERNAL_GW_INFO]
- b_info = {'network_id': b_net_id}
- if 'enable_snat' in t_info:
- b_info['enable_snat'] = t_info['enable_snat']
- if 'external_fixed_ips' in t_info:
- fixed_ips = []
- for ip in t_info['external_fixed_ips']:
- t_subnet_id = ip['subnet_id']
- b_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_subnet_id, pod_name,
- t_constants.RT_SUBNET)
- fixed_ips.append({'subnet_id': b_subnet_id,
- 'ip_address': ip['ip_address']})
- b_info['external_fixed_ips'] = fixed_ips
- b_client.action_routers(t_ctx, 'add_gateway', b_router_id, b_info)
-
- # when the internal network (providing the fixed ip) and the external
- # network (providing the floating ip) are in different bottom pods, we
- # utilize a bridge network to connect these two networks. here we
- # create the bridge network.
- t_pod = db_api.get_top_pod(t_ctx)
- project_id = t_router['tenant_id']
- pool_id = self._get_bridge_subnet_pool_id(
- t_ctx, context, None, t_pod, False)
- t_bridge_net, t_bridge_subnet = self._get_bridge_network_subnet(
- t_ctx, context, project_id, t_pod, pool_id, False)
- (_, _, b_bridge_subnet_id,
- b_bridge_net_id) = self._get_bottom_bridge_elements(
- context, project_id, pod, t_bridge_net, False, t_bridge_subnet,
- None)
-
- # here we attach the bridge network to the router in bottom pod. to
- # make this method reentrant, we check if the interface is already
- # attached before attaching the interface.
- def _is_bridge_network_attached():
- interfaces = b_client.list_ports(t_ctx,
- filters=[{'key': 'device_id',
- 'comparator': 'eq',
- 'value': b_router_id}])
- for interface in interfaces:
- for fixed_ip in interface['fixed_ips']:
- if fixed_ip['subnet_id'] == b_bridge_subnet_id:
- return True
- return False
-
- is_attach = _is_bridge_network_attached()
- if not is_attach:
- b_client.action_routers(t_ctx, 'add_interface', b_router_id,
- {'subnet_id': b_bridge_subnet_id})
-
- def _remove_router_gateway(self, context, router_id):
- t_ctx = t_context.get_context_from_neutron_context(context)
- t_router = self._get_router(context, router_id)
- gw_port = t_router.gw_port
- if not gw_port:
- return
- ext_net_id = gw_port['network_id']
- t_network = self.get_network(context, ext_net_id)
- if az_ext.AZ_HINTS not in t_network:
- raise t_exceptions.ExternalNetPodNotSpecify()
- if not t_network[az_ext.AZ_HINTS]:
- raise t_exceptions.ExternalNetPodNotSpecify()
-
- pod_name = t_network[az_ext.AZ_HINTS][0]
- b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, router_id, pod_name, t_constants.RT_ROUTER)
- b_client = self._get_client(pod_name)
- b_client.action_routers(t_ctx, 'remove_gateway', b_router_id)
-
- def update_router(self, context, router_id, router):
- # TODO(zhiyuan) handle the case that SNAT is disabled
- # and check if bridge network solution works with IPv6
- router_data = copy.deepcopy(router['router'])
- need_update_bottom = False
- is_add = False
- if attributes.is_attr_set(router_data.get(l3.EXTERNAL_GW_INFO)):
- need_update_bottom = True
- ext_net_id = router_data[l3.EXTERNAL_GW_INFO].get('network_id')
- if ext_net_id:
- is_add = True
- # TODO(zhiyuan) solve ip address conflict issue
- # if the user creates a floating ip before setting the router gateway,
- # we may trigger an ip address conflict here. say the external cidr is
- # 163.3.124.0/24: when a floating ip is created before the router
- # gateway is set, the gateway ip will be 163.3.124.3 since 163.3.124.2
- # is taken by the floating ip. however, the bottom floating ip is not
- # created when the top floating ip is created, so the gateway ip in the
- # bottom pod is still 163.3.124.2, thus a conflict may occur.
- #
- # until this issue is solved, users should set the router gateway
- # before creating floating ips.
- if not need_update_bottom:
- return super(TricirclePlugin, self).update_router(
- context, router_id, router)
- if is_add:
- ret = super(TricirclePlugin, self).update_router(
- context, router_id, router)
- router_data[l3.EXTERNAL_GW_INFO].update(ret[l3.EXTERNAL_GW_INFO])
- self._add_router_gateway(context, router_id, router_data)
- return ret
- else:
- self._remove_router_gateway(context, router_id)
- return super(TricirclePlugin, self).update_router(
- context, router_id, router)
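For reference, illustrative update_router request bodies (with made-up ids) that drive the two branches above: setting the gateway carries a network_id and takes the is_add path, while an empty external_gateway_info clears the gateway and takes the removal path:

    set_gateway = {'router': {'external_gateway_info': {
        'network_id': 'top-ext-net-uuid',
        'enable_snat': True}}}

    clear_gateway = {'router': {'external_gateway_info': {}}}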
-
- def add_router_interface(self, context, router_id, interface_info):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- router = self._get_router(context, router_id)
- project_id = router['tenant_id']
- add_by_port, _ = self._validate_interface_info(interface_info)
-
- net_id, b_pods = self._get_net_pods_by_interface_info(
- t_ctx, context, add_by_port, interface_info)
- t_pod = db_api.get_top_pod(t_ctx)
- assert t_pod
-
- # bridge network for E-W networking
- pool_id = self._get_bridge_subnet_pool_id(
- t_ctx, context, None, t_pod, True)
- self._get_bridge_network_subnet(
- t_ctx, context, project_id, t_pod, pool_id, True)
-
- # bridge network for N-S networking
- ext_nets = self.get_networks(context, {external_net.EXTERNAL: [True]})
- if not ext_nets:
- need_ns_bridge = False
- else:
- ext_net_pod_names = set(
- [ext_net[az_ext.AZ_HINTS][0] for ext_net in ext_nets])
- need_ns_bridge = False
- for b_pod in b_pods:
- if b_pod['pod_name'] not in ext_net_pod_names:
- need_ns_bridge = True
- break
- if need_ns_bridge:
- pool_id = self._get_bridge_subnet_pool_id(
- t_ctx, context, None, t_pod, False)
- self._get_bridge_network_subnet(
- t_ctx, context, project_id, t_pod, pool_id, False)
-
- return_info = super(TricirclePlugin, self).add_router_interface(
- context, router_id, interface_info)
- if not b_pods:
- return return_info
- try:
- if len(b_pods) == 1:
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, router_id, b_pods[0]['pod_id'])
- else:
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED)
- except Exception:
- # NOTE(zhiyuan) we fail to submit the job, so bottom router
- # operations are not started, it's safe for us to remove the top
- # router interface
- super(TricirclePlugin, self).remove_router_interface(
- context, router_id, interface_info)
- raise
- return return_info
-
- def create_floatingip(self, context, floatingip):
- # create bottom fip when associating fixed ip
- return super(TricirclePlugin, self).create_floatingip(
- context, floatingip,
- initial_status=constants.FLOATINGIP_STATUS_DOWN)
-
- def remove_router_interface(self, context, router_id, interface_info):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- add_by_port, _ = self._validate_interface_info(interface_info,
- for_removal=True)
- net_id, b_pods = self._get_net_pods_by_interface_info(
- t_ctx, context, add_by_port, interface_info)
-
- return_info = super(TricirclePlugin, self).remove_router_interface(
- context, router_id, interface_info)
- if not b_pods:
- return return_info
- try:
- if len(b_pods) == 1:
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, router_id, b_pods[0]['pod_id'])
- else:
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, router_id, t_constants.POD_NOT_SPECIFIED)
- except Exception:
- # NOTE(zhiyuan) we failed to submit the job, so if a bottom router
- # interface exists, it will not be deleted; after we add the top
- # interface back, the bottom router setup job will reuse the
- # existing bottom interface.
- #
- # we don't create a routing entry between top interface and bottom
- # interface, instead, when we create bottom subnet, we specify the
- # ip of the top interface as the gateway ip of the bottom subnet.
- # later when we attach the bottom subnet to bottom router, neutron
- # server in bottom pod will create the bottom interface using the
- # gateway ip automatically.
- interface_info = {'subnet_id': return_info['subnet_id']}
- super(TricirclePlugin, self).add_router_interface(
- context, router_id, interface_info)
- raise
- return return_info
-
- @staticmethod
- def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
- fip_address, port_id):
- try:
- client.create_floatingips(
- t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
- 'floating_ip_address': fip_address,
- 'port_id': port_id}})
- except q_cli_exceptions.IpAddressInUseClient:
- fips = client.list_floatingips(t_ctx,
- [{'key': 'floating_ip_address',
- 'comparator': 'eq',
- 'value': fip_address}])
- # NOTE(zhiyuan) if the internal port associated with the existing
- # fip is what we expect, just ignore this exception; or if the
- # existing fip is not associated with any internal port, update the
- # fip to add association
- if not fips:
- # this is rare case that we got IpAddressInUseClient exception
- # a second ago but now the floating ip is missing
- raise t_network_exc.BottomPodOperationFailure(
- resource='floating ip', pod_name=pod['pod_name'])
- associated_port_id = fips[0].get('port_id')
- if associated_port_id == port_id:
- pass
- elif not associated_port_id:
- client.update_floatingips(t_ctx, fips[0]['id'],
- {'floatingip': {'port_id': port_id}})
- else:
- raise
-
- @staticmethod
- def _rollback_floatingip_data(context, _id, org_data):
- """Rollback the data of floating ip object to the original one
-
- :param context: request context
- :param _id: ID of the floating ip
- :param org_data: data of floating ip we rollback to
- :return: None
- """
- try:
- with context.session.begin():
- fip_qry = context.session.query(l3_db.FloatingIP)
- floating_ips = fip_qry.filter_by(id=_id)
- for floating_ip in floating_ips:
- floating_ip.update(org_data)
- except Exception as e:
- # log the exception and re-raise it
- LOG.exception(_LE('Fail to rollback floating ip data, reason: '
- '%(reason)s') % {'reason': e.message})
- raise
-
- def update_floatingip(self, context, _id, floatingip):
- """Update floating ip object in top and bottom pods
-
- :param context: request context
- :param _id: ID of the floating ip
- :param floatingip: data of floating ip we update to
- :return: updated floating ip object
- """
- org_floatingip_dict = self._make_floatingip_dict(
- self._get_floatingip(context, _id))
-
- res = super(TricirclePlugin, self).update_floatingip(
- context, _id, floatingip)
- try:
- if floatingip['floatingip']['port_id']:
- self._associate_floatingip(context, _id, floatingip)
- else:
- self._disassociate_floatingip(context, org_floatingip_dict)
- return res
- except Exception as e:
- # NOTE(zhiyuan) when exception occurs, we update floating ip object
- # to rollback fixed_port_id, fixed_ip_address, router_id
- LOG.exception(
- _LE('Fail to update floating ip, reason: '
- '%(reason)s, rollback floating ip data') % {
- 'reason': e.message})
- org_data = {
- 'fixed_port_id': org_floatingip_dict['port_id'],
- 'fixed_ip_address': org_floatingip_dict['fixed_ip_address'],
- 'router_id': org_floatingip_dict['router_id']}
- self._rollback_floatingip_data(context, _id, org_data)
- raise
-
- def _associate_floatingip(self, context, _id, floatingip):
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- fip = floatingip['floatingip']
- floatingip_db = self._get_floatingip(context, _id)
- int_port_id = fip['port_id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, int_port_id, t_constants.RT_PORT)
- if not mappings:
- # mapping does not exist, meaning that the bottom port has not
- # been created. we just return and leave the work of setting up the
- # bottom floating ip to the nova api gateway
- return
-
- int_net_pod, b_int_port_id = mappings[0]
- int_port = self.get_port(context, int_port_id)
- net_id = int_port['network_id']
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, floatingip_db['router_id'], int_net_pod['pod_id'])
-
- def _disassociate_floatingip(self, context, ori_floatingip_db):
- if not ori_floatingip_db['port_id']:
- # floating ip has not been associated with fixed ip, no
- # operation in bottom pod needed
- return
-
- t_ctx = t_context.get_context_from_neutron_context(context)
-
- t_int_port_id = ori_floatingip_db['port_id']
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_int_port_id, t_constants.RT_PORT)
- if not mappings:
- # the floating ip in the top pod is associated, but there is no
- # mapping between the top and bottom internal ports. this is an
- # inconsistent state, but since the bottom internal port does not
- # exist, no operation in the bottom pod is required
- LOG.warning(_LI('Internal port associated with floating ip '
- 'does not exist in bottom pod.'))
- return
-
- b_int_net_pod, b_int_port_id = mappings[0]
- int_port = self.get_port(context, t_int_port_id)
- net_id = int_port['network_id']
- self.xjob_handler.setup_bottom_router(
- t_ctx, net_id, ori_floatingip_db['router_id'],
- b_int_net_pod['pod_id'])
diff --git a/tricircle/network/security_groups.py b/tricircle/network/security_groups.py
deleted file mode 100644
index 65cde2f..0000000
--- a/tricircle/network/security_groups.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from neutron.db import securitygroups_db
-import neutronclient.common.exceptions as q_exceptions
-
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-import tricircle.network.exceptions as n_exceptions
-
-
-class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
-
- @staticmethod
- def _safe_create_security_group_rule(t_context, client, body):
- try:
- client.create_security_group_rules(t_context, body)
- except q_exceptions.Conflict:
- return
-
- @staticmethod
- def _safe_delete_security_group_rule(t_context, client, _id):
- try:
- client.delete_security_group_rules(t_context, _id)
- except q_exceptions.NotFound:
- return
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- def create_security_group_rule(self, q_context, security_group_rule):
- rule = security_group_rule['security_group_rule']
- if rule['remote_group_id']:
- raise n_exceptions.RemoteGroupNotSupported()
- sg_id = rule['security_group_id']
- sg = self.get_security_group(q_context, sg_id)
- if sg['name'] == 'default':
- raise n_exceptions.DefaultGroupUpdateNotSupported()
-
- new_rule = super(TricircleSecurityGroupMixin,
- self).create_security_group_rule(q_context,
- security_group_rule)
-
- t_context = context.get_context_from_neutron_context(q_context)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_context, sg_id, constants.RT_SG)
-
- try:
- for pod, b_sg_id in mappings:
- client = self._get_client(pod['pod_name'])
- rule['security_group_id'] = b_sg_id
- self._safe_create_security_group_rule(
- t_context, client, {'security_group_rule': rule})
- except Exception:
- super(TricircleSecurityGroupMixin,
- self).delete_security_group_rule(q_context, new_rule['id'])
- raise n_exceptions.BottomPodOperationFailure(
- resource='security group rule', pod_name=pod['pod_name'])
- return new_rule
-
- def delete_security_group_rule(self, q_context, _id):
- rule = self.get_security_group_rule(q_context, _id)
- if rule['remote_group_id']:
- raise n_exceptions.RemoteGroupNotSupported()
- sg_id = rule['security_group_id']
- sg = self.get_security_group(q_context, sg_id)
- if sg['name'] == 'default':
- raise n_exceptions.DefaultGroupUpdateNotSupported()
-
- t_context = context.get_context_from_neutron_context(q_context)
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_context, sg_id, constants.RT_SG)
-
- try:
- for pod, b_sg_id in mappings:
- client = self._get_client(pod['pod_name'])
- rule['security_group_id'] = b_sg_id
- b_sg = client.get_security_groups(t_context, b_sg_id)
- for b_rule in b_sg['security_group_rules']:
- if not self._compare_rule(b_rule, rule):
- continue
- self._safe_delete_security_group_rule(t_context, client,
- b_rule['id'])
- break
- except Exception:
- raise n_exceptions.BottomPodOperationFailure(
- resource='security group rule', pod_name=pod['pod_name'])
-
- super(TricircleSecurityGroupMixin,
- self).delete_security_group_rule(q_context, _id)
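
The deleted mixin above locates the bottom-pod counterpart of a top-level rule by comparing a fixed set of fields rather than by id, and it treats Conflict on create and NotFound on delete as success so the sync stays idempotent. A minimal standalone sketch of the field-matching step, using hypothetical rule dictionaries (the helper name rules_match and the sample values are illustrative only):

RULE_KEYS = ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
             'port_range_max', 'port_range_min')


def rules_match(rule1, rule2):
    # Two rules are considered the same when every compared field agrees;
    # id and security_group_id are deliberately ignored.
    return all(rule1[key] == rule2[key] for key in RULE_KEYS)


# Hypothetical example data, not taken from the patch.
top_rule = {'direction': 'ingress', 'remote_ip_prefix': '10.0.1.0/24',
            'protocol': 'tcp', 'ethertype': 'IPv4',
            'port_range_max': 22, 'port_range_min': 22}
bottom_rule = dict(top_rule, id='bottom-rule-id', security_group_id='b-sg-id')
assert rules_match(top_rule, bottom_rule)
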
diff --git a/tricircle/nova_apigw/controllers/server.py b/tricircle/nova_apigw/controllers/server.py
deleted file mode 100644
index 9e9a55e..0000000
--- a/tricircle/nova_apigw/controllers/server.py
+++ /dev/null
@@ -1,679 +0,0 @@
-# Copyright (c) 2015 Huawei Tech. Co., Ltd.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-import pecan
-from pecan import expose
-from pecan import rest
-import six
-
-import oslo_log.log as logging
-
-import neutronclient.common.exceptions as q_exceptions
-
-from tricircle.common import az_ag
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-import tricircle.common.lock_handle as t_lock
-from tricircle.common.quota import QUOTAS
-from tricircle.common import utils
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import helper
-
-LOG = logging.getLogger(__name__)
-
-MAX_METADATA_KEY_LENGTH = 255
-MAX_METADATA_VALUE_LENGTH = 255
-
-
-class ServerController(rest.RestController):
-
- def __init__(self, project_id):
- self.project_id = project_id
- self.clients = {constants.TOP: t_client.Client()}
- self.helper = helper.NetworkHelper()
- self.xjob_handler = xrpcapi.XJobAPI()
-
- def _get_client(self, pod_name=constants.TOP):
- if pod_name not in self.clients:
- self.clients[pod_name] = t_client.Client(pod_name)
- return self.clients[pod_name]
-
- def _get_all(self, context, params):
- filters = [{'key': key,
- 'comparator': 'eq',
- 'value': value} for key, value in params.iteritems()]
- ret = []
- pods = db_api.list_pods(context)
- for pod in pods:
- if not pod['az_name']:
- continue
- client = self._get_client(pod['pod_name'])
- servers = client.list_servers(context, filters=filters)
- self._remove_fip_info(servers)
- ret.extend(servers)
- return ret
-
- @staticmethod
- def _construct_brief_server_entry(server):
- return {'id': server['id'],
- 'name': server.get('name'),
- 'links': server.get('links')}
-
- @staticmethod
- def _transform_network_name(server):
- if 'addresses' not in server:
- return
- keys = [key for key in server['addresses'].iterkeys()]
- for key in keys:
- value = server['addresses'].pop(key)
- network_name = key.split('#')[1]
- server['addresses'][network_name] = value
- return server
-
- @expose(generic=True, template='json')
- def get_one(self, _id, **kwargs):
- context = t_context.extract_context_from_environ()
-
- if _id == 'detail':
- return {'servers': [self._transform_network_name(
- server) for server in self._get_all(context, kwargs)]}
-
- mappings = db_api.get_bottom_mappings_by_top_id(
- context, _id, constants.RT_SERVER)
- if not mappings:
- return utils.format_nova_error(
- 404, _('Instance %s could not be found.') % _id)
- pod, bottom_id = mappings[0]
- client = self._get_client(pod['pod_name'])
- server = client.get_servers(context, bottom_id)
- if not server:
- return utils.format_nova_error(
- 404, _('Instance %s could not be found.') % _id)
- else:
- self._transform_network_name(server)
- return {'server': server}
-
- @expose(generic=True, template='json')
- def get_all(self, **kwargs):
- context = t_context.extract_context_from_environ()
- return {'servers': [self._construct_brief_server_entry(
- server) for server in self._get_all(context, kwargs)]}
-
- @expose(generic=True, template='json')
- def post(self, **kw):
- context = t_context.extract_context_from_environ()
-
- if 'server' not in kw:
- return utils.format_nova_error(
- 400, _('server is not set'))
-
- az = kw['server'].get('availability_zone', '')
-
- pod, b_az = az_ag.get_pod_by_az_tenant(
- context, az, self.project_id)
- if not pod:
- return utils.format_nova_error(
- 500, _('Pod not configured or scheduling failure'))
-
- t_server_dict = kw['server']
- self._process_metadata_quota(context, t_server_dict)
- self._process_injected_file_quota(context, t_server_dict)
-
- server_body = self._get_create_server_body(kw['server'], b_az)
-
- top_client = self._get_client()
-
- sg_filters = [{'key': 'tenant_id', 'comparator': 'eq',
- 'value': self.project_id}]
- top_sgs = top_client.list_security_groups(context, sg_filters)
- top_sg_map = dict((sg['name'], sg) for sg in top_sgs)
-
- if 'security_groups' not in kw['server']:
- security_groups = ['default']
- else:
- security_groups = []
- for sg in kw['server']['security_groups']:
- if 'name' not in sg:
- return utils.format_nova_error(
- 400, _('Invalid input for field/attribute'))
- if sg['name'] not in top_sg_map:
- return utils.format_nova_error(
- 400, _('Unable to find security_group with name or id '
- '%s') % sg['name'])
- security_groups.append(sg['name'])
- t_sg_ids, b_sg_ids, is_news = self._handle_security_group(
- context, pod, top_sg_map, security_groups)
-
- server_body['networks'] = []
- if 'networks' in kw['server']:
- for net_info in kw['server']['networks']:
- if 'uuid' in net_info:
- network = top_client.get_networks(context,
- net_info['uuid'])
- if not network:
- return utils.format_nova_error(
- 400, _('Network %s could not be '
- 'found') % net_info['uuid'])
-
- if not self._check_network_server_az_match(
- context, network,
- kw['server']['availability_zone']):
- return utils.format_nova_error(
- 400, _('Network and server not in the same '
- 'availability zone'))
-
- subnets = top_client.list_subnets(
- context, [{'key': 'network_id',
- 'comparator': 'eq',
- 'value': network['id']}])
- if not subnets:
- return utils.format_nova_error(
- 400, _('Network does not contain any subnets'))
- t_port_id, b_port_id = self._handle_network(
- context, pod, network, subnets,
- top_sg_ids=t_sg_ids, bottom_sg_ids=b_sg_ids)
- elif 'port' in net_info:
- port = top_client.get_ports(context, net_info['port'])
- if not port:
- return utils.format_nova_error(
- 400, _('Port %s could not be '
- 'found') % net_info['port'])
- t_port_id, b_port_id = self._handle_port(
- context, pod, port)
- server_body['networks'].append({'port': b_port_id})
-
-        # only for a security group first created in a pod do we invoke
-        # _handle_sg_rule_for_new_group to initialize rules in that group;
-        # this method removes all rules in the new group, then adds new rules
- top_sg_id_map = dict((sg['id'], sg) for sg in top_sgs)
- new_top_sgs = []
- new_bottom_sg_ids = []
- default_sg = None
- for t_id, b_id, is_new in zip(t_sg_ids, b_sg_ids, is_news):
- sg_name = top_sg_id_map[t_id]['name']
- if sg_name == 'default':
- default_sg = top_sg_id_map[t_id]
- continue
- if not is_new:
- continue
- new_top_sgs.append(top_sg_id_map[t_id])
- new_bottom_sg_ids.append(b_id)
- self._handle_sg_rule_for_new_group(context, pod, new_top_sgs,
- new_bottom_sg_ids)
- if default_sg:
- self._handle_sg_rule_for_default_group(
- context, pod, default_sg, self.project_id)
-
- client = self._get_client(pod['pod_name'])
- nics = [
- {'port-id': _port['port']} for _port in server_body['networks']]
-
- server = client.create_servers(context,
- name=server_body['name'],
- image=server_body['imageRef'],
- flavor=server_body['flavorRef'],
- nics=nics,
- security_groups=b_sg_ids)
- with context.session.begin():
- core.create_resource(context, models.ResourceRouting,
- {'top_id': server['id'],
- 'bottom_id': server['id'],
- 'pod_id': pod['pod_id'],
- 'project_id': self.project_id,
- 'resource_type': constants.RT_SERVER})
- pecan.response.status = 202
- return {'server': server}
-
- @expose(generic=True, template='json')
- def delete(self, _id):
- context = t_context.extract_context_from_environ()
-
- mappings = db_api.get_bottom_mappings_by_top_id(context, _id,
- constants.RT_SERVER)
- if not mappings:
- pecan.response.status = 404
- return {'Error': {'message': _('Server not found'), 'code': 404}}
-
- pod, bottom_id = mappings[0]
- client = self._get_client(pod['pod_name'])
- top_client = self._get_client()
- try:
- server_ports = top_client.list_ports(
- context, filters=[{'key': 'device_id', 'comparator': 'eq',
- 'value': _id}])
- ret = client.delete_servers(context, bottom_id)
-            # a None return value indicates the server was not found
- if ret is None:
- self._remove_stale_mapping(context, _id)
- pecan.response.status = 404
- return {'Error': {'message': _('Server not found'),
- 'code': 404}}
- for server_port in server_ports:
- self.xjob_handler.delete_server_port(context,
- server_port['id'])
- except Exception as e:
- code = 500
- message = _('Delete server %(server_id)s fails') % {
- 'server_id': _id}
- if hasattr(e, 'code'):
- code = e.code
- ex_message = str(e)
- if ex_message:
- message = ex_message
- LOG.error(message)
-
- pecan.response.status = code
- return {'Error': {'message': message, 'code': code}}
-
- # NOTE(zhiyuan) Security group rules for default security group are
- # also kept until subnet is deleted.
- pecan.response.status = 204
- return pecan.response
-
- def _get_or_create_route(self, context, pod, _id, _type):
- def list_resources(t_ctx, q_ctx, pod_, ele, _type_):
- client = self._get_client(pod_['pod_name'])
- return client.list_resources(_type_, t_ctx, [{'key': 'name',
- 'comparator': 'eq',
- 'value': ele['id']}])
-
- return t_lock.get_or_create_route(context, None,
- self.project_id, pod, {'id': _id},
- _type, list_resources)
-
- def _handle_router(self, context, pod, net):
- top_client = self._get_client()
-
- interfaces = top_client.list_ports(
- context, filters=[{'key': 'network_id',
- 'comparator': 'eq',
- 'value': net['id']},
- {'key': 'device_owner',
- 'comparator': 'eq',
- 'value': 'network:router_interface'}])
- interfaces = [inf for inf in interfaces if inf['device_id']]
- if not interfaces:
- return
- # TODO(zhiyuan) change xjob invoking from "cast" to "call" to guarantee
- # the job can be successfully registered
- self.xjob_handler.setup_bottom_router(
- context, net['id'], interfaces[0]['device_id'], pod['pod_id'])
-
- def _handle_network(self, context, pod, net, subnets, port=None,
- top_sg_ids=None, bottom_sg_ids=None):
- (bottom_net_id,
- subnet_map) = self.helper.prepare_bottom_network_subnets(
- context, None, self.project_id, pod, net, subnets)
-
- top_client = self._get_client()
- top_port_body = {'port': {'network_id': net['id'],
- 'admin_state_up': True}}
- if top_sg_ids:
- top_port_body['port']['security_groups'] = top_sg_ids
-
- # port
- if not port:
- port = top_client.create_ports(context, top_port_body)
- port_body = self.helper.get_create_port_body(
- self.project_id, port, subnet_map, bottom_net_id,
- bottom_sg_ids)
- else:
- port_body = self.helper.get_create_port_body(
- self.project_id, port, subnet_map, bottom_net_id)
- _, bottom_port_id = self.helper.prepare_bottom_element(
- context, self.project_id, pod, port, constants.RT_PORT, port_body)
-
- self._handle_router(context, pod, net)
-
- return port['id'], bottom_port_id
-
- def _handle_port(self, context, pod, port):
- top_client = self._get_client()
-        # NOTE(zhiyuan) at this moment, it is possible that the bottom port
-        # has already been created. if a user creates a port and associates
-        # it with a floating ip before booting a vm, the tricircle plugin
-        # will create the bottom port first in order to set up the floating
-        # ip in the bottom pod. but it is still safe for us to use the
-        # network id and subnet id in the returned port dict, since the
-        # tricircle plugin does id mapping and guarantees they are top ids
- net = top_client.get_networks(context, port['network_id'])
- subnets = []
- for fixed_ip in port['fixed_ips']:
- subnets.append(top_client.get_subnets(context,
- fixed_ip['subnet_id']))
- return self._handle_network(context, pod, net, subnets, port=port)
-
- @staticmethod
- def _safe_create_security_group_rule(context, client, body):
- try:
- client.create_security_group_rules(context, body)
- except q_exceptions.Conflict:
- return
-
- @staticmethod
- def _safe_delete_security_group_rule(context, client, _id):
- try:
- client.delete_security_group_rules(context, _id)
- except q_exceptions.NotFound:
- return
-
- def _handle_security_group(self, context, pod, top_sg_map,
- security_groups):
- t_sg_ids = []
- b_sg_ids = []
- is_news = []
- for sg_name in security_groups:
- t_sg = top_sg_map[sg_name]
- sg_body = {
- 'security_group': {
- 'name': t_sg['id'],
- 'description': t_sg['description']}}
- is_new, b_sg_id = self.helper.prepare_bottom_element(
- context, self.project_id, pod, t_sg, constants.RT_SG, sg_body)
- t_sg_ids.append(t_sg['id'])
- is_news.append(is_new)
- b_sg_ids.append(b_sg_id)
-
- return t_sg_ids, b_sg_ids, is_news
-
- @staticmethod
- def _construct_bottom_rule(rule, sg_id, ip=None):
- ip = ip or rule['remote_ip_prefix']
-        # if ip is passed, this is an extended rule for the remote group
- return {'remote_group_id': None,
- 'direction': rule['direction'],
- 'remote_ip_prefix': ip,
- 'protocol': rule.get('protocol'),
- 'ethertype': rule['ethertype'],
- 'port_range_max': rule.get('port_range_max'),
- 'port_range_min': rule.get('port_range_min'),
- 'security_group_id': sg_id}
-
- @staticmethod
- def _compare_rule(rule1, rule2):
- for key in ('direction', 'remote_ip_prefix', 'protocol', 'ethertype',
- 'port_range_max', 'port_range_min'):
- if rule1[key] != rule2[key]:
- return False
- return True
-
- def _handle_sg_rule_for_default_group(self, context, pod, default_sg,
- project_id):
- top_client = self._get_client()
- new_b_rules = []
- for t_rule in default_sg['security_group_rules']:
- if not t_rule['remote_group_id']:
- # leave sg_id empty here
- new_b_rules.append(
- self._construct_bottom_rule(t_rule, ''))
- continue
- if t_rule['ethertype'] != 'IPv4':
- continue
- subnets = top_client.list_subnets(
- context, [{'key': 'tenant_id', 'comparator': 'eq',
- 'value': project_id}])
- bridge_ip_net = netaddr.IPNetwork('100.0.0.0/8')
- for subnet in subnets:
- ip_net = netaddr.IPNetwork(subnet['cidr'])
- if ip_net in bridge_ip_net:
- continue
- # leave sg_id empty here
- new_b_rules.append(
- self._construct_bottom_rule(t_rule, '',
- subnet['cidr']))
-
- mappings = db_api.get_bottom_mappings_by_top_id(
- context, default_sg['id'], constants.RT_SG)
- for pod, b_sg_id in mappings:
- client = self._get_client(pod['pod_name'])
- b_sg = client.get_security_groups(context, b_sg_id)
- add_rules = []
- del_rules = []
- match_index = set()
- for b_rule in b_sg['security_group_rules']:
- match = False
- for i, rule in enumerate(new_b_rules):
- if self._compare_rule(b_rule, rule):
- match = True
- match_index.add(i)
- break
- if not match:
- del_rules.append(b_rule)
- for i, rule in enumerate(new_b_rules):
- if i not in match_index:
- add_rules.append(rule)
-
- for del_rule in del_rules:
- self._safe_delete_security_group_rule(
- context, client, del_rule['id'])
- if add_rules:
- rule_body = {'security_group_rules': []}
- for add_rule in add_rules:
- add_rule['security_group_id'] = b_sg_id
- rule_body['security_group_rules'].append(add_rule)
- self._safe_create_security_group_rule(context,
- client, rule_body)
-
- def _handle_sg_rule_for_new_group(self, context, pod, top_sgs,
- bottom_sg_ids):
- client = self._get_client(pod['pod_name'])
- for i, t_sg in enumerate(top_sgs):
- b_sg_id = bottom_sg_ids[i]
- new_b_rules = []
- for t_rule in t_sg['security_group_rules']:
- if t_rule['remote_group_id']:
-                    # we do not handle remote group rules for non-default
-                    # security groups; the tricircle plugin in neutron will
-                    # reject such rules anyway. the default security group
-                    # is not passed with top_sgs, so t_rule will never
-                    # belong to the default security group
- continue
- new_b_rules.append(
- self._construct_bottom_rule(t_rule, b_sg_id))
- try:
- b_sg = client.get_security_groups(context, b_sg_id)
- for b_rule in b_sg['security_group_rules']:
- self._safe_delete_security_group_rule(
- context, client, b_rule['id'])
- if new_b_rules:
- rule_body = {'security_group_rules': new_b_rules}
- self._safe_create_security_group_rule(context, client,
- rule_body)
- except Exception:
-                # if we fail when operating on bottom security group rules,
-                # update the security group mapping to set bottom_id to None
-                # and expire the mapping, so that the security group rule
-                # operations can be redone next time
- with context.session.begin():
- routes = core.query_resource(
- context, models.ResourceRouting,
- [{'key': 'top_id', 'comparator': 'eq',
- 'value': t_sg['id']},
- {'key': 'bottom_id', 'comparator': 'eq',
- 'value': b_sg_id}], [])
- update_dict = {'bottom_id': None,
- 'created_at': constants.expire_time,
- 'updated_at': constants.expire_time}
- core.update_resource(context, models.ResourceRouting,
- routes[0]['id'], update_dict)
- raise
-
- @staticmethod
- def _get_create_server_body(origin, bottom_az):
- body = {}
- copy_fields = ['name', 'imageRef', 'flavorRef',
- 'max_count', 'min_count']
- if bottom_az:
- body['availability_zone'] = bottom_az
- for field in copy_fields:
- if field in origin:
- body[field] = origin[field]
- return body
-
- @staticmethod
- def _remove_fip_info(servers):
- for server in servers:
- if 'addresses' not in server:
- continue
- for addresses in server['addresses'].values():
- remove_index = -1
- for i, address in enumerate(addresses):
- if address.get('OS-EXT-IPS:type') == 'floating':
- remove_index = i
- break
- if remove_index >= 0:
- del addresses[remove_index]
-
- @staticmethod
- def _remove_stale_mapping(context, server_id):
- filters = [{'key': 'top_id', 'comparator': 'eq', 'value': server_id},
- {'key': 'resource_type',
- 'comparator': 'eq',
- 'value': constants.RT_SERVER}]
- with context.session.begin():
- core.delete_resources(context,
- models.ResourceRouting,
- filters)
-
- @staticmethod
- def _check_network_server_az_match(context, network, server_az):
- az_hints = 'availability_zone_hints'
- network_type = 'provider:network_type'
-
- # for local type network, we make sure it's created in only one az
-
-        # NOTE(zhiyuan) a race condition exists when creating vms in the
-        # same local type network but in different azs at the same time
- if network.get(network_type) == constants.NT_LOCAL:
- mappings = db_api.get_bottom_mappings_by_top_id(
- context, network['id'], constants.RT_NETWORK)
- if mappings:
- pod, _ = mappings[0]
- if pod['az_name'] != server_az:
- return False
- # if neutron az not assigned, server az is used
- if not network.get(az_hints):
- return True
- if server_az in network[az_hints]:
- return True
- else:
- return False
-
- def _process_injected_file_quota(self, context, t_server_dict):
- try:
- ctx = context.elevated()
- injected_files = t_server_dict.get('injected_files', None)
- self._check_injected_file_quota(ctx, injected_files)
- except (t_exceptions.OnsetFileLimitExceeded,
- t_exceptions.OnsetFilePathLimitExceeded,
- t_exceptions.OnsetFileContentLimitExceeded) as e:
- msg = str(e)
- LOG.exception(_LE('Quota exceeded %(msg)s'),
- {'msg': msg})
- return utils.format_nova_error(400, _('Quota exceeded %s') % msg)
-
- def _check_injected_file_quota(self, context, injected_files):
- """Enforce quota limits on injected files.
-
- Raises a QuotaError if any limit is exceeded.
-
- """
-
- if injected_files is None:
- return
-
- # Check number of files first
- try:
- QUOTAS.limit_check(context,
- injected_files=len(injected_files))
- except t_exceptions.OverQuota:
- raise t_exceptions.OnsetFileLimitExceeded()
-
- # OK, now count path and content lengths; we're looking for
- # the max...
- max_path = 0
- max_content = 0
- for path, content in injected_files:
- max_path = max(max_path, len(path))
- max_content = max(max_content, len(content))
-
- try:
- QUOTAS.limit_check(context,
- injected_file_path_bytes=max_path,
- injected_file_content_bytes=max_content)
- except t_exceptions.OverQuota as exc:
- # Favor path limit over content limit for reporting
- # purposes
- if 'injected_file_path_bytes' in exc.kwargs['overs']:
- raise t_exceptions.OnsetFilePathLimitExceeded()
- else:
- raise t_exceptions.OnsetFileContentLimitExceeded()
-
- def _process_metadata_quota(self, context, t_server_dict):
- try:
- ctx = context.elevated()
- metadata = t_server_dict.get('metadata', None)
- self._check_metadata_properties_quota(ctx, metadata)
- except t_exceptions.InvalidMetadata as e1:
- LOG.exception(_LE('Invalid metadata %(exception)s'),
- {'exception': str(e1)})
- return utils.format_nova_error(400, _('Invalid metadata'))
- except t_exceptions.InvalidMetadataSize as e2:
- LOG.exception(_LE('Invalid metadata size %(exception)s'),
- {'exception': str(e2)})
- return utils.format_nova_error(400, _('Invalid metadata size'))
- except t_exceptions.MetadataLimitExceeded as e3:
- LOG.exception(_LE('Quota exceeded %(exception)s'),
- {'exception': str(e3)})
- return utils.format_nova_error(400,
- _('Quota exceeded in metadata'))
-
- def _check_metadata_properties_quota(self, context, metadata=None):
- """Enforce quota limits on metadata properties."""
- if not metadata:
- metadata = {}
- if not isinstance(metadata, dict):
- msg = (_("Metadata type should be dict."))
- raise t_exceptions.InvalidMetadata(reason=msg)
- num_metadata = len(metadata)
- try:
- QUOTAS.limit_check(context, metadata_items=num_metadata)
- except t_exceptions.OverQuota as exc:
- quota_metadata = exc.kwargs['quotas']['metadata_items']
- raise t_exceptions.MetadataLimitExceeded(allowed=quota_metadata)
-
-        # Because metadata is processed in the bottom pod, we just do
-        # parameter validation here to support quota management
- for k, v in six.iteritems(metadata):
- try:
- utils.check_string_length(v)
- utils.check_string_length(k, min_len=1)
- except t_exceptions.InvalidInput as e:
- raise t_exceptions.InvalidMetadata(reason=str(e))
-
- if len(k) > MAX_METADATA_KEY_LENGTH:
- msg = _("Metadata property key greater than 255 characters")
- raise t_exceptions.InvalidMetadataSize(reason=msg)
- if len(v) > MAX_METADATA_VALUE_LENGTH:
- msg = _("Metadata property value greater than 255 characters")
- raise t_exceptions.InvalidMetadataSize(reason=msg)
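
Both deleted modules wrap bottom-pod rule calls in the same "safe" pattern: a duplicate create (Conflict) or an already-removed rule (NotFound) is swallowed, so the rule-sync steps above can be retried without special casing. A small self-contained sketch of that idiom; FakePodClient is a hypothetical stand-in for the per-pod client, not something defined by this patch:

import neutronclient.common.exceptions as q_exceptions


def safe_create_rule(ctx, client, body):
    # A duplicate rule in the bottom pod raises Conflict; treat it as done.
    try:
        client.create_security_group_rules(ctx, body)
    except q_exceptions.Conflict:
        pass


def safe_delete_rule(ctx, client, rule_id):
    # A rule that is already gone raises NotFound; treat it as done.
    try:
        client.delete_security_group_rules(ctx, rule_id)
    except q_exceptions.NotFound:
        pass


class FakePodClient(object):
    # Hypothetical client that always reports the "already done" condition.
    def create_security_group_rules(self, ctx, body):
        raise q_exceptions.Conflict()

    def delete_security_group_rules(self, ctx, rule_id):
        raise q_exceptions.NotFound()


safe_create_rule(None, FakePodClient(), {'security_group_rule': {}})
safe_delete_rule(None, FakePodClient(), 'some-rule-id')
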
diff --git a/tricircle/tempestplugin/README.rst b/tricircle/tempestplugin/README.rst
deleted file mode 100644
index 8668a70..0000000
--- a/tricircle/tempestplugin/README.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-===============================================
-Tempest Integration of Tricircle
-===============================================
-
-This directory contains Tempest tests to cover the Tricircle project.
-
diff --git a/tricircle/tempestplugin/tempest_network.sh b/tricircle/tempestplugin/tempest_network.sh
deleted file mode 100755
index b2526e3..0000000
--- a/tricircle/tempestplugin/tempest_network.sh
+++ /dev/null
@@ -1,275 +0,0 @@
-# tempest.api.network.admin.test_agent_management.AgentManagementTestJSON.test_list_agent[id-9c80f04d-11f3-44a4-8738-ed2f879b0ff4]
-# tempest.api.network.admin.test_agent_management.AgentManagementTestJSON.test_list_agents_non_admin[id-e335be47-b9a1-46fd-be30-0874c0b751e6]
-# tempest.api.network.admin.test_agent_management.AgentManagementTestJSON.test_show_agent[id-869bc8e8-0fda-4a30-9b71-f8a7cf58ca9f]
-# tempest.api.network.admin.test_agent_management.AgentManagementTestJSON.test_update_agent_description[id-68a94a14-1243-46e6-83bf-157627e31556]
-# tempest.api.network.admin.test_agent_management.AgentManagementTestJSON.test_update_agent_status[id-371dfc5b-55b9-4cb5-ac82-c40eadaac941]
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_add_remove_network_from_dhcp_agent[id-a0856713-6549-470c-a656-e97c8df9a14d]
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_list_dhcp_agent_hosting_network[id-5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d]
-# tempest.api.network.admin.test_dhcp_agent_scheduler.DHCPAgentSchedulersTestJSON.test_list_networks_hosted_by_one_dhcp[id-30c48f98-e45d-4ffb-841c-b8aad57c7587]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_create_external_network[id-462be770-b310-4df9-9c42-773217e4c8b1]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_delete_external_networks_with_floating_ip[id-82068503-2cf2-4ed4-b3be-ecb89432e4bb]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_list_external_networks[id-39be4c9b-a57e-4ff9-b7c7-b218e209dfcc]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_show_external_networks_attribute[id-2ac50ab2-7ebd-4e27-b3ce-a9e399faaea2]
-# tempest.api.network.admin.test_external_network_extension.ExternalNetworksTestJSON.test_update_external_network[id-4db5417a-e11c-474d-a361-af00ebef57c5]
-# tempest.api.network.admin.test_external_networks_negative.ExternalNetworksAdminNegativeTestJSON.test_create_port_with_precreated_floatingip_as_fixed_ip[id-d402ae6c-0be0-4d8e-833b-a738895d98d0,negative]
-# tempest.api.network.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_create_list_show_floating_ip_with_tenant_id_by_admin[id-32727cc3-abe2-4485-a16e-48f2d54c14f2]
-# tempest.api.network.admin.test_floating_ips_admin_actions.FloatingIPAdminTestJSON.test_list_floating_ips_from_admin_and_nonadmin[id-64f2100b-5471-4ded-b46c-ddeeeb4f231b]
-# tempest.api.network.admin.test_l3_agent_scheduler.L3AgentSchedulerTestJSON.test_add_list_remove_router_on_l3_agent[id-9464e5e7-8625-49c3-8fd1-89c52be59d66]
-# tempest.api.network.admin.test_l3_agent_scheduler.L3AgentSchedulerTestJSON.test_list_routers_on_l3_agent[id-b7ce6e89-e837-4ded-9b78-9ed3c9c6a45a]
-# tempest.api.network.admin.test_negative_quotas.QuotasNegativeTest.test_network_quota_exceeding[id-644f4e1b-1bf9-4af0-9fd8-eb56ac0f51cf]
-# tempest.api.network.admin.test_quotas.QuotasTest.test_quotas[id-2390f766-836d-40ef-9aeb-e810d78207fb]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_centralized_router_creation[id-8a0a72b4-7290-4677-afeb-b4ffe37bc352]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_centralized_router_update_to_dvr[id-acd43596-c1fb-439d-ada8-31ad48ae3c2e]
-# tempest.api.network.admin.test_routers_dvr.RoutersTestDVR.test_distributed_router_creation[id-08a2a0a8-f1e4-4b34-8e30-e522e836c44e]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_create_list_port_with_address_pair[id-86c3529b-1231-40de-803c-00e40882f043]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_address_pair[id-9599b337-272c-47fd-b3cf-509414414ac4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_cidr_address_pair[id-4d6d178f-34f6-4bff-a01c-0a2f8fe909e4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairIpV6TestJSON.test_update_port_with_multiple_ip_mac_address_pair[id-b3f20091-6cd5-472b-8487-3516137df933]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_create_list_port_with_address_pair[id-86c3529b-1231-40de-803c-00e40882f043]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_address_pair[id-9599b337-272c-47fd-b3cf-509414414ac4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_cidr_address_pair[id-4d6d178f-34f6-4bff-a01c-0a2f8fe909e4]
-# tempest.api.network.test_allowed_address_pair.AllowedAddressPairTestJSON.test_update_port_with_multiple_ip_mac_address_pair[id-b3f20091-6cd5-472b-8487-3516137df933]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful[id-4ab211a0-276f-4552-9070-51e27f58fecf]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips[id-51a5e97f-f02e-4e4e-9a17-a69811d300e3]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips_duplicate[id-57b8302b-cba9-4fbb-8835-9168df029051]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_fixedips_outrange[id-98244d88-d990-4570-91d4-6b25d70d08af]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcp_stateful_router[id-e98f65db-68f4-4330-9fea-abd8c5192d4d]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_64_subnets[id-4256c61d-c538-41ea-9147-3c450c36669e]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_invalid_options[id-81f18ef6-95b5-4584-9966-10d480b7496a]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_eui64[id-e5517e62-6f16-430d-a672-f80875493d4c]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra[id-ae2f4a5d-03ff-4c42-a3b0-ce2fcb7ea832]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_stateless_no_ra_no_dhcp[id-21635b6f-165a-4d42-bf49-7d195e47342f]
-# tempest.api.network.test_dhcp_ipv6.NetworksTestDHCPv6.test_dhcpv6_two_subnets[id-4544adf7-bb5f-4bdc-b769-b3e77026cef2]
-# tempest.api.network.test_extensions.ExtensionsTestJSON.test_list_show_extensions[id-ef28c7e6-e646-4979-9d67-deb207bc5564,smoke]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON.test_create_list_port_with_extra_dhcp_options[id-d2c17063-3767-4a24-be4f-a23dbfa133c9]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsIpV6TestJSON.test_update_show_port_with_extra_dhcp_options[id-9a6aebf4-86ee-4f47-b07a-7f7232c55607]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsTestJSON.test_create_list_port_with_extra_dhcp_options[id-d2c17063-3767-4a24-be4f-a23dbfa133c9]
-# tempest.api.network.test_extra_dhcp_options.ExtraDHCPOptionsTestJSON.test_update_show_port_with_extra_dhcp_options[id-9a6aebf4-86ee-4f47-b07a-7f7232c55607]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_floating_ip_specifying_a_fixed_ip_address[id-36de4bd0-f09c-43e3-a8e1-1decc1ffd3a5,smoke]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_list_show_update_delete_floating_ip[id-62595970-ab1c-4b7f-8fcc-fddfe55e8718,smoke]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_create_update_floatingip_with_port_multiple_ip_address[id-45c4c683-ea97-41ef-9c51-5e9802f2f3d7]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_floating_ip_delete_port[id-e1f6bffd-442f-4668-b30e-df13f2705e77]
-# tempest.api.network.test_floating_ips.FloatingIPTestJSON.test_floating_ip_update_different_router[id-1bb2f731-fe5a-4b8c-8409-799ade1bed4d]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_associate_floatingip_port_ext_net_unreachable[id-6b3b8797-6d43-4191-985c-c48b773eb429,negative]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_in_private_network[id-50b9aeb4-9f0b-48ee-aa31-fa955a48ff54,negative]
-# tempest.api.network.test_floating_ips_negative.FloatingIPNegativeTestJSON.test_create_floatingip_with_port_ext_net_unreachable[id-22996ea8-4a81-4b27-b6e1-fa5df92fa5e8,negative]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_create_delete_metering_label_rule_with_filters[id-f4d547cd-3aee-408f-bf36-454f8825e045]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_create_delete_metering_label_with_filters[id-ec8e15ff-95d0-433b-b8a6-b466bddb1e50]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_list_metering_label_rules[id-cc832399-6681-493b-9d79-0202831a1281]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_list_metering_labels[id-e2fb2f8c-45bf-429a-9f17-171c70444612]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_show_metering_label[id-30abb445-0eea-472e-bd02-8649f54a5968]
-# tempest.api.network.test_metering_extensions.MeteringIpV6TestJSON.test_show_metering_label_rule[id-b7354489-96ea-41f3-9452-bace120fb4a7]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_create_delete_metering_label_rule_with_filters[id-f4d547cd-3aee-408f-bf36-454f8825e045]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_create_delete_metering_label_with_filters[id-ec8e15ff-95d0-433b-b8a6-b466bddb1e50]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_list_metering_label_rules[id-cc832399-6681-493b-9d79-0202831a1281]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_list_metering_labels[id-e2fb2f8c-45bf-429a-9f17-171c70444612]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_show_metering_label[id-30abb445-0eea-472e-bd02-8649f54a5968]
-# tempest.api.network.test_metering_extensions.MeteringTestJSON.test_show_metering_label_rule[id-b7354489-96ea-41f3-9452-bace120fb4a7]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsIpV6Test.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_network[id-d4f9024d-1e28-4fc1-a6b1-25dbc6fa11e2,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_port[id-48037ff2-e889-4c3b-b86a-8e3f34d2d060,smoke]
-# tempest.api.network.test_networks.BulkNetworkOpsTest.test_bulk_create_delete_subnet[id-8936533b-c0aa-4f29-8e53-6cc873aec489,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_default_gw[id-ebb4fd95-524f-46af-83c1-0305b239338f]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_gw[id-e41a4888-65a6-418c-a095-f7c2ef4ad59a]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_list_subnet_with_no_gw64_one_network[id-a9653883-b2a4-469b-8c3c-4518430a7e55]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksIpV6Test.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_slaac_subnet_with_ports[id-88554555-ebf8-41ef-9300-4926d45e06e9]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_stateless_subnet_with_ports[id-2de6ab5a-fcf0-4144-9813-f91a940291f1]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_default_gw[id-ebb4fd95-524f-46af-83c1-0305b239338f]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_gw[id-e41a4888-65a6-418c-a095-f7c2ef4ad59a]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_slaac[id-176b030f-a923-4040-a755-9dc94329e60c]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_stateful[id-da40cd1b-a833-4354-9a85-cd9b8a3b74ca]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_with_v6_attributes_stateless[id-7d410310-8c86-4902-adf9-865d08e31adb]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_list_subnet_with_no_gw64_one_network[id-a9653883-b2a4-469b-8c3c-4518430a7e55]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksIpV6TestAttrs.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_all_attributes[id-a4d9ec4c-0306-4111-a75c-db01a709030b]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_allocation_pools[id-bec949c4-3147-4ba6-af5f-cd2306118404]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_dhcp_enabled[id-94ce038d-ff0a-4a4c-a56b-09da3ca0b55d]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw[id-9393b468-186d-496d-aa36-732348cd76e7]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_gw_and_allocation_pools[id-8217a149-0c6c-4cfb-93db-0486f707d13f]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_with_host_routes_and_dns_nameservers[id-d830de0a-be47-468f-8f02-1fd996118289]
-# tempest.api.network.test_networks.NetworksTest.test_create_delete_subnet_without_gateway[id-d2d596e2-8e76-47a9-ac51-d4648009f4d3]
-# tempest.api.network.test_networks.NetworksTest.test_create_update_delete_network_subnet[id-0e269138-0da6-4efc-a46d-578161e7b221,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_delete_network_with_subnet[id-f04f61a9-b7f3-4194-90b2-9bcf660d1bfe]
-# tempest.api.network.test_networks.NetworksTest.test_external_network_visibility[id-af774677-42a9-4e4b-bb58-16fe6a5bc1ec,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_networks[id-f7ffdeda-e200-4a7a-bcbe-05716e86bf43,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_networks_fields[id-6ae6d24f-9194-4869-9c85-c313cb20e080]
-# tempest.api.network.test_networks.NetworksTest.test_list_subnets[id-db68ba48-f4ea-49e9-81d1-e367f6d0b20a,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_list_subnets_fields[id-842589e3-9663-46b0-85e4-7f01273b0412]
-# tempest.api.network.test_networks.NetworksTest.test_show_network[id-2bf13842-c93f-4a69-83ed-717d2ec3b44e,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_show_network_fields[id-867819bb-c4b6-45f7-acf9-90edcf70aa5e]
-# tempest.api.network.test_networks.NetworksTest.test_show_subnet[id-bd635d81-6030-4dd1-b3b9-31ba0cfdf6cc,smoke]
-# tempest.api.network.test_networks.NetworksTest.test_show_subnet_fields[id-270fff0b-8bfc-411f-a184-1e8fd35286f0]
-# tempest.api.network.test_networks.NetworksTest.test_update_subnet_gw_dns_host_routes_dhcp[id-3d3852eb-3009-49ec-97ac-5ce83b73010a]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_create_port_on_non_existent_network[id-13d3b106-47e6-4b9b-8d53-dae947f092fe,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_network[id-03795047-4a94-4120-a0a1-bd376e36fd4e,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_port[id-49ec2bbd-ac2e-46fd-8054-798e679ff894,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_delete_non_existent_subnet[id-a176c859-99fb-42ec-a208-8a85b552a239,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_network[id-9293e937-824d-42d2-8d5b-e985ea67002a,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_port[id-a954861d-cbfd-44e8-b0a9-7fab111f235d,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_show_non_existent_subnet[id-d746b40c-5e09-4043-99f7-cba1be8b70df,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_network[id-98bfe4e3-574e-4012-8b17-b2647063de87,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_port[id-cf8eef21-4351-4f53-adcd-cc5cb1e76b92,negative]
-# tempest.api.network.test_networks_negative.NetworksNegativeTestJSON.test_update_non_existent_subnet[id-1cc47884-ac52-4415-a31c-e7ce5474a868,negative]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_create_port_binding_ext_attr[id-8e8569c1-9ac7-44db-8bc1-f5fb2814f29b]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_list_ports_binding_ext_attr[id-1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_show_port_binding_ext_attr[id-b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsIpV6TestJSON.test_update_port_binding_ext_attr[id-6f6c412c-711f-444d-8502-0ac30fbf5dd5]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_create_port_binding_ext_attr[id-8e8569c1-9ac7-44db-8bc1-f5fb2814f29b]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_list_ports_binding_ext_attr[id-1c82a44a-6c6e-48ff-89e1-abe7eaf8f9f8]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_show_port_binding_ext_attr[id-b54ac0ff-35fc-4c79-9ca3-c7dbd4ea4f13]
-# tempest.api.network.test_ports.PortsAdminExtendedAttrsTestJSON.test_update_port_binding_ext_attr[id-6f6c412c-711f-444d-8502-0ac30fbf5dd5]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_show_delete_port_user_defined_mac[id-13e95171-6cbd-489c-9d7c-3f9c58215c18]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_create_update_port_with_second_ip[id-63aeadd4-3b49-427f-a3b1-19ca81f06270]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_port_list_filter_by_ip[id-e7fe260b-1e79-4dd3-86d9-bec6a7959fc5]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_port_list_filter_by_router_id[id-5ad01ed0-0e6e-4c5d-8194-232801b15c72]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
-# tempest.api.network.test_ports.PortsIpV6TestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_bulk_port[id-67f1b811-f8db-43e2-86bd-72c074d4a42c]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_port_in_allowed_allocation_pools[id-0435f278-40ae-48cb-a404-b8a087bc09b1,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_port_with_no_securitygroups[id-4179dcb9-1382-4ced-84fe-1b91c54f5735,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_show_delete_port_user_defined_mac[id-13e95171-6cbd-489c-9d7c-3f9c58215c18]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_update_delete_port[id-c72c1c0c-2193-4aca-aaa4-b1442640f51c,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_create_update_port_with_second_ip[id-63aeadd4-3b49-427f-a3b1-19ca81f06270]
-# tempest.api.network.test_ports.PortsTestJSON.test_list_ports[id-cf95b358-3e92-4a29-a148-52445e1ac50e,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_list_ports_fields[id-ff7f117f-f034-4e0e-abff-ccef05c454b4]
-# tempest.api.network.test_ports.PortsTestJSON.test_port_list_filter_by_ip[id-e7fe260b-1e79-4dd3-86d9-bec6a7959fc5]
-# tempest.api.network.test_ports.PortsTestJSON.test_port_list_filter_by_router_id[id-5ad01ed0-0e6e-4c5d-8194-232801b15c72]
-# tempest.api.network.test_ports.PortsTestJSON.test_show_port[id-c9a685bd-e83f-499c-939f-9f7863ca259f,smoke]
-# tempest.api.network.test_ports.PortsTestJSON.test_show_port_fields[id-45fcdaf2-dab0-4c13-ac6c-fcddfb579dbd]
-# tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_security_group_and_extra_attributes[id-58091b66-4ff4-4cc1-a549-05d60c7acd1a]
-# tempest.api.network.test_ports.PortsTestJSON.test_update_port_with_two_security_groups_and_extra_attributes[id-edf6766d-3d40-4621-bc6e-2521a44c257d]
-# tempest.api.network.test_routers.DvrRoutersTest.test_convert_centralized_router[id-644d7a4a-01a1-4b68-bb8d-0c0042cb1729]
-# tempest.api.network.test_routers.DvrRoutersTest.test_create_distributed_router[id-141297aa-3424-455d-aa8d-f2d95731e00a]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_setting_project_id[id-e54dd3a3-4352-4921-b09d-44369ae17397]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_with_default_snat_value[id-847257cc-6afd-4154-b8fb-af49f5670ce8]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_router_with_snat_explicit[id-ea74068d-09e9-4fd7-8995-9b6a1ace920f]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_router_interface_port_update_with_fixed_ip[id-96522edf-b4b5-45d9-8443-fa11c26e6eff]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_delete_extra_route[id-c86ac3a8-50bd-4b00-a6b8-62af84a0765c]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_admin_state[id-a8902683-c788-4246-95c7-ad9c6d63a4d9]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_reset_gateway_without_snat[id-f2faf994-97f4-410b-a831-9bc977b64374]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway[id-6cc285d8-46bf-4f36-9b1a-783e3008ba79]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway_with_snat_explicit[id-b386c111-3b21-466d-880c-5e72b01e1a33]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_set_gateway_without_snat[id-96536bc7-8262-4fb2-9967-5c46940fa279]
-# tempest.api.network.test_routers.RoutersIpV6Test.test_update_router_unset_gateway[id-ad81b7ee-4f81-407b-a19c-17e623f763e8]
-# tempest.api.network.test_routers.RoutersTest.test_add_multiple_router_interfaces[id-802c73c9-c937-4cef-824b-2191e24a6aab,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_port_id[id-2b7d2f37-6748-4d78-92e5-1d590234f0d5,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_add_remove_router_interface_with_subnet_id[id-b42e6e39-2e37-49cc-a6f4-8467e940900a,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_setting_project_id[id-e54dd3a3-4352-4921-b09d-44369ae17397]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_with_default_snat_value[id-847257cc-6afd-4154-b8fb-af49f5670ce8]
-# tempest.api.network.test_routers.RoutersTest.test_create_router_with_snat_explicit[id-ea74068d-09e9-4fd7-8995-9b6a1ace920f]
-# tempest.api.network.test_routers.RoutersTest.test_create_show_list_update_delete_router[id-f64403e2-8483-4b34-8ccd-b09a87bcc68c,smoke]
-# tempest.api.network.test_routers.RoutersTest.test_router_interface_port_update_with_fixed_ip[id-96522edf-b4b5-45d9-8443-fa11c26e6eff]
-# tempest.api.network.test_routers.RoutersTest.test_update_delete_extra_route[id-c86ac3a8-50bd-4b00-a6b8-62af84a0765c]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_admin_state[id-a8902683-c788-4246-95c7-ad9c6d63a4d9]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_reset_gateway_without_snat[id-f2faf994-97f4-410b-a831-9bc977b64374]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway[id-6cc285d8-46bf-4f36-9b1a-783e3008ba79]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway_with_snat_explicit[id-b386c111-3b21-466d-880c-5e72b01e1a33]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_set_gateway_without_snat[id-96536bc7-8262-4fb2-9967-5c46940fa279]
-# tempest.api.network.test_routers.RoutersTest.test_update_router_unset_gateway[id-ad81b7ee-4f81-407b-a19c-17e623f763e8]
-# tempest.api.network.test_routers_negative.DvrRoutersNegativeTest.test_router_create_tenant_distributed_returns_forbidden[id-4990b055-8fc7-48ab-bba7-aa28beaad0b9,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_add_router_interfaces_on_overlapping_subnets_returns_400[id-957751a3-3c68-4fa2-93b6-eb52ea10db6e,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_delete_non_existent_router_returns_404[id-c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_add_gateway_invalid_network_returns_404[id-37a94fc0-a834-45b9-bd23-9a81d2fd1e22,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_add_gateway_net_not_external_returns_400[id-11836a18-0b15-4327-a50b-f0d9dc66bddd,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_router_remove_interface_in_use_returns_409[id-04df80f9-224d-47f5-837a-bf23e33d1c20,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_show_non_existent_router_returns_404[id-c2a70d72-8826-43a7-8208-0209e6360c47,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeIpV6Test.test_update_non_existent_router_returns_404[id-b23d1569-8b0c-4169-8d4b-6abd34fad5c7,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_add_router_interfaces_on_overlapping_subnets_returns_400[id-957751a3-3c68-4fa2-93b6-eb52ea10db6e,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_delete_non_existent_router_returns_404[id-c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_add_gateway_invalid_network_returns_404[id-37a94fc0-a834-45b9-bd23-9a81d2fd1e22,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_add_gateway_net_not_external_returns_400[id-11836a18-0b15-4327-a50b-f0d9dc66bddd,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_router_remove_interface_in_use_returns_409[id-04df80f9-224d-47f5-837a-bf23e33d1c20,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_show_non_existent_router_returns_404[id-c2a70d72-8826-43a7-8208-0209e6360c47,negative]
-# tempest.api.network.test_routers_negative.RoutersNegativeTest.test_update_non_existent_router_returns_404[id-b23d1569-8b0c-4169-8d4b-6abd34fad5c7,negative]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
-# tempest.api.network.test_security_groups.SecGroupIPv6Test.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_list_update_show_delete_security_group[id-bfd128e5-3c92-44b6-9d66-7fe29d22c802,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_additional_args[id-87dfbcf9-1849-43ea-b1e4-efa3eeae9f71]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_icmp_type_code[id-c9463db8-b44d-4f52-b6c0-8dbda99f26ce]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_protocol_integer_value[id-0a307599-6655-4220-bebc-fd70c64f2290]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_group_id[id-c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_security_group_rule_with_remote_ip_prefix[id-16459776-5da2-4634-bce4-4b55ee3ec188]
-# tempest.api.network.test_security_groups.SecGroupTest.test_create_show_delete_security_group_rule[id-cfb99e0e-7410-4a3d-8a0c-959a63ee77e9,smoke]
-# tempest.api.network.test_security_groups.SecGroupTest.test_list_security_groups[id-e30abd17-fef9-4739-8617-dc26da88e686,smoke]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_with_remote_ip_and_group[id-b5c4b247-6b02-435b-b088-d10d45650881,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_create_security_group_rule_wrong_ip_prefix_version[id-7607439c-af73-499e-bf64-f687fd12a842,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupIPv6Test.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_additional_default_security_group_fails[id-2323061e-9fbf-4eb0-b547-7e8fafc90849,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_duplicate_security_group_rule_fails[id-8fde898f-ce88-493b-adc9-4e4692879fc5,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_ethertype[id-5666968c-fff3-40d6-9efc-df1c8bd01abb,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_protocol[id-981bdc22-ce48-41ed-900a-73148b583958,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_bad_remote_ip_prefix[id-5f8daf69-3c5f-4aaa-88c9-db1d66f68679,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_invalid_ports[id-0d9c7791-f2ad-4e2f-ac73-abf2373b0d2d,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_remote_groupid[id-4bf786fd-2f02-443c-9716-5b98e159a49a,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_non_existent_security_group[id-be308db6-a7cf-4d5c-9baf-71bafd73f35e,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_create_security_group_rule_with_remote_ip_and_group[id-b5c4b247-6b02-435b-b088-d10d45650881,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_delete_non_existent_security_group[id-1f1bb89d-5664-4956-9fcd-83ee0fa603df,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group[id-424fd5c3-9ddc-486a-b45f-39bf0c820fc6,negative]
-# tempest.api.network.test_security_groups_negative.NegativeSecGroupTest.test_show_non_existent_security_group_rule[id-4c094c09-000b-4e41-8100-9617600c02a6,negative]
-# tempest.api.network.test_service_type_management.ServiceTypeManagementTestJSON.test_service_provider_list[id-2cbbeea9-f010-40f6-8df5-4eaa0c918ea6]
-# tempest.api.network.test_subnetpools_extensions.SubnetPoolsTestJSON.test_create_list_show_update_delete_subnetpools[id-62595970-ab1c-4b7f-8fcc-fddfe55e9811,smoke]
diff --git a/tricircle/tests/unit/network/test_helper.py b/tricircle/tests/unit/network/test_helper.py
deleted file mode 100644
index 0074cb2..0000000
--- a/tricircle/tests/unit/network/test_helper.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import unittest
-
-from oslo_utils import uuidutils
-
-from tricircle.network import helper
-
-
-class HelperTest(unittest.TestCase):
- def setUp(self):
- self.helper = helper.NetworkHelper()
-
- def test_get_create_subnet_body(self):
- t_net_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- b_net_id = uuidutils.generate_uuid()
- project_id = uuidutils.generate_uuid()
-
- t_subnet = {
- 'network_id': t_net_id,
- 'id': t_subnet_id,
- 'ip_version': 4,
- 'cidr': '10.0.1.0/24',
- 'gateway_ip': '10.0.1.1',
- 'allocation_pools': [{'start': '10.0.1.2', 'end': '10.0.1.254'}],
- 'enable_dhcp': True,
- 'tenant_id': project_id
- }
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.2')
- self.assertItemsEqual([{'start': '10.0.1.3', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.2', body['subnet']['gateway_ip'])
-
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.254')
- self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.253'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.254', body['subnet']['gateway_ip'])
-
- t_subnet['allocation_pools'] = [
- {'start': '10.0.1.2', 'end': '10.0.1.10'},
- {'start': '10.0.1.20', 'end': '10.0.1.254'}]
- body = self.helper.get_create_subnet_body(project_id, t_subnet,
- b_net_id, '10.0.1.5')
- self.assertItemsEqual([{'start': '10.0.1.2', 'end': '10.0.1.4'},
- {'start': '10.0.1.6', 'end': '10.0.1.10'},
- {'start': '10.0.1.20', 'end': '10.0.1.254'}],
- body['subnet']['allocation_pools'])
- self.assertEqual('10.0.1.5', body['subnet']['gateway_ip'])
diff --git a/tricircle/tests/unit/network/test_plugin.py b/tricircle/tests/unit/network/test_plugin.py
deleted file mode 100644
index 773f55e..0000000
--- a/tricircle/tests/unit/network/test_plugin.py
+++ /dev/null
@@ -1,2239 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-
-import copy
-import mock
-from mock import patch
-import netaddr
-import unittest
-
-from sqlalchemy.orm import attributes
-from sqlalchemy.orm import exc
-from sqlalchemy.sql import elements
-
-import neutron_lib.constants as q_constants
-
-import neutron.conf.common as q_config
-from neutron.db import db_base_plugin_common
-from neutron.db import db_base_plugin_v2
-from neutron.db import ipam_pluggable_backend
-from neutron.db import l3_db
-from neutron.db import models_v2
-from neutron.extensions import availability_zone as az_ext
-from neutron.ipam import driver
-from neutron.ipam import requests
-import neutron.ipam.utils as ipam_utils
-from neutron import manager
-import neutronclient.common.exceptions as q_exceptions
-
-from oslo_config import cfg
-from oslo_serialization import jsonutils
-from oslo_utils import uuidutils
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import exceptions
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network.drivers import type_local
-from tricircle.network.drivers import type_shared_vlan
-from tricircle.network import helper
-from tricircle.network import managers
-from tricircle.network import plugin
-from tricircle.tests.unit.network import test_security_groups
-from tricircle.xjob import xmanager
-
-
-TOP_NETS = []
-TOP_SUBNETS = []
-TOP_PORTS = []
-TOP_ROUTERS = []
-TOP_ROUTERPORT = []
-TOP_SUBNETPOOLS = []
-TOP_SUBNETPOOLPREFIXES = []
-TOP_IPALLOCATIONS = []
-TOP_VLANALLOCATIONS = []
-TOP_SEGMENTS = []
-TOP_EXTNETS = []
-TOP_FLOATINGIPS = []
-TOP_SGS = []
-TOP_SG_RULES = []
-BOTTOM1_NETS = []
-BOTTOM1_SUBNETS = []
-BOTTOM1_PORTS = []
-BOTTOM1_ROUTERS = []
-BOTTOM1_SGS = []
-BOTTOM1_FIPS = []
-BOTTOM2_NETS = []
-BOTTOM2_SUBNETS = []
-BOTTOM2_PORTS = []
-BOTTOM2_ROUTERS = []
-BOTTOM2_SGS = []
-BOTTOM2_FIPS = []
-RES_LIST = [TOP_NETS, TOP_SUBNETS, TOP_PORTS, TOP_ROUTERS, TOP_ROUTERPORT,
- TOP_SUBNETPOOLS, TOP_SUBNETPOOLPREFIXES, TOP_IPALLOCATIONS,
- TOP_VLANALLOCATIONS, TOP_SEGMENTS, TOP_EXTNETS, TOP_FLOATINGIPS,
- TOP_SGS, TOP_SG_RULES,
- BOTTOM1_NETS, BOTTOM1_SUBNETS, BOTTOM1_PORTS, BOTTOM1_ROUTERS,
- BOTTOM1_SGS, BOTTOM1_FIPS,
- BOTTOM2_NETS, BOTTOM2_SUBNETS, BOTTOM2_PORTS, BOTTOM2_ROUTERS,
- BOTTOM2_SGS, BOTTOM2_FIPS]
-RES_MAP = {'networks': TOP_NETS,
- 'subnets': TOP_SUBNETS,
- 'ports': TOP_PORTS,
- 'routers': TOP_ROUTERS,
- 'routerports': TOP_ROUTERPORT,
- 'ipallocations': TOP_IPALLOCATIONS,
- 'subnetpools': TOP_SUBNETPOOLS,
- 'subnetpoolprefixes': TOP_SUBNETPOOLPREFIXES,
- 'ml2_vlan_allocations': TOP_VLANALLOCATIONS,
- 'networksegments': TOP_SEGMENTS,
- 'externalnetworks': TOP_EXTNETS,
- 'floatingips': TOP_FLOATINGIPS,
- 'securitygroups': TOP_SGS,
- 'securitygrouprules': TOP_SG_RULES}
-SUBNET_INFOS = {}
-
-
-def _fill_external_gateway_info(router):
- if router.gw_port:
- ext_gw_info = {
- 'network_id': router.gw_port['network_id'],
- 'external_fixed_ips': [
- {'subnet_id': ip["subnet_id"],
- 'ip_address': ip["ip_address"]}
- for ip in router.gw_port['fixed_ips']]}
- else:
- ext_gw_info = None
- router['external_gateway_info'] = ext_gw_info
- return router
-
-
-def _transform_az(network):
- az_hints_key = 'availability_zone_hints'
- if az_hints_key in network:
- ret = DotDict(network)
- az_str = network[az_hints_key]
- ret[az_hints_key] = jsonutils.loads(az_str) if az_str else []
- return ret
- return network
-
-
-class FakeIpamSubnet(driver.Subnet):
- def __init__(self, subnet):
- self._subnet = subnet
-
- def allocate(self, address_request):
- pass
-
- def deallocate(self, address):
- pass
-
- def get_details(self):
- return requests.SpecificSubnetRequest(self._subnet['tenant_id'],
- self._subnet['id'],
- self._subnet['cidr'],
- self._subnet['gateway'],
- self._subnet['pools'])
-
-
-class FakePool(driver.Pool):
- def allocate_subnet(self, subnet_request):
- if isinstance(subnet_request, requests.SpecificSubnetRequest):
- subnet_info = {'id': subnet_request.subnet_id,
- 'tenant_id': subnet_request.tenant_id,
- 'cidr': subnet_request.subnet_cidr,
- 'gateway': subnet_request.gateway_ip,
- 'pools': subnet_request.allocation_pools}
- SUBNET_INFOS[subnet_info['id']] = subnet_info
- return FakeIpamSubnet(subnet_info)
- prefix = self._subnetpool.prefixes[0]
- subnet = next(prefix.subnet(subnet_request.prefixlen))
- gateway = subnet.network + 1
- pools = ipam_utils.generate_pools(subnet.cidr,
- gateway)
- subnet_info = {'id': subnet_request.subnet_id,
- 'tenant_id': subnet_request.tenant_id,
- 'cidr': subnet.cidr,
- 'gateway': gateway,
- 'pools': pools}
- SUBNET_INFOS[subnet_info['id']] = subnet_info
- return FakeIpamSubnet(subnet_info)
-
- def get_subnet(self, subnet_id):
- return FakeIpamSubnet(SUBNET_INFOS[subnet_id])
-
- def get_allocator(self, subnet_ids):
- return driver.SubnetGroup()
-
- def update_subnet(self, subnet_request):
- return FakeIpamSubnet()
-
- def remove_subnet(self, subnet_id):
- pass
-
-
-class DotDict(dict):
- def __init__(self, normal_dict=None):
- if normal_dict:
- for key, value in normal_dict.iteritems():
- self[key] = value
-
- def __getattr__(self, item):
- return self.get(item)
-
-
-class FakeNeutronClient(object):
-
- _res_map = {'top': {'port': TOP_PORTS},
- 'pod_1': {'port': BOTTOM1_PORTS},
- 'pod_2': {'port': BOTTOM2_PORTS}}
-
- def __init__(self, pod_name):
- self.pod_name = pod_name
- self.ports_path = ''
-
- def _get(self, params=None):
- port_list = self._res_map[self.pod_name]['port']
-
- if not params:
- return {'ports': port_list}
- if 'marker' in params:
- sorted_list = sorted(port_list, key=lambda x: x['id'])
- for i, port in enumerate(sorted_list):
- if port['id'] == params['marker']:
- return {'ports': sorted_list[i + 1:]}
- if 'filters' in params:
- return_list = []
- for port in port_list:
- is_selected = True
- for key, value in params['filters'].iteritems():
- if key not in port or not port[key] or (
- port[key] not in value):
- is_selected = False
- break
- if is_selected:
- return_list.append(port)
- return {'ports': return_list}
- return {'ports': port_list}
-
- def get(self, path, params=None):
- if self.pod_name in ['pod_1', 'pod_2', 'top']:
- res_list = self._get(params)['ports']
- return_list = []
- for res in res_list:
- if self.pod_name != 'top':
- res = copy.copy(res)
- return_list.append(res)
- return {'ports': return_list}
- else:
- raise Exception()
-
-
-class FakeClient(object):
-
- _res_map = {'top': RES_MAP,
- 'pod_1': {'network': BOTTOM1_NETS,
- 'subnet': BOTTOM1_SUBNETS,
- 'port': BOTTOM1_PORTS,
- 'router': BOTTOM1_ROUTERS,
- 'security_group': BOTTOM1_SGS,
- 'floatingip': BOTTOM1_FIPS},
- 'pod_2': {'network': BOTTOM2_NETS,
- 'subnet': BOTTOM2_SUBNETS,
- 'port': BOTTOM2_PORTS,
- 'router': BOTTOM2_ROUTERS,
- 'security_group': BOTTOM2_SGS,
- 'floatingip': BOTTOM2_FIPS}}
-
- def __init__(self, pod_name):
- if not pod_name:
- self.pod_name = 'top'
- else:
- self.pod_name = pod_name
- self.client = FakeNeutronClient(self.pod_name)
-
- def get_native_client(self, resource, ctx):
- return self.client
-
- def _get_connection(self):
-        # only for mock purposes
- pass
-
- def _allocate_ip(self, port_body):
- subnet_list = self._res_map[self.pod_name]['subnet']
- for subnet in subnet_list:
- if subnet['network_id'] == port_body['port']['network_id']:
- cidr = subnet['cidr']
- ip = cidr[:cidr.rindex('.')] + '.5'
- return {'subnet_id': subnet['id'],
- 'ip_address': ip}
-
- def create_resources(self, _type, ctx, body):
- if _type == 'port':
- res_list = self._res_map[self.pod_name][_type]
- subnet_ips_map = {}
- for res in res_list:
- fixed_ips = res.get('fixed_ips', [])
- for fixed_ip in fixed_ips:
- if fixed_ip['subnet_id'] not in subnet_ips_map:
- subnet_ips_map[fixed_ip['subnet_id']] = set()
- subnet_ips_map[fixed_ip['subnet_id']].add(
- fixed_ip['ip_address'])
- fixed_ips = body[_type].get('fixed_ips', [])
- for fixed_ip in fixed_ips:
- for subnet in self._res_map[self.pod_name]['subnet']:
- ip_range = netaddr.IPNetwork(subnet['cidr'])
- ip = netaddr.IPAddress(fixed_ip['ip_address'])
- if ip in ip_range:
- fixed_ip['subnet_id'] = subnet['id']
- break
- if fixed_ip['ip_address'] in subnet_ips_map.get(
- fixed_ip['subnet_id'], set()):
- raise q_exceptions.IpAddressInUseClient()
- if 'device_id' not in body[_type]:
- body[_type]['device_id'] = ''
- if 'fixed_ips' not in body[_type]:
- body[_type]['fixed_ips'] = [self._allocate_ip(body)]
- if _type == 'subnet':
- if 'gateway_ip' not in body[_type]:
- cidr = body[_type]['cidr']
- body[_type]['gateway_ip'] = cidr[:cidr.rindex('.')] + '.1'
- if 'id' not in body[_type]:
- body[_type]['id'] = uuidutils.generate_uuid()
- res_list = self._res_map[self.pod_name][_type]
- res = dict(body[_type])
- res_list.append(res)
- return res
-
- def list_resources(self, _type, ctx, filters=None):
- if self.pod_name == 'top':
- res_list = self._res_map[self.pod_name][_type + 's']
- else:
- res_list = self._res_map[self.pod_name][_type]
- ret_list = []
- for res in res_list:
- is_selected = True
- for _filter in filters:
- if _filter['key'] not in res:
- is_selected = False
- break
- if _filter['value'] != res[_filter['key']]:
- is_selected = False
- break
- if is_selected:
- ret_list.append(res)
- return ret_list
-
- def list_networks(self, ctx, filters=None):
- networks = self.list_resources('network', ctx, filters)
- if self.pod_name != 'top':
- return networks
- ret_list = []
- for network in networks:
- ret_list.append(_transform_az(network))
- return ret_list
-
- def get_networks(self, ctx, net_id):
- return self.list_networks(ctx, [{'key': 'id',
- 'comparator': 'eq',
- 'value': net_id}])[0]
-
- def list_subnets(self, ctx, filters=None):
- return self.list_resources('subnet', ctx, filters)
-
- def get_subnets(self, ctx, subnet_id):
- return self.list_resources('subnet', ctx, [{'key': 'id',
- 'comparator': 'eq',
- 'value': subnet_id}])[0]
-
- def update_subnets(self, ctx, subnet_id, body):
- pass
-
- def create_ports(self, ctx, body):
- return self.create_resources('port', ctx, body)
-
- def list_ports(self, ctx, filters=None):
- filter_dict = {}
- filters = filters or []
- for query_filter in filters:
- key = query_filter['key']
- value = query_filter['value']
- filter_dict[key] = value
- return self.client.get('', {'filters': filter_dict})['ports']
-
- def get_ports(self, ctx, port_id):
- return self.client.get(
- '', params={'filters': {'id': [port_id]}})['ports'][0]
-
- def delete_ports(self, ctx, port_id):
- index = -1
- if self.pod_name == 'top':
- port_list = self._res_map[self.pod_name]['ports']
- else:
- port_list = self._res_map[self.pod_name]['port']
- for i, port in enumerate(port_list):
- if port['id'] == port_id:
- index = i
- if index != -1:
- del port_list[index]
-
- def add_gateway_routers(self, ctx, *args, **kwargs):
-        # only for mock purposes
- pass
-
- def add_interface_routers(self, ctx, *args, **kwargs):
- self._get_connection()
-
- router_id, body = args
- if 'port_id' in body:
- for port in self._res_map[self.pod_name]['port']:
- if port['id'] == body['port_id']:
- port['device_id'] = router_id
- port['device_owner'] = 'network:router_interface'
- else:
- subnet_id = body['subnet_id']
- subnet = self.get_subnets(ctx, subnet_id)
- self.create_ports(ctx, {'port': {
- 'tenant_id': subnet['tenant_id'],
- 'admin_state_up': True,
- 'id': uuidutils.generate_uuid(),
- 'name': '',
- 'network_id': subnet['network_id'],
- 'fixed_ips': [
- {'subnet_id': subnet_id,
- 'ip_address': subnet['gateway_ip']}
- ],
- 'mac_address': '',
- 'device_id': router_id,
- 'device_owner': 'network:router_interface'
- }})
-
- def remove_interface_routers(self, ctx, *args, **kwargs):
-        # only for mock purposes
- pass
-
- def get_routers(self, ctx, router_id):
- router = self.list_resources('router', ctx, [{'key': 'id',
- 'comparator': 'eq',
- 'value': router_id}])[0]
- return _fill_external_gateway_info(router)
-
- def action_routers(self, ctx, action, *args, **kwargs):
-        # split into three functions for test purposes
- if action == 'add_interface':
- return self.add_interface_routers(ctx, *args, **kwargs)
- elif action == 'add_gateway':
- return self.add_gateway_routers(ctx, *args, **kwargs)
- elif action == 'remove_interface':
- return self.remove_interface_routers(ctx, *args, **kwargs)
-
- def create_floatingips(self, ctx, body):
- fip = self.create_resources('floatingip', ctx, body)
- for key in ['fixed_port_id']:
- if key not in fip:
- fip[key] = None
- return fip
-
- def list_floatingips(self, ctx, filters=None):
- fips = self.list_resources('floatingip', ctx, filters)
- for fip in fips:
- if 'port_id' not in fip:
- fip['port_id'] = None
- return fips
-
- def update_floatingips(self, ctx, _id, body):
- pass
-
- def delete_floatingips(self, ctx, _id):
- pass
-
- def create_security_group_rules(self, ctx, body):
- sg_id = body['security_group_rule']['security_group_id']
- res_list = self._res_map[self.pod_name]['security_group']
- for sg in res_list:
- if sg['id'] == sg_id:
- target_sg = sg
- new_rule = copy.copy(body['security_group_rule'])
- match_found = False
- for rule in target_sg['security_group_rules']:
- old_rule = copy.copy(rule)
- if new_rule == old_rule:
- match_found = True
- break
- if match_found:
- raise q_exceptions.Conflict()
- target_sg['security_group_rules'].append(body['security_group_rule'])
-
- def delete_security_group_rules(self, ctx, rule_id):
- res_list = self._res_map[self.pod_name]['security_group']
- for sg in res_list:
- for rule in sg['security_group_rules']:
- if rule['id'] == rule_id:
- sg['security_group_rules'].remove(rule)
- return
-
- def get_security_groups(self, ctx, sg_id):
- res_list = self._res_map[self.pod_name]['security_group']
- for sg in res_list:
- if sg['id'] == sg_id:
- # need to do a deep copy because we will traverse the security
-            # group's 'security_group_rules' field and make changes to the
- # group
- ret_sg = copy.deepcopy(sg)
- return ret_sg
-
-
-class FakeNeutronContext(object):
- def __init__(self):
- self._session = None
- self.is_admin = True
- self.is_advsvc = False
- self.tenant_id = ''
-
- @property
- def session(self):
- if not self._session:
- self._session = FakeSession()
- return self._session
-
- def elevated(self):
- return self
-
-
-def delete_model(res_list, model_obj, key=None):
- if not res_list:
- return
- if not key:
- key = 'id'
- if key not in res_list[0]:
- return
- index = -1
- for i, res in enumerate(res_list):
- if res[key] == model_obj[key]:
- index = i
- break
- if index != -1:
- del res_list[index]
- return
-
-
-def link_models(model_obj, model_dict, foreign_table, foreign_key, table, key,
- link_prop):
- if model_obj.__tablename__ == foreign_table:
- for instance in RES_MAP[table]:
- if instance[key] == model_dict[foreign_key]:
- if link_prop not in instance:
- instance[link_prop] = []
- instance[link_prop].append(model_dict)
-
-
-def unlink_models(res_list, model_dict, foreign_key, key, link_prop,
- link_ele_foreign_key, link_ele_key):
- if foreign_key not in model_dict:
- return
- for instance in res_list:
- if instance[key] == model_dict[foreign_key]:
- if link_prop not in instance:
- return
- index = -1
- for i, res in enumerate(instance[link_prop]):
- if res[link_ele_foreign_key] == model_dict[link_ele_key]:
- index = i
- break
- if index != -1:
- del instance[link_prop][index]
- return
-
-
-def update_floatingip(self, context, _id, floatingip):
- for fip in TOP_FLOATINGIPS:
- if fip['id'] != _id:
- continue
- update_dict = floatingip['floatingip']
- if not floatingip['floatingip']['port_id']:
- update_dict['fixed_port_id'] = None
- update_dict['fixed_ip_address'] = None
- update_dict['router_id'] = None
- fip.update(update_dict)
- return
- for port in TOP_PORTS:
- if port['id'] != floatingip['floatingip']['port_id']:
- continue
- update_dict['fixed_port_id'] = port['id']
- update_dict[
- 'fixed_ip_address'] = port['fixed_ips'][0]['ip_address']
- for router_port in TOP_ROUTERPORT:
- for _port in TOP_PORTS:
- if _port['id'] != router_port['port_id']:
- continue
- if _port['network_id'] == port['network_id']:
- update_dict['router_id'] = router_port['router_id']
-
- fip.update(update_dict)
-
-
-class FakeQuery(object):
- def __init__(self, records, table):
- self.records = records
- self.table = table
- self.index = 0
-
- def _handle_pagination_by_id(self, record_id):
- for i, record in enumerate(self.records):
- if record['id'] == record_id:
- if i + 1 < len(self.records):
- return FakeQuery(self.records[i + 1:], self.table)
- else:
- return FakeQuery([], self.table)
- return FakeQuery([], self.table)
-
- def _handle_filter(self, keys, values):
- filtered_list = []
- for record in self.records:
- selected = True
- for i, key in enumerate(keys):
- if key not in record or record[key] != values[i]:
- selected = False
- break
- if selected:
- filtered_list.append(record)
- return FakeQuery(filtered_list, self.table)
-
- def filter(self, *criteria):
- _filter = []
- keys = []
- values = []
- for e in criteria:
- if not hasattr(e, 'right') and isinstance(e, elements.False_):
-                # the filter is a single False value; set the key to
-                # 'INVALID_FIELD' so that no records will be returned
- keys.append('INVALID_FIELD')
- values.append(False)
- elif not isinstance(e.right, elements.Null):
- _filter.append(e)
- else:
- if e.left.name == 'network_id' and (
- e.expression.operator.__name__ == 'isnot'):
- keys.append('router:external')
- values.append(True)
- if not _filter:
- if not keys:
- return FakeQuery(self.records, self.table)
- else:
- return self._handle_filter(keys, values)
- if hasattr(_filter[0].right, 'value'):
- keys.extend([e.left.name for e in _filter])
- values.extend([e.right.value for e in _filter])
- else:
- keys.extend([e.expression.left.name for e in _filter])
- values.extend(
- [e.expression.right.element.clauses[0].value for e in _filter])
- if _filter[0].expression.operator.__name__ == 'lt':
- return self._handle_pagination_by_id(values[0])
- else:
- return self._handle_filter(keys, values)
-
- def filter_by(self, **kwargs):
- filtered_list = []
- for record in self.records:
- selected = True
- for key, value in kwargs.iteritems():
- if key not in record or record[key] != value:
- selected = False
- break
- if selected:
- filtered_list.append(record)
- return FakeQuery(filtered_list, self.table)
-
- def delete(self):
- for model_obj in self.records:
- unlink_models(RES_MAP['routers'], model_obj, 'router_id',
- 'id', 'attached_ports', 'port_id', 'port_id')
- delete_model(RES_MAP[self.table], model_obj, key='port_id')
-
- def outerjoin(self, *props, **kwargs):
- return FakeQuery(self.records, self.table)
-
- def join(self, *props, **kwargs):
- return FakeQuery(self.records, self.table)
-
- def order_by(self, func):
- self.records.sort(key=lambda x: x['id'])
- return FakeQuery(self.records, self.table)
-
- def enable_eagerloads(self, value):
- return FakeQuery(self.records, self.table)
-
- def limit(self, limit):
- return FakeQuery(self.records[:limit], self.table)
-
- def next(self):
- if self.index >= len(self.records):
- raise StopIteration
- self.index += 1
- return self.records[self.index - 1]
-
- def one(self):
- if len(self.records) == 0:
- raise exc.NoResultFound()
- return self.records[0]
-
- def first(self):
- if len(self.records) == 0:
- return None
- else:
- return self.records[0]
-
- def update(self, values):
- for record in self.records:
- for key, value in values.iteritems():
- record[key] = value
- return len(self.records)
-
- def all(self):
- return self.records
-
- def count(self):
- return len(self.records)
-
- def __iter__(self):
- return self
-
-
-class FakeSession(object):
- class WithWrapper(object):
- def __enter__(self):
- pass
-
- def __exit__(self, type, value, traceback):
- pass
-
- def __init__(self):
- self.info = {}
-
- @property
- def is_active(self):
- return True
-
- def begin(self, subtransactions=False, nested=True):
- return FakeSession.WithWrapper()
-
- def begin_nested(self):
- return FakeSession.WithWrapper()
-
- def query(self, model):
- if isinstance(model, attributes.InstrumentedAttribute):
- model = model.class_
- if model.__tablename__ not in RES_MAP:
- return FakeQuery([], model.__tablename__)
- return FakeQuery(RES_MAP[model.__tablename__],
- model.__tablename__)
-
- def add(self, model_obj):
- if model_obj.__tablename__ not in RES_MAP:
- return
- model_dict = DotDict(model_obj._as_dict())
- if 'project_id' in model_dict:
- model_dict['tenant_id'] = model_dict['project_id']
-
- if model_obj.__tablename__ == 'networks':
- model_dict['subnets'] = []
- if model_obj.__tablename__ == 'ports':
- model_dict['dhcp_opts'] = []
- model_dict['security_groups'] = []
-
- link_models(model_obj, model_dict,
- 'subnetpoolprefixes', 'subnetpool_id',
- 'subnetpools', 'id', 'prefixes')
- link_models(model_obj, model_dict,
- 'ipallocations', 'port_id',
- 'ports', 'id', 'fixed_ips')
- link_models(model_obj, model_dict,
- 'subnets', 'network_id', 'networks', 'id', 'subnets')
- link_models(model_obj, model_dict,
- 'securitygrouprules', 'security_group_id',
- 'securitygroups', 'id', 'security_group_rules')
-
- if model_obj.__tablename__ == 'routerports':
- for port in TOP_PORTS:
- if port['id'] == model_dict['port_id']:
- model_dict['port'] = port
- port.update(model_dict)
- break
- if model_obj.__tablename__ == 'externalnetworks':
- for net in TOP_NETS:
- if net['id'] == model_dict['network_id']:
- net['external'] = True
- net['router:external'] = True
- break
- link_models(model_obj, model_dict,
- 'routerports', 'router_id',
- 'routers', 'id', 'attached_ports')
-
- RES_MAP[model_obj.__tablename__].append(model_dict)
-
- def _cascade_delete(self, model_dict, foreign_key, table, key):
- if foreign_key not in model_dict:
- return
- index = -1
- for i, instance in enumerate(RES_MAP[table]):
- if instance[foreign_key] == model_dict[key]:
- index = i
- break
- if index != -1:
- del RES_MAP[table][index]
-
- def delete(self, model_obj):
- unlink_models(RES_MAP['routers'], model_obj, 'router_id', 'id',
- 'attached_ports', 'port_id', 'id')
- self._cascade_delete(model_obj, 'port_id', 'ipallocations', 'id')
- for res_list in RES_MAP.values():
- delete_model(res_list, model_obj)
-
- def flush(self):
- pass
-
- def expire(self, obj, fields=None):
- pass
-
-
-class FakeXManager(xmanager.XManager):
- def __init__(self, fake_plugin):
- self.clients = {constants.TOP: client.Client()}
- self.job_handles = {
- constants.JT_ROUTER: self.configure_extra_routes,
- constants.JT_ROUTER_SETUP: self.setup_bottom_router,
- constants.JT_PORT_DELETE: self.delete_server_port}
- self.helper = FakeHelper(fake_plugin)
- self.xjob_handler = FakeBaseRPCAPI()
-
- def _get_client(self, pod_name=None):
- return FakeClient(pod_name)
-
-
-class FakeBaseRPCAPI(object):
- def configure_extra_routes(self, ctxt, router_id):
- pass
-
-
-class FakeRPCAPI(FakeBaseRPCAPI):
- def __init__(self, fake_plugin):
- self.xmanager = FakeXManager(fake_plugin)
-
- def setup_bottom_router(self, ctxt, net_id, router_id, pod_id):
- combine_id = '%s#%s#%s' % (pod_id, router_id, net_id)
- self.xmanager.setup_bottom_router(
- ctxt, payload={constants.JT_ROUTER_SETUP: combine_id})
-
-
-class FakeExtension(object):
- def __init__(self, ext_obj):
- self.obj = ext_obj
-
-
-class FakeHelper(helper.NetworkHelper):
- def _get_client(self, pod_name=None):
- return FakeClient(pod_name)
-
- def _prepare_top_element_by_call(self, t_ctx, q_ctx,
- project_id, pod, ele, _type, body):
- if not q_ctx:
- q_ctx = FakeNeutronContext()
- return super(FakeHelper, self)._prepare_top_element_by_call(
- t_ctx, q_ctx, project_id, pod, ele, _type, body)
-
- def _get_top_element(self, t_ctx, q_ctx, _type, _id):
- if not q_ctx:
- q_ctx = FakeNeutronContext()
- return super(FakeHelper, self)._get_top_element(
- t_ctx, q_ctx, _type, _id)
-
-
-class FakeTypeManager(managers.TricircleTypeManager):
- def _register_types(self):
- local_driver = type_local.LocalTypeDriver()
- self.drivers[constants.NT_LOCAL] = FakeExtension(local_driver)
- vlan_driver = type_shared_vlan.SharedVLANTypeDriver()
- self.drivers[constants.NT_SHARED_VLAN] = FakeExtension(vlan_driver)
-
- def extend_network_dict_provider(self, cxt, net):
- target_net = None
- for t_net in TOP_NETS:
- if t_net['id'] == net['id']:
- target_net = t_net
- if not target_net:
- return
- for segment in TOP_SEGMENTS:
- if target_net['id'] == segment['network_id']:
- target_net['provider:network_type'] = segment['network_type']
- target_net[
- 'provider:physical_network'] = segment['physical_network']
- target_net[
- 'provider:segmentation_id'] = segment['segmentation_id']
- break
-
-
-class FakePlugin(plugin.TricirclePlugin):
- def __init__(self):
- self.set_ipam_backend()
- self.helper = FakeHelper(self)
- self.xjob_handler = FakeRPCAPI(self)
- self.type_manager = FakeTypeManager()
-
- def _get_client(self, pod_name):
- return FakeClient(pod_name)
-
- def _make_network_dict(self, network, fields=None,
- process_extensions=True, context=None):
- network = _transform_az(network)
- if 'project_id' in network:
- network['tenant_id'] = network['project_id']
- return network
-
- def _make_subnet_dict(self, subnet, fields=None, context=None):
- return subnet
-
- def _make_port_dict(self, port, fields=None, process_extensions=True):
- if port.get('fixed_ips'):
- if isinstance(port['fixed_ips'][0], dict):
- return port
- else:
- for i, fixed_ip in enumerate(port['fixed_ips']):
- port['fixed_ips'][i] = {
- 'subnet_id': fixed_ip['subnet_id'],
- 'ip_address': fixed_ip['ip_address']}
- for allocation in TOP_IPALLOCATIONS:
- if allocation['port_id'] == port['id']:
- ret = {}
- for key, value in port.iteritems():
- if key == 'fixed_ips':
- ret[key] = [{'subnet_id': allocation['subnet_id'],
- 'ip_address': allocation['ip_address']}]
- else:
- ret[key] = value
- if 'project_id' in ret:
- ret['tenant_id'] = ret['project_id']
- return ret
- if 'project_id' in port:
- port['tenant_id'] = port['project_id']
- return port
-
- def _make_security_group_dict(self, security_group, fields=None):
- return security_group
-
-
-def fake_get_context_from_neutron_context(q_context):
- return context.get_db_context()
-
-
-def fake_get_client(self, pod_name):
- return FakeClient(pod_name)
-
-
-def fake_make_network_dict(self, network, fields=None,
- process_extensions=True, context=None):
- return network
-
-
-def fake_make_subnet_dict(self, subnet, fields=None, context=None):
- return subnet
-
-
-def fake_make_router_dict(self, router, fields=None, process_extensions=True):
- return _fill_external_gateway_info(router)
-
-
-def fake_generate_ip(subnet):
- suffix = 1
- for allocation in TOP_IPALLOCATIONS:
- if allocation['subnet_id'] == subnet['id']:
- ip = allocation['ip_address']
- current_suffix = int(ip[ip.rindex('.') + 1:])
- if current_suffix >= suffix:
- suffix = current_suffix
- suffix += 1
- cidr = subnet['cidr']
- new_ip = cidr[:cidr.rindex('.') + 1] + ('%d' % suffix)
- return {'ip_address': new_ip, 'subnet_id': subnet['id']}
-
-
-def fake_allocate_ips_for_port(self, context, port):
- if 'fixed_ips' in port['port'] and (
- port['port'][
- 'fixed_ips'] is not q_constants.ATTR_NOT_SPECIFIED):
- return port['port']['fixed_ips']
- for subnet in TOP_SUBNETS:
- if subnet['network_id'] == port['port']['network_id']:
- return [fake_generate_ip(subnet)]
-
-
-@classmethod
-def fake_get_instance(cls, subnet_pool, context):
- return FakePool(subnet_pool, context)
-
-
-class PluginTest(unittest.TestCase,
- test_security_groups.TricircleSecurityGroupTestMixin):
- def setUp(self):
- core.initialize()
- core.ModelBase.metadata.create_all(core.get_engine())
- cfg.CONF.register_opts(q_config.core_opts)
- plugin_path = 'tricircle.tests.unit.network.test_plugin.FakePlugin'
- cfg.CONF.set_override('core_plugin', plugin_path)
- self.context = context.Context()
- self.save_method = manager.NeutronManager._get_default_service_plugins
- manager.NeutronManager._get_default_service_plugins = mock.Mock()
- manager.NeutronManager._get_default_service_plugins.return_value = []
- xmanager.IN_TEST = True
-
- phynet = 'bridge'
- vlan_min = 2000
- vlan_max = 2001
- cfg.CONF.set_override('type_drivers', ['local', 'shared_vlan'],
- group='tricircle')
- cfg.CONF.set_override('tenant_network_types', ['local', 'shared_vlan'],
- group='tricircle')
- cfg.CONF.set_override('network_vlan_ranges',
- ['%s:%d:%d' % (phynet, vlan_min, vlan_max)],
- group='tricircle')
- cfg.CONF.set_override('bridge_network_type', 'shared_vlan',
- group='tricircle')
- for vlan in (vlan_min, vlan_max):
- TOP_VLANALLOCATIONS.append(
- DotDict({'physical_network': phynet,
- 'vlan_id': vlan, 'allocated': False}))
-
- def _basic_pod_route_setup(self):
- pod1 = {'pod_id': 'pod_id_1',
- 'pod_name': 'pod_1',
- 'az_name': 'az_name_1'}
- pod2 = {'pod_id': 'pod_id_2',
- 'pod_name': 'pod_2',
- 'az_name': 'az_name_2'}
- pod3 = {'pod_id': 'pod_id_0',
- 'pod_name': 'top_pod',
- 'az_name': ''}
- for pod in (pod1, pod2, pod3):
- db_api.create_pod(self.context, pod)
- route1 = {
- 'top_id': 'top_id_1',
- 'pod_id': 'pod_id_1',
- 'bottom_id': 'bottom_id_1',
- 'resource_type': 'port'}
- route2 = {
- 'top_id': 'top_id_2',
- 'pod_id': 'pod_id_2',
- 'bottom_id': 'bottom_id_2',
- 'resource_type': 'port'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting, route1)
- core.create_resource(self.context, models.ResourceRouting, route2)
-
- def _basic_port_setup(self):
- TOP_PORTS.extend([{'id': 'top_id_0', 'name': 'top',
- 'fixed_ips': [models_v2.IPAllocation(
- port_id='top_id_0', ip_address='10.0.0.1',
- subnet_id='top_subnet_id',
- network_id='top_net_id')]},
- {'id': 'top_id_1', 'name': 'top'},
- {'id': 'top_id_2', 'name': 'top'},
- {'id': 'top_id_3', 'name': 'top'}])
- BOTTOM1_PORTS.append({'id': 'bottom_id_1', 'name': 'bottom'})
- BOTTOM2_PORTS.append({'id': 'bottom_id_2', 'name': 'bottom'})
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'get_port')
- def test_get_port(self, mock_plugin_method):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- fake_plugin.get_port(neutron_context, 'top_id_0')
- port1 = fake_plugin.get_port(neutron_context, 'top_id_1')
- port2 = fake_plugin.get_port(neutron_context, 'top_id_2')
- fake_plugin.get_port(neutron_context, 'top_id_3')
-
- self.assertEqual({'id': 'top_id_1', 'name': 'bottom'}, port1)
- self.assertEqual({'id': 'top_id_2', 'name': 'bottom'}, port2)
- calls = [mock.call(neutron_context, 'top_id_0', None),
- mock.call(neutron_context, 'top_id_3', None)]
- mock_plugin_method.assert_has_calls(calls)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- def test_get_ports_pagination(self):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- ports1 = fake_plugin.get_ports(neutron_context, limit=1)
- ports2 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports1[-1]['id'])
- ports3 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports2[-1]['id'])
- ports4 = fake_plugin.get_ports(neutron_context, limit=1,
- marker=ports3[-1]['id'])
- ports = []
- expected_ports = [{'id': 'top_id_0', 'name': 'top',
- 'fixed_ips': [{'subnet_id': 'top_subnet_id',
- 'ip_address': '10.0.0.1'}]},
- {'id': 'top_id_1', 'name': 'bottom'},
- {'id': 'top_id_2', 'name': 'bottom'},
- {'id': 'top_id_3', 'name': 'top'}]
- for _ports in (ports1, ports2, ports3, ports4):
- ports.extend(_ports)
- self.assertItemsEqual(expected_ports, ports)
-
- ports = fake_plugin.get_ports(neutron_context)
- self.assertItemsEqual(expected_ports, ports)
-
- @patch.object(context, 'get_context_from_neutron_context',
- new=fake_get_context_from_neutron_context)
- @patch.object(plugin.TricirclePlugin, '_get_client',
- new=fake_get_client)
- def test_get_ports_filters(self):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- ports1 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_0']})
- ports2 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_1']})
- ports3 = fake_plugin.get_ports(neutron_context,
- filters={'id': ['top_id_4']})
- self.assertEqual([{'id': 'top_id_0', 'name': 'top',
- 'fixed_ips': [{'subnet_id': 'top_subnet_id',
- 'ip_address': '10.0.0.1'}]}], ports1)
- self.assertEqual([{'id': 'top_id_1', 'name': 'bottom'}], ports2)
- self.assertEqual([], ports3)
-
- TOP_ROUTERS.append({'id': 'router_id'})
- b_routers_list = [BOTTOM1_ROUTERS, BOTTOM2_ROUTERS]
- b_ports_list = [BOTTOM1_PORTS, BOTTOM2_PORTS]
- for i in xrange(1, 3):
- router_id = 'router_%d_id' % i
- b_routers_list[i - 1].append({'id': router_id})
- route = {
- 'top_id': 'router_id',
- 'pod_id': 'pod_id_%d' % i,
- 'bottom_id': router_id,
- 'resource_type': 'router'}
- with self.context.session.begin():
- core.create_resource(self.context,
- models.ResourceRouting, route)
- # find port and add device_id
- for port in b_ports_list[i - 1]:
- port_id = 'bottom_id_%d' % i
- if port['id'] == port_id:
- port['device_id'] = router_id
- ports = fake_plugin.get_ports(neutron_context,
- filters={'device_id': ['router_id']})
- expected = [{'id': 'top_id_1', 'name': 'bottom',
- 'device_id': 'router_id'},
- {'id': 'top_id_2', 'name': 'bottom',
- 'device_id': 'router_id'}]
- self.assertItemsEqual(expected, ports)
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'delete_port')
- @patch.object(FakeClient, 'delete_ports')
- def test_delete_port(self, mock_client_method, mock_plugin_method,
- mock_context_method):
- self._basic_pod_route_setup()
- self._basic_port_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context_method.return_value = tricircle_context
-
- fake_plugin.delete_port(neutron_context, 'top_id_0')
- fake_plugin.delete_port(neutron_context, 'top_id_1')
-
- calls = [mock.call(neutron_context, 'top_id_0'),
- mock.call(neutron_context, 'top_id_1')]
- mock_plugin_method.assert_has_calls(calls)
- mock_client_method.assert_called_once_with(tricircle_context,
- 'bottom_id_1')
-
- @patch.object(context, 'get_context_from_neutron_context')
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'update_network')
- @patch.object(db_base_plugin_v2.NeutronDbPluginV2, 'create_network')
- def test_network_az(self, mock_create, mock_update, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- network = {'network': {
- 'id': 'net_id', 'name': 'net_az', 'tenant_id': 'test_tenant_id',
- 'availability_zone_hints': ['az_name_1', 'az_name_2']}}
- mock_create.return_value = {'id': 'net_id', 'name': 'net_az'}
- mock_update.return_value = network['network']
- fake_plugin.create_network(neutron_context, network)
- mock_update.assert_called_once_with(
- neutron_context, 'net_id',
- {'network': {
- 'availability_zone_hints': '["az_name_1", "az_name_2"]'}})
-
- err_network = {'network': {
- 'id': 'net_id', 'name': 'net_az', 'tenant_id': 'test_tenant_id',
- 'availability_zone_hints': ['az_name_1', 'az_name_3']}}
- mock_create.return_value = {'id': 'net_id', 'name': 'net_az'}
- self.assertRaises(az_ext.AvailabilityZoneNotFound,
- fake_plugin.create_network,
- neutron_context, err_network)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- neutron_context = FakeNeutronContext()
- tricircle_context = context.get_db_context()
- mock_context.return_value = tricircle_context
-
- network = {'network': {
- 'id': 'net_id', 'name': 'net_az', 'tenant_id': 'test_tenant_id',
- 'admin_state_up': True, 'shared': False,
- 'availability_zone_hints': ['az_name_1', 'az_name_2']}}
- fake_plugin.create_network(neutron_context, network)
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(context, 'get_context_from_neutron_context')
- def test_prepare_element(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- for pod in db_api.list_pods(t_ctx):
- if not pod['az_name']:
- t_pod = pod
- else:
- b_pod = pod
-
- # test _prepare_top_element
- pool_id = fake_plugin._get_bridge_subnet_pool_id(
- t_ctx, q_ctx, 'project_id', t_pod, True)
- net, subnet = fake_plugin._get_bridge_network_subnet(
- t_ctx, q_ctx, 'project_id', t_pod, pool_id, True)
- port = fake_plugin._get_bridge_interface(t_ctx, q_ctx, 'project_id',
- pod, net['id'], 'b_router_id',
- None, True)
-
- top_entry_map = {}
- with t_ctx.session.begin():
- for entry in core.query_resource(
- t_ctx, models.ResourceRouting,
- [{'key': 'pod_id', 'comparator': 'eq',
- 'value': 'pod_id_0'}], []):
- top_entry_map[entry['resource_type']] = entry
- self.assertEqual(net['id'], subnet['network_id'])
- self.assertEqual(net['id'], port['network_id'])
- self.assertEqual(subnet['id'], port['fixed_ips'][0]['subnet_id'])
- self.assertEqual(top_entry_map['network']['bottom_id'], net['id'])
- self.assertEqual(top_entry_map['subnet']['bottom_id'], subnet['id'])
- self.assertEqual(top_entry_map['port']['bottom_id'], port['id'])
-
- # test _prepare_bottom_element
- _, b_port_id, _, _ = fake_plugin._get_bottom_bridge_elements(
- q_ctx, 'project_id', b_pod, net, False, subnet, port)
- b_port = fake_plugin._get_client(b_pod['pod_name']).get_ports(
- t_ctx, b_port_id)
-
- bottom_entry_map = {}
- with t_ctx.session.begin():
- for entry in core.query_resource(
- t_ctx, models.ResourceRouting,
- [{'key': 'pod_id', 'comparator': 'eq',
- 'value': b_pod['pod_id']}], []):
- bottom_entry_map[entry['resource_type']] = entry
- self.assertEqual(bottom_entry_map['network']['top_id'], net['id'])
- self.assertEqual(bottom_entry_map['network']['bottom_id'],
- b_port['network_id'])
- self.assertEqual(bottom_entry_map['subnet']['top_id'], subnet['id'])
- self.assertEqual(bottom_entry_map['subnet']['bottom_id'],
- b_port['fixed_ips'][0]['subnet_id'])
- self.assertEqual(bottom_entry_map['port']['top_id'], port['id'])
- self.assertEqual(bottom_entry_map['port']['bottom_id'], b_port_id)
-
- @staticmethod
- def _prepare_router_test(tenant_id, ctx, pod_name, index):
- t_net_id = uuidutils.generate_uuid()
- t_subnet_id = uuidutils.generate_uuid()
- b_net_id = uuidutils.generate_uuid()
- b_subnet_id = uuidutils.generate_uuid()
-
-        # no need to specify az, we will set up the router in the pod where
-        # the bottom network is created
- t_net = {
- 'id': t_net_id,
- 'name': 'top_net_%d' % index,
- 'tenant_id': tenant_id
- }
- t_subnet = {
- 'id': t_subnet_id,
- 'network_id': t_net_id,
- 'name': 'top_subnet_%d' % index,
- 'ip_version': 4,
- 'cidr': '10.0.%d.0/24' % index,
- 'allocation_pools': [],
- 'enable_dhcp': True,
- 'gateway_ip': '10.0.%d.1' % index,
- 'ipv6_address_mode': '',
- 'ipv6_ra_mode': '',
- 'tenant_id': tenant_id
- }
- TOP_NETS.append(DotDict(t_net))
- TOP_SUBNETS.append(DotDict(t_subnet))
- subnet_info = {'id': t_subnet['id'],
- 'tenant_id': t_subnet['tenant_id'],
- 'cidr': t_subnet['cidr'],
- 'gateway': t_subnet['gateway_ip'],
- 'pools': t_subnet['allocation_pools']}
- SUBNET_INFOS[subnet_info['id']] = subnet_info
-
- b_net = {
- 'id': b_net_id,
- 'name': t_net_id,
- 'tenant_id': tenant_id
- }
- b_subnet = {
- 'id': b_subnet_id,
- 'network_id': b_net_id,
- 'name': b_subnet_id,
- 'ip_version': 4,
- 'cidr': '10.0.%d.0/24' % index,
- 'allocation_pools': [],
- 'enable_dhcp': True,
- 'gateway_ip': '10.0.%d.1' % index,
- 'ipv6_address_mode': '',
- 'ipv6_ra_mode': '',
- 'tenant_id': tenant_id
- }
- if pod_name == 'pod_1':
- BOTTOM1_NETS.append(DotDict(b_net))
- BOTTOM1_SUBNETS.append(DotDict(b_subnet))
- else:
- BOTTOM2_NETS.append(DotDict(b_net))
- BOTTOM2_SUBNETS.append(DotDict(b_subnet))
-
- pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_net_id,
- 'bottom_id': b_net_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_NETWORK})
- core.create_resource(ctx, models.ResourceRouting,
- {'top_id': t_subnet_id,
- 'bottom_id': b_subnet_id,
- 'pod_id': pod_id,
- 'project_id': tenant_id,
- 'resource_type': constants.RT_SUBNET})
-
- if len(TOP_ROUTERS) == 0:
- t_router_id = uuidutils.generate_uuid()
- t_router = {
- 'id': t_router_id,
- 'name': 'top_router',
- 'distributed': False,
- 'tenant_id': tenant_id,
- 'attached_ports': []
- }
- TOP_ROUTERS.append(DotDict(t_router))
- else:
- t_router_id = TOP_ROUTERS[0]['id']
-
- return t_net_id, t_subnet_id, t_router_id, b_net_id, b_subnet_id
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_add_interface(self, mock_context, mock_rpc):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
-
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
-
- _, b_router_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_router_id, 'router')[0]
-
- mock_rpc.assert_called_once_with(t_ctx, t_router_id)
- for b_net in BOTTOM1_NETS:
- if 'provider:segmentation_id' in b_net:
- self.assertIn(b_net['provider:segmentation_id'], (2000, 2001))
- # only one VLAN allocated, for E-W bridge network
- allocations = [
- allocation['allocated'] for allocation in TOP_VLANALLOCATIONS]
- self.assertItemsEqual([True, False], allocations)
- for segment in TOP_SEGMENTS:
- self.assertIn(segment['segmentation_id'], (2000, 2001))
-
- bridge_port_name = constants.ew_bridge_port_name % (tenant_id,
- b_router_id)
- _, t_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, bridge_port_name, 'port')[0]
- _, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_bridge_port_id, 'port')[0]
-
- (t_net_id, t_subnet_id, t_router_id,
- b_another_net_id, b_another_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 2)
-
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
-
- t_ns_bridge_net_id = None
- for net in TOP_NETS:
- if net['name'].startswith('ns_bridge'):
- t_ns_bridge_net_id = net['id']
-        # N-S bridge not created since no external network is created
- self.assertIsNone(t_ns_bridge_net_id)
-
- device_ids = ['', '', '']
- for port in BOTTOM1_PORTS:
- if port['id'] == b_bridge_port_id:
- device_ids[0] = port['device_id']
- elif port['network_id'] == b_net_id and (
- port['device_owner'] == 'network:router_interface'):
- device_ids[1] = port['device_id']
- elif port['network_id'] == b_another_net_id and (
- port['device_owner'] == 'network:router_interface'):
- device_ids[2] = port['device_id']
-
- self.assertEqual(device_ids, [b_router_id, b_router_id, b_router_id])
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
- @patch.object(FakeClient, 'add_gateway_routers')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_add_interface_with_external_network(self, mock_context,
- mock_action, mock_rpc):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
-
- e_net_id = uuidutils.generate_uuid()
- e_net = {'id': e_net_id,
- 'name': 'ext-net',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'availability_zone_hints': '["pod_2"]'}
- TOP_NETS.append(e_net)
-
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
-
- b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_router_id, 'pod_1', 'router')
-
- mock_rpc.assert_called_once_with(t_ctx, t_router_id)
- for b_net in BOTTOM1_NETS:
- if 'provider:segmentation_id' in b_net:
- self.assertIn(b_net['provider:segmentation_id'], (2000, 2001))
- # two VLANs allocated, for E-W and N-S bridge network
- allocations = [
- allocation['allocated'] for allocation in TOP_VLANALLOCATIONS]
- self.assertItemsEqual([True, True], allocations)
- for segment in TOP_SEGMENTS:
- self.assertIn(segment['segmentation_id'], (2000, 2001))
-
- bridge_port_name = constants.ew_bridge_port_name % (tenant_id,
- b_router_id)
- _, t_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, bridge_port_name, 'port')[0]
- _, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_bridge_port_id, 'port')[0]
-
- (t_net_id, t_subnet_id, t_router_id,
- b_another_net_id, b_another_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 2)
-
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
-
- for net in TOP_NETS:
- if net['name'].startswith('ns_bridge'):
- t_ns_bridge_net_id = net['id']
- for subnet in TOP_SUBNETS:
- if subnet['name'].startswith('ns_bridge'):
- t_ns_bridge_subnet_id = subnet['id']
- b_ns_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_ns_bridge_net_id, 'pod_1', constants.RT_NETWORK)
- b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_ns_bridge_subnet_id, 'pod_1', constants.RT_SUBNET)
-        # The internal and external networks are in different pods, so the
-        # N-S bridge network needs to be created and the gateway set.
-        # add_router_interface is called twice, so add_gateway is also
-        # called twice. add_interface is called three times because when
-        # add_router_interface is called the second time, the bottom router
-        # is already attached to the E-W bridge network, so only the internal
-        # network needs to be attached to the bottom router.
- calls = [mock.call(t_ctx, b_router_id,
- {'network_id': b_ns_bridge_net_id,
- 'external_fixed_ips': [
- {'subnet_id': b_ns_bridge_subnet_id,
- 'ip_address': '100.128.0.2'}]}),
- mock.call(t_ctx, b_router_id,
- {'network_id': b_ns_bridge_net_id,
- 'external_fixed_ips': [
- {'subnet_id': b_ns_bridge_subnet_id,
- 'ip_address': '100.128.0.2'}]})]
- mock_action.assert_has_calls(calls)
-
- device_ids = ['', '', '']
- for port in BOTTOM1_PORTS:
- if port['id'] == b_bridge_port_id:
- device_ids[0] = port['device_id']
- elif port['network_id'] == b_net_id and (
- port['device_owner'] == 'network:router_interface'):
- device_ids[1] = port['device_id']
- elif port['network_id'] == b_another_net_id and (
- port['device_owner'] == 'network:router_interface'):
- device_ids[2] = port['device_id']
- self.assertEqual(device_ids, [b_router_id, b_router_id, b_router_id])
-
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_2', 2)
-
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
-
- b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_router_id, 'pod_2', 'router')
- bridge_port_name = constants.ew_bridge_port_name % (tenant_id,
- b_router_id)
- _, t_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, bridge_port_name, 'port')[0]
- _, b_bridge_port_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_bridge_port_id, 'port')[0]
-        # The internal and external networks are in the same pod, so there is
-        # no need to create the N-S bridge network when attaching the router
-        # interface (the N-S bridge network is created when setting the router
-        # external gateway), so add_gateway is not called.
- device_ids = ['', '']
- for port in BOTTOM2_PORTS:
- if port['id'] == b_bridge_port_id:
- device_ids[0] = port['device_id']
- elif port['network_id'] == b_net_id and (
- port['device_owner'] == 'network:router_interface'):
- device_ids[1] = port['device_id']
- self.assertEqual(device_ids, [b_router_id, b_router_id])
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeRPCAPI, 'configure_extra_routes', new=mock.Mock)
- @patch.object(FakeClient, 'action_routers')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_add_interface_exception(self, mock_context, mock_action):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
-
- with t_ctx.session.begin():
- entries = core.query_resource(t_ctx, models.ResourceRouting,
- [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': 'port'}], [])
- entry_num = len(entries)
-
- mock_action.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(q_exceptions.ConnectionFailed,
- fake_plugin.add_router_interface,
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})
- self.assertEqual(0, len(TOP_ROUTERS[0]['attached_ports']))
-
- with t_ctx.session.begin():
- entries = core.query_resource(t_ctx, models.ResourceRouting,
- [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': 'port'}], [])
- # two new entries, for top and bottom bridge ports
- self.assertEqual(entry_num + 2, len(entries))
-        # top and bottom interfaces are deleted, only the bridge port is left
- self.assertEqual(1, len(TOP_PORTS))
- self.assertEqual(1, len(BOTTOM1_PORTS))
-
- mock_action.side_effect = None
- fake_plugin.add_router_interface(q_ctx, t_router_id,
- {'subnet_id': t_subnet_id})
- # bottom dhcp port and bridge port
- self.assertEqual(2, len(BOTTOM1_PORTS))
- with t_ctx.session.begin():
- entries = core.query_resource(t_ctx, models.ResourceRouting,
- [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': 'port'}], [])
- # three more entries, for top and bottom dhcp ports, top interface
- self.assertEqual(entry_num + 2 + 3, len(entries))
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeBaseRPCAPI, 'configure_extra_routes', new=mock.Mock)
- @patch.object(FakeClient, '_get_connection')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_add_interface_exception_port_left(self, mock_context,
- mock_connect):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
- mock_connect.side_effect = q_exceptions.ConnectionFailed
- self.assertRaises(q_exceptions.ConnectionFailed,
- fake_plugin.add_router_interface,
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})
- # top interface is removed
- self.assertEqual(0, len(TOP_ROUTERS[0]['attached_ports']))
-
- mock_connect.side_effect = None
-        # test that we succeed when the bottom pod comes back
- fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})
- # bottom dhcp port, bottom interface and bridge port
- self.assertEqual(3, len(BOTTOM1_PORTS))
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeBaseRPCAPI, 'configure_extra_routes')
- @patch.object(FakeClient, 'remove_interface_routers')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_remove_interface(self, mock_context, mock_remove, mock_rpc):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
- t_port_id = fake_plugin.add_router_interface(
- q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
- _, b_router_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_router_id, constants.RT_ROUTER)[0]
-
- for port in BOTTOM1_PORTS:
- if port['network_id'] == b_net_id and (
- port['device_owner'] == 'network:router_interface'):
- b_interface_id = port['id']
-
- fake_plugin.remove_router_interface(
- q_ctx, t_router_id, {'port_id': t_port_id})
-
- mock_remove.assert_called_with(
- t_ctx, b_router_id, {'port_id': b_interface_id})
- mock_rpc.assert_called_with(t_ctx, t_router_id)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_external_network_no_az_pod(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- # create external network without specifying az pod name
- body = {
- 'network': {
- 'name': 'ext-net',
- 'admin_state_up': True,
- 'shared': True,
- 'tenant_id': 'test_tenant_id',
- 'router:external': True,
- }
- }
-
- top_net = fake_plugin.create_network(q_ctx, body)
- for net in BOTTOM1_NETS:
- if net.get('router:external'):
- bottom_net = net
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, top_net['id'], constants.RT_NETWORK)
- self.assertEqual(mappings[0][1], bottom_net['id'])
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_external_network(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- # create external network specifying az name
- body = {
- 'network': {
- 'router:external': True,
- 'tenant_id': 'test_tenant_id',
- 'availability_zone_hints': ['az_name_1']
- }
- }
- self.assertRaises(exceptions.PodNotFound,
- fake_plugin.create_network, q_ctx, body)
- body = {
- 'network': {
- 'name': 'ext-net',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': 'test_tenant_id',
- 'router:external': True,
- 'availability_zone_hints': ['pod_1']
- }
- }
- top_net = fake_plugin.create_network(q_ctx, body)
- for net in BOTTOM1_NETS:
- if net.get('router:external'):
- bottom_net = net
- mappings = db_api.get_bottom_mappings_by_top_id(
- t_ctx, top_net['id'], constants.RT_NETWORK)
- self.assertEqual(mappings[0][1], bottom_net['id'])
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
- new=fake_make_router_dict)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeClient, 'action_routers')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_set_gateway(self, mock_context, mock_action):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- t_net_body = {
- 'name': 'ext_net',
- 'availability_zone_hints': ['pod_1'],
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'admin_state_up': True,
- 'shared': False,
- }
- fake_plugin.create_network(q_ctx, {'network': t_net_body})
- t_net_id = TOP_NETS[0]['id']
-
- t_subnet_body = {
- 'network_id': t_net_id, # only one network created
- 'name': 'ext_subnet',
- 'ip_version': 4,
- 'cidr': '100.64.0.0/24',
- 'allocation_pools': [],
- 'enable_dhcp': False,
- 'gateway_ip': '100.64.0.1',
- 'dns_nameservers': '',
- 'host_routes': '',
- 'tenant_id': tenant_id
- }
- fake_plugin.create_subnet(q_ctx, {'subnet': t_subnet_body})
- t_subnet_id = TOP_SUBNETS[0]['id']
-
- t_router_id = uuidutils.generate_uuid()
- t_router = {
- 'id': t_router_id,
- 'name': 'router',
- 'distributed': False,
- 'tenant_id': tenant_id,
- 'attached_ports': []
- }
-
- TOP_ROUTERS.append(DotDict(t_router))
- fake_plugin.update_router(
- q_ctx, t_router_id,
- {'router': {'external_gateway_info': {
- 'network_id': TOP_NETS[0]['id'],
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': TOP_SUBNETS[0]['id'],
- 'ip_address': '100.64.0.5'}]}}})
-
- b_router_id = BOTTOM1_ROUTERS[0]['id']
- b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_net_id, 'pod_1', constants.RT_NETWORK)
- b_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_subnet_id, 'pod_1', constants.RT_SUBNET)
-
- for subnet in TOP_SUBNETS:
- if subnet['name'].startswith('ns_bridge_subnet'):
- t_ns_bridge_subnet_id = subnet['id']
- b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_ns_bridge_subnet_id, 'pod_1', constants.RT_SUBNET)
- body = {'network_id': b_net_id,
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': b_subnet_id,
- 'ip_address': '100.64.0.5'}]}
- calls = [mock.call(t_ctx, 'add_gateway', b_router_id, body),
- mock.call(t_ctx, 'add_interface', b_router_id,
- {'subnet_id': b_ns_bridge_subnet_id})]
- mock_action.assert_has_calls(calls)
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
- new=fake_make_router_dict)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(FakeClient, 'action_routers')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_unset_gateway(self, mock_context, mock_action):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- tenant_id = 'test_tenant_id'
- t_net_body = {
- 'name': 'ext_net',
- 'availability_zone_hints': ['pod_1'],
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'admin_state_up': True,
- 'shared': False,
- }
- fake_plugin.create_network(q_ctx, {'network': t_net_body})
- t_net_id = TOP_NETS[0]['id']
-
- t_subnet_body = {
- 'network_id': t_net_id, # only one network created
- 'name': 'ext_subnet',
- 'ip_version': 4,
- 'cidr': '100.64.0.0/24',
- 'allocation_pools': [],
- 'enable_dhcp': False,
- 'gateway_ip': '100.64.0.1',
- 'dns_nameservers': '',
- 'host_routes': '',
- 'tenant_id': tenant_id
- }
- fake_plugin.create_subnet(q_ctx, {'subnet': t_subnet_body})
- t_subnet_id = TOP_SUBNETS[0]['id']
-
- t_router_id = uuidutils.generate_uuid()
- t_router = {
- 'id': t_router_id,
- 'name': 'router',
- 'distributed': False,
- 'tenant_id': tenant_id,
- 'attached_ports': []
- }
-
- TOP_ROUTERS.append(DotDict(t_router))
- # first add router gateway
- fake_plugin.update_router(
- q_ctx, t_router_id,
- {'router': {'external_gateway_info': {
- 'network_id': t_net_id,
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': t_subnet_id,
- 'ip_address': '100.64.0.5'}]}}})
- _, b_router_id = db_api.get_bottom_mappings_by_top_id(
- t_ctx, t_router_id, constants.RT_ROUTER)[0]
-
- # then remove router gateway
- fake_plugin.update_router(
- q_ctx, t_router_id,
- {'router': {'external_gateway_info': {}}})
- mock_action.assert_called_with(t_ctx, 'remove_gateway', b_router_id)
-
- def _prepare_associate_floatingip_test(self, t_ctx, q_ctx, fake_plugin):
- tenant_id = 'test_tenant_id'
- self._basic_pod_route_setup()
- (t_net_id, t_subnet_id,
- t_router_id, b_net_id, b_subnet_id) = self._prepare_router_test(
- tenant_id, t_ctx, 'pod_1', 1)
-
- net_body = {
- 'name': 'ext_net',
- 'admin_state_up': True,
- 'shared': False,
- 'tenant_id': tenant_id,
- 'router:external': True,
- 'availability_zone_hints': ['pod_2']
- }
- e_net = fake_plugin.create_network(q_ctx, {'network': net_body})
- subnet_body = {
- 'network_id': e_net['id'],
- 'name': 'ext_subnet',
- 'ip_version': 4,
- 'cidr': '100.64.0.0/24',
- 'allocation_pools': [{'start': '100.64.0.2',
- 'end': '100.64.0.254'}],
- 'enable_dhcp': False,
- 'gateway_ip': '100.64.0.1',
- 'dns_nameservers': '',
- 'host_routes': '',
- 'tenant_id': tenant_id
- }
- e_subnet = fake_plugin.create_subnet(q_ctx, {'subnet': subnet_body})
- # set external gateway
- fake_plugin.update_router(
- q_ctx, t_router_id,
- {'router': {'external_gateway_info': {
- 'network_id': e_net['id'],
- 'enable_snat': False,
- 'external_fixed_ips': [{'subnet_id': e_subnet['id'],
- 'ip_address': '100.64.0.5'}]}}})
- # create floating ip
- fip_body = {'floating_network_id': e_net['id'],
- 'tenant_id': tenant_id}
- fip = fake_plugin.create_floatingip(q_ctx, {'floatingip': fip_body})
- # add router interface
- fake_plugin.add_router_interface(q_ctx, t_router_id,
- {'subnet_id': t_subnet_id})
- # create internal port
- t_port_id = uuidutils.generate_uuid()
- b_port_id = uuidutils.generate_uuid()
- t_port = {
- 'id': t_port_id,
- 'network_id': t_net_id,
- 'mac_address': 'fa:16:3e:96:41:03',
- 'fixed_ips': [{'subnet_id': t_subnet_id,
- 'ip_address': '10.0.0.4'}]
- }
- b_port = {
- 'id': b_port_id,
- 'name': t_port_id,
- 'network_id': db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_net_id, 'pod_1', constants.RT_NETWORK),
- 'mac_address': 'fa:16:3e:96:41:03',
- 'fixed_ips': [
- {'subnet_id': db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, t_subnet_id, 'pod_1', constants.RT_SUBNET),
- 'ip_address': '10.0.0.4'}]
- }
- TOP_PORTS.append(t_port)
- BOTTOM1_PORTS.append(b_port)
- route = {'top_id': t_port_id,
- 'pod_id': 'pod_id_1',
- 'bottom_id': b_port_id,
- 'resource_type': constants.RT_PORT}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- return t_port_id, b_port_id, fip, e_net
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
- new=fake_make_router_dict)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip',
- new=update_floatingip)
- @patch.object(FakeClient, 'create_floatingips')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_associate_floatingip(self, mock_context, mock_create):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- (t_port_id, b_port_id,
- fip, e_net) = self._prepare_associate_floatingip_test(t_ctx, q_ctx,
- fake_plugin)
-
- # associate floating ip
- fip_body = {'port_id': t_port_id}
- fake_plugin.update_floatingip(q_ctx, fip['id'],
- {'floatingip': fip_body})
-
- b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, e_net['id'], 'pod_2', constants.RT_NETWORK)
- for port in BOTTOM2_PORTS:
- if port['name'] == 'ns_bridge_port':
- ns_bridge_port = port
- for net in TOP_NETS:
- if net['name'].startswith('ns_bridge'):
- b_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, net['id'], 'pod_1', constants.RT_NETWORK)
- calls = [mock.call(t_ctx,
- {'floatingip': {
- 'floating_network_id': b_ext_net_id,
- 'floating_ip_address': fip[
- 'floating_ip_address'],
- 'port_id': ns_bridge_port['id']}}),
- mock.call(t_ctx,
- {'floatingip': {
- 'floating_network_id': b_bridge_net_id,
- 'floating_ip_address': '100.128.0.3',
- 'port_id': b_port_id}})]
- mock_create.assert_has_calls(calls)
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
- new=fake_make_router_dict)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip',
- new=update_floatingip)
- @patch.object(FakePlugin, '_rollback_floatingip_data')
- @patch.object(FakeRPCAPI, 'setup_bottom_router')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_associate_floatingip_port_exception(
- self, mock_context, mock_setup, mock_rollback):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- (t_port_id, b_port_id,
- fip, e_net) = self._prepare_associate_floatingip_test(t_ctx, q_ctx,
- fake_plugin)
-
- # associate floating ip and exception occurs
-        # we would not actually get this exception when calling
-        # setup_bottom_router; we raise it here for test purposes
- mock_setup.side_effect = q_exceptions.ConnectionFailed
- fip_body = {'port_id': t_port_id}
- self.assertRaises(q_exceptions.ConnectionFailed,
- fake_plugin.update_floatingip, q_ctx, fip['id'],
- {'floatingip': fip_body})
- data = {'fixed_port_id': None,
- 'fixed_ip_address': None,
- 'router_id': None}
- mock_rollback.assert_called_once_with(q_ctx, fip['id'], data)
-
- @patch.object(driver.Pool, 'get_instance', new=fake_get_instance)
- @patch.object(ipam_pluggable_backend.IpamPluggableBackend,
- '_allocate_ips_for_port', new=fake_allocate_ips_for_port)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, '_make_router_dict',
- new=fake_make_router_dict)
- @patch.object(db_base_plugin_common.DbBasePluginCommon,
- '_make_subnet_dict', new=fake_make_subnet_dict)
- @patch.object(l3_db.L3_NAT_dbonly_mixin, 'update_floatingip',
- new=update_floatingip)
- @patch.object(FakeClient, 'delete_floatingips')
- @patch.object(FakeClient, 'update_floatingips')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_disassociate_floatingip(self, mock_context, mock_update,
- mock_delete):
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- (t_port_id, b_port_id,
- fip, e_net) = self._prepare_associate_floatingip_test(t_ctx, q_ctx,
- fake_plugin)
-
- # associate floating ip
- fip_body = {'port_id': t_port_id}
- fake_plugin.update_floatingip(q_ctx, fip['id'],
- {'floatingip': fip_body})
-
- bridge_port_name = constants.ns_bridge_port_name % (
- e_net['tenant_id'], None, b_port_id)
- t_pod = db_api.get_top_pod(t_ctx)
- mapping = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
- # check routing for bridge port in top pod exists
- self.assertIsNotNone(mapping)
-
- # disassociate floating ip
- fip_body = {'port_id': None}
- fake_plugin.update_floatingip(q_ctx, fip['id'],
- {'floatingip': fip_body})
-
- fip_id1 = BOTTOM1_FIPS[0]['id']
- fip_id2 = BOTTOM2_FIPS[0]['id']
- mock_update.assert_called_once_with(
- t_ctx, fip_id2, {'floatingip': {'port_id': None}})
- mock_delete.assert_called_once_with(t_ctx, fip_id1)
- mapping = db_api.get_bottom_id_by_top_id_pod_name(
- t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
- # check routing for bridge port in top pod is deleted
- self.assertIsNone(mapping)
-
- # check the association information is cleared
- self.assertIsNone(TOP_FLOATINGIPS[0]['fixed_port_id'])
- self.assertIsNone(TOP_FLOATINGIPS[0]['fixed_ip_address'])
- self.assertIsNone(TOP_FLOATINGIPS[0]['router_id'])
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_security_group_rule(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_create_security_group_rule(fake_plugin, q_ctx, t_ctx,
- 'pod_id_1', TOP_SGS, BOTTOM1_SGS)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_delete_security_group_rule(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_delete_security_group_rule(fake_plugin, q_ctx, t_ctx,
- 'pod_id_1', TOP_SGS,
- TOP_SG_RULES, BOTTOM1_SGS)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_handle_remote_group_invalid_input(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_handle_remote_group_invalid_input(fake_plugin, q_ctx, t_ctx,
- 'pod_id_1', TOP_SGS,
- TOP_SG_RULES, BOTTOM1_SGS)
-
- @patch.object(context, 'get_context_from_neutron_context')
- def test_handle_default_sg_invalid_input(self, mock_context):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
-
- self._test_handle_default_sg_invalid_input(fake_plugin, q_ctx, t_ctx,
- 'pod_id_1', TOP_SGS,
- TOP_SG_RULES, BOTTOM1_SGS)
-
- @patch.object(FakeClient, 'create_security_group_rules')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_create_security_group_rule_exception(self, mock_context,
- mock_create):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
- mock_create.side_effect = q_exceptions.ConnectionFailed
-
- self._test_create_security_group_rule_exception(
- fake_plugin, q_ctx, t_ctx, 'pod_id_1', TOP_SGS, BOTTOM1_SGS)
-
- @patch.object(FakeClient, 'delete_security_group_rules')
- @patch.object(context, 'get_context_from_neutron_context')
- def test_delete_security_group_rule_exception(self, mock_context,
- mock_delete):
- self._basic_pod_route_setup()
-
- fake_plugin = FakePlugin()
- q_ctx = FakeNeutronContext()
- t_ctx = context.get_db_context()
- mock_context.return_value = t_ctx
- mock_delete.side_effect = q_exceptions.ConnectionFailed
-
- self._test_delete_security_group_rule_exception(
- fake_plugin, q_ctx, t_ctx, 'pod_id_1', TOP_SGS, TOP_SG_RULES,
- BOTTOM1_SGS)
-
- def tearDown(self):
- core.ModelBase.metadata.drop_all(core.get_engine())
- for res in RES_LIST:
- del res[:]
- cfg.CONF.unregister_opts(q_config.core_opts)
- xmanager.IN_TEST = False
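
The router and floating-ip tests above keep querying the resource-routing table that maps each top resource to its bottom counterpart per pod. The sketch below is a standalone, dict-backed illustration of those two lookups; the in-memory store and the sample ids are made up, and only the shape of the lookups follows db_api as exercised by the tests.

# Standalone illustration of the top-to-bottom routing lookups used in the
# tests above. ROUTING is a stand-in for the ResourceRouting table.
ROUTING = [
    # each entry: (top_id, pod_name, bottom_id, resource_type)
    ('t-router-1', 'pod_1', 'b-router-1', 'router'),
    ('t-net-1', 'pod_1', 'b-net-1', 'network'),
    ('t-net-1', 'pod_2', 'b-net-2', 'network'),
]


def get_bottom_mappings_by_top_id(top_id, resource_type):
    """Return [(pod_name, bottom_id)] for every pod hosting the resource."""
    return [(pod, bottom) for top, pod, bottom, rt in ROUTING
            if top == top_id and rt == resource_type]


def get_bottom_id_by_top_id_pod_name(top_id, pod_name, resource_type):
    """Return the bottom id in one specific pod, or None if not mapped."""
    for top, pod, bottom, rt in ROUTING:
        if top == top_id and pod == pod_name and rt == resource_type:
            return bottom
    return None


assert get_bottom_mappings_by_top_id('t-net-1', 'network') == [
    ('pod_1', 'b-net-1'), ('pod_2', 'b-net-2')]
assert get_bottom_id_by_top_id_pod_name('t-router-1', 'pod_1',
                                        'router') == 'b-router-1'
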
diff --git a/tricircle/tests/unit/network/test_security_groups.py b/tricircle/tests/unit/network/test_security_groups.py
deleted file mode 100644
index 398a39c..0000000
--- a/tricircle/tests/unit/network/test_security_groups.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from oslo_utils import uuidutils
-
-from tricircle.common import constants
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import exceptions
-
-
-class TricircleSecurityGroupTestMixin(object):
-
- @staticmethod
- def _build_test_rule(_id, sg_id, project_id, ip_prefix, remote_group=None):
- return {'security_group_id': sg_id,
- 'id': _id,
- 'tenant_id': project_id,
- 'remote_group_id': remote_group,
- 'direction': 'ingress',
- 'remote_ip_prefix': ip_prefix,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'}
-
- def _test_create_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
- top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- rule = {
- 'security_group_rule': self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.0.0/24')}
- plugin.create_security_group_rule(q_ctx, rule)
-
- self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
- b_rule = bottom1_sgs[0]['security_group_rules'][0]
- self.assertEqual(b_sg_id, b_rule['security_group_id'])
- rule['security_group_rule'].pop('security_group_id', None)
- b_rule.pop('security_group_id', None)
- self.assertEqual(rule['security_group_rule'], b_rule)
-
- def _test_delete_security_group_rule(self, plugin, q_ctx, t_ctx, pod_id,
- top_sgs, top_rules, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.1.0/24')
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '10.0.2.0/24')
- b_rule1 = self._build_test_rule(
- t_rule1_id, b_sg_id, project_id, '10.0.1.0/24')
- b_rule2 = self._build_test_rule(
- t_rule2_id, b_sg_id, project_id, '10.0.2.0/24')
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1, t_rule2]}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [b_rule1, b_rule2]}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- top_rules.append(t_rule2)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- plugin.delete_security_group_rule(q_ctx, t_rule1_id)
-
- self.assertEqual(1, len(bottom1_sgs[0]['security_group_rules']))
- b_rule = bottom1_sgs[0]['security_group_rules'][0]
- self.assertEqual(b_sg_id, b_rule['security_group_id'])
- t_rule2.pop('security_group_id', None)
- b_rule.pop('security_group_id', None)
- self.assertEqual(t_rule2, b_rule)
-
- def _test_handle_remote_group_invalid_input(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg1_id = uuidutils.generate_uuid()
- t_sg2_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg1_id, project_id, None, t_sg1_id)
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg1_id, project_id, None, t_sg2_id)
- t_sg = {'id': t_sg1_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg1_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg1_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- self.assertRaises(exceptions.RemoteGroupNotSupported,
- plugin.create_security_group_rule, q_ctx,
- {'security_group_rule': t_rule2})
- self.assertRaises(exceptions.RemoteGroupNotSupported,
- plugin.delete_security_group_rule, q_ctx, t_rule1_id)
-
- def _test_handle_default_sg_invalid_input(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule1_id = uuidutils.generate_uuid()
- t_rule2_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule1 = self._build_test_rule(
- t_rule1_id, t_sg_id, project_id, '10.0.0.0/24')
- t_rule2 = self._build_test_rule(
- t_rule2_id, t_sg_id, project_id, '10.0.1.0/24')
- t_sg = {'id': t_sg_id, 'name': 'default', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule1]}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- top_rules.append(t_rule1)
- bottom1_sgs.append(b_sg)
- route1 = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route1)
-
- self.assertRaises(exceptions.DefaultGroupUpdateNotSupported,
- plugin.create_security_group_rule, q_ctx,
- {'security_group_rule': t_rule2})
- self.assertRaises(exceptions.DefaultGroupUpdateNotSupported,
- plugin.delete_security_group_rule, q_ctx, t_rule1_id)
-
- def _test_create_security_group_rule_exception(
- self, plugin, q_ctx, t_ctx, pod_id, top_sgs, bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': []}
- top_sgs.append(t_sg)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- rule = {
- 'security_group_rule': self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.0.0/24')}
- self.assertRaises(exceptions.BottomPodOperationFailure,
- plugin.create_security_group_rule, q_ctx, rule)
-
- def _test_delete_security_group_rule_exception(self, plugin, q_ctx, t_ctx,
- pod_id, top_sgs, top_rules,
- bottom1_sgs):
- t_sg_id = uuidutils.generate_uuid()
- t_rule_id = uuidutils.generate_uuid()
- b_sg_id = uuidutils.generate_uuid()
-        project_id = 'test_project_id'
- t_rule = self._build_test_rule(
- t_rule_id, t_sg_id, project_id, '10.0.1.0/24')
- b_rule = self._build_test_rule(
- t_rule_id, b_sg_id, project_id, '10.0.1.0/24')
- t_sg = {'id': t_sg_id, 'name': 'test', 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [t_rule]}
- b_sg = {'id': b_sg_id, 'name': t_sg_id, 'description': '',
- 'tenant_id': project_id,
- 'security_group_rules': [b_rule]}
- top_sgs.append(t_sg)
- top_rules.append(t_rule)
- bottom1_sgs.append(b_sg)
- route = {
- 'top_id': t_sg_id,
- 'pod_id': pod_id,
- 'bottom_id': b_sg_id,
- 'resource_type': constants.RT_SG}
- with t_ctx.session.begin():
- core.create_resource(t_ctx, models.ResourceRouting, route)
-
- self.assertRaises(exceptions.BottomPodOperationFailure,
- plugin.delete_security_group_rule, q_ctx, t_rule_id)
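
The mixin above asserts two constraints on security-group rule changes: rules that reference a remote group are rejected, and rules on the per-tenant 'default' group cannot be added or removed. The standalone check below mirrors only that asserted behaviour; the validate_rule_change function and its arguments are illustrative, not the plugin's actual code path.

# Standalone sketch of the two validations exercised by the mixin above.
# The exception names mirror tricircle.network.exceptions; everything else
# (the function and its arguments) is illustrative.
class RemoteGroupNotSupported(Exception):
    pass


class DefaultGroupUpdateNotSupported(Exception):
    pass


def validate_rule_change(rule, security_group):
    """Reject rule changes that cannot be propagated to bottom pods."""
    if rule.get('remote_group_id'):
        # remote-group rules cannot be mapped across pods
        raise RemoteGroupNotSupported()
    if security_group['name'] == 'default':
        # the default group is managed by the plugin itself
        raise DefaultGroupUpdateNotSupported()


sg = {'id': 'sg-1', 'name': 'default'}
try:
    validate_rule_change({'remote_group_id': None}, sg)
except DefaultGroupUpdateNotSupported:
    pass  # expected: rules on the default group are refused
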
diff --git a/tricircle/tests/unit/nova_apigw/controllers/__init__.py b/tricircle/tests/unit/nova_apigw/controllers/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tricircle/tests/unit/xjob/__init__.py b/tricircle/tests/unit/xjob/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tricircle/xjob/__init__.py b/tricircle/xjob/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/tricircle/xjob/opts.py b/tricircle/xjob/opts.py
deleted file mode 100644
index bca2ec0..0000000
--- a/tricircle/xjob/opts.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-# All Rights Reserved
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import tricircle.xjob.xservice
-
-
-def list_opts():
- return [
- ('DEFAULT', tricircle.xjob.xservice.common_opts),
- ('DEFAULT', tricircle.xjob.xservice.service_opts),
- ]
diff --git a/tricircle/xjob/xmanager.py b/tricircle/xjob/xmanager.py
deleted file mode 100644
index f1f4793..0000000
--- a/tricircle/xjob/xmanager.py
+++ /dev/null
@@ -1,654 +0,0 @@
-# Copyright 2015 Huawei Technologies Co., Ltd.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-# implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import datetime
-import eventlet
-import netaddr
-import random
-import six
-
-from oslo_config import cfg
-from oslo_log import log as logging
-import oslo_messaging as messaging
-from oslo_service import periodic_task
-
-import neutronclient.common.exceptions as q_cli_exceptions
-
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common.i18n import _LI
-from tricircle.common.i18n import _LW
-from tricircle.common import xrpcapi
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-import tricircle.network.exceptions as t_network_exc
-from tricircle.network import helper
-
-
-CONF = cfg.CONF
-LOG = logging.getLogger(__name__)
-
-IN_TEST = False
-AZ_HINTS = 'availability_zone_hints'
-
-
-def _job_handle(job_type):
- def handle_func(func):
- @six.wraps(func)
- def handle_args(*args, **kwargs):
- if IN_TEST:
-                # NOTE(zhiyuan) the job mechanism causes unpredictable
-                # results in unit tests, so we would like to bypass it.
-                # However, we have trouble mocking a decorator that decorates
-                # member functions, which is why we use this flag; not an
-                # elegant way though.
- func(*args, **kwargs)
- return
- ctx = args[1]
- payload = kwargs['payload']
-
- resource_id = payload[job_type]
- db_api.new_job(ctx, job_type, resource_id)
- start_time = datetime.datetime.now()
-
- while True:
- current_time = datetime.datetime.now()
- delta = current_time - start_time
- if delta.seconds >= CONF.worker_handle_timeout:
- # quit when this handle is running for a long time
- break
- time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
- job_type, resource_id)
- time_success = db_api.get_latest_timestamp(
- ctx, constants.JS_Success, job_type, resource_id)
- if time_success and time_success >= time_new:
- break
- job = db_api.register_job(ctx, job_type, resource_id)
- if not job:
- # fail to obtain the lock, let other worker handle the job
- running_job = db_api.get_running_job(ctx, job_type,
- resource_id)
- if not running_job:
- # there are two reasons that running_job is None. one
- # is that the running job has just been finished, the
- # other is that all workers fail to register the job
- # due to deadlock exception. so we sleep and try again
- eventlet.sleep(CONF.worker_sleep_time)
- continue
- job_time = running_job['timestamp']
- current_time = datetime.datetime.now()
- delta = current_time - job_time
- if delta.seconds > CONF.job_run_expire:
- # previous running job expires, we set its status to
- # fail and try again to obtain the lock
- db_api.finish_job(ctx, running_job['id'], False,
- time_new)
- LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
- 'resource %(resource)s expires, set '
- 'its state to Fail'),
- {'job': running_job['id'],
- 'job_type': job_type,
- 'resource': resource_id})
- eventlet.sleep(CONF.worker_sleep_time)
- continue
- else:
- # previous running job is still valid, we just leave
- # the job to the worker who holds the lock
- break
- # successfully obtain the lock, start to execute handler
- try:
- func(*args, **kwargs)
- except Exception:
- db_api.finish_job(ctx, job['id'], False, time_new)
- LOG.error(_LE('Job %(job)s of type %(job_type)s for '
- 'resource %(resource)s fails'),
- {'job': job['id'],
- 'job_type': job_type,
- 'resource': resource_id})
- break
- db_api.finish_job(ctx, job['id'], True, time_new)
- eventlet.sleep(CONF.worker_sleep_time)
- return handle_args
- return handle_func
-
-
-class PeriodicTasks(periodic_task.PeriodicTasks):
- def __init__(self):
- super(PeriodicTasks, self).__init__(CONF)
-
-
-class XManager(PeriodicTasks):
-
- target = messaging.Target(version='1.0')
-
- def __init__(self, host=None, service_name='xjob'):
-
- LOG.debug(_('XManager initialization...'))
-
- if not host:
- host = CONF.host
- self.host = host
- self.service_name = service_name
- # self.notifier = rpc.get_notifier(self.service_name, self.host)
- self.additional_endpoints = []
- self.clients = {constants.TOP: client.Client()}
- self.job_handles = {
- constants.JT_ROUTER: self.configure_extra_routes,
- constants.JT_ROUTER_SETUP: self.setup_bottom_router,
- constants.JT_PORT_DELETE: self.delete_server_port}
- self.helper = helper.NetworkHelper()
- self.xjob_handler = xrpcapi.XJobAPI()
- super(XManager, self).__init__()
-
- def _get_client(self, pod_name=None):
- if not pod_name:
- return self.clients[constants.TOP]
- if pod_name not in self.clients:
- self.clients[pod_name] = client.Client(pod_name)
- return self.clients[pod_name]
-
- def periodic_tasks(self, context, raise_on_error=False):
- """Tasks to be run at a periodic interval."""
- return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
-
- def init_host(self):
-
- """init_host
-
- Hook to do additional manager initialization when one requests
- the service be started. This is called before any service record
- is created.
- Child classes should override this method.
- """
-
- LOG.debug(_('XManager init_host...'))
-
- pass
-
- def cleanup_host(self):
-
- """cleanup_host
-
- Hook to do cleanup work when the service shuts down.
- Child classes should override this method.
- """
-
- LOG.debug(_('XManager cleanup_host...'))
-
- pass
-
- def pre_start_hook(self):
-
- """pre_start_hook
-
- Hook to provide the manager the ability to do additional
- start-up work before any RPC queues/consumers are created. This is
- called after other initialization has succeeded and a service
- record is created.
- Child classes should override this method.
- """
-
- LOG.debug(_('XManager pre_start_hook...'))
-
- pass
-
- def post_start_hook(self):
-
- """post_start_hook
-
- Hook to provide the manager the ability to do additional
- start-up work immediately after a service creates RPC consumers
- and starts 'running'.
- Child classes should override this method.
- """
-
- LOG.debug(_('XManager post_start_hook...'))
-
- pass
-
- # rpc message endpoint handling
- def test_rpc(self, ctx, payload):
-
- LOG.info(_LI("xmanager receive payload: %s"), payload)
-
- info_text = "xmanager receive payload: %s" % payload
-
- return info_text
-
- @staticmethod
- def _get_resource_by_name(cli, cxt, _type, name):
- return cli.list_resources(_type, cxt, filters=[{'key': 'name',
- 'comparator': 'eq',
- 'value': name}])[0]
-
- @staticmethod
- def _get_router_interfaces(cli, cxt, router_id, net_id):
- return cli.list_ports(
- cxt, filters=[{'key': 'network_id', 'comparator': 'eq',
- 'value': net_id},
- {'key': 'device_id', 'comparator': 'eq',
- 'value': router_id}])
-
- @periodic_task.periodic_task
- def redo_failed_job(self, ctx):
- failed_jobs = db_api.get_latest_failed_jobs(ctx)
- failed_jobs = [
- job for job in failed_jobs if job['type'] in self.job_handles]
- if not failed_jobs:
- return
- # in one run we only pick one job to handle
- job_index = random.randint(0, len(failed_jobs) - 1)
- failed_job = failed_jobs[job_index]
- job_type = failed_job['type']
- payload = {job_type: failed_job['resource_id']}
- LOG.debug(_('Redo failed job for %(resource_id)s of type '
- '%(job_type)s'),
- {'resource_id': failed_job['resource_id'],
- 'job_type': job_type})
- self.job_handles[job_type](ctx, payload=payload)
-
- @staticmethod
- def _safe_create_bottom_floatingip(t_ctx, pod, client, fip_net_id,
- fip_address, port_id):
- try:
- client.create_floatingips(
- t_ctx, {'floatingip': {'floating_network_id': fip_net_id,
- 'floating_ip_address': fip_address,
- 'port_id': port_id}})
- except q_cli_exceptions.IpAddressInUseClient:
- fips = client.list_floatingips(t_ctx,
- [{'key': 'floating_ip_address',
- 'comparator': 'eq',
- 'value': fip_address}])
- if not fips:
-                # this is the rare case where we got an IpAddressInUseClient
-                # exception a second ago but now the floating ip is missing
- raise t_network_exc.BottomPodOperationFailure(
- resource='floating ip', pod_name=pod['pod_name'])
- associated_port_id = fips[0].get('port_id')
- if associated_port_id == port_id:
- # if the internal port associated with the existing fip is what
- # we expect, just ignore this exception
- pass
- elif not associated_port_id:
- # if the existing fip is not associated with any internal port,
- # update the fip to add association
- client.update_floatingips(t_ctx, fips[0]['id'],
- {'floatingip': {'port_id': port_id}})
- else:
- raise
-
- def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
- t_router, t_ew_bridge_net, t_ew_bridge_subnet,
- need_ns_bridge):
- b_client = self._get_client(b_pod['pod_name'])
-
- router_body = {'router': {'name': t_router['id'],
- 'distributed': False}}
- project_id = t_router['tenant_id']
-
- # create bottom router in target bottom pod
- _, b_router_id = self.helper.prepare_bottom_element(
- ctx, project_id, b_pod, t_router, 'router', router_body)
-
- # handle E-W networking
- # create top E-W bridge port
- q_ctx = None # no need to pass neutron context when using client
- t_ew_bridge_port_id = self.helper.get_bridge_interface(
- ctx, q_ctx, project_id, t_pod, t_ew_bridge_net['id'],
- b_router_id, None, True)
-
- # create bottom E-W bridge port
- t_ew_bridge_port = t_client.get_ports(ctx, t_ew_bridge_port_id)
- (is_new, b_ew_bridge_port_id,
- _, _) = self.helper.get_bottom_bridge_elements(
- ctx, project_id, b_pod, t_ew_bridge_net, False, t_ew_bridge_subnet,
- t_ew_bridge_port)
-
- # attach bottom E-W bridge port to bottom router
- if is_new:
- # only attach bridge port the first time
- b_client.action_routers(ctx, 'add_interface', b_router_id,
- {'port_id': b_ew_bridge_port_id})
- else:
- # still need to check if the bridge port is bound
- port = b_client.get_ports(ctx, b_ew_bridge_port_id)
- if not port.get('device_id'):
- b_client.action_routers(ctx, 'add_interface', b_router_id,
- {'port_id': b_ew_bridge_port_id})
-
- # handle N-S networking
- if need_ns_bridge:
- t_ns_bridge_net_name = constants.ns_bridge_net_name % project_id
- t_ns_bridge_subnet_name = constants.ns_bridge_subnet_name % (
- project_id)
- t_ns_bridge_net = self._get_resource_by_name(
- t_client, ctx, 'network', t_ns_bridge_net_name)
- t_ns_bridge_subnet = self._get_resource_by_name(
- t_client, ctx, 'subnet', t_ns_bridge_subnet_name)
- # create bottom N-S bridge network and subnet
- (_, _, b_ns_bridge_subnet_id,
- b_ns_bridge_net_id) = self.helper.get_bottom_bridge_elements(
- ctx, project_id, b_pod, t_ns_bridge_net, True,
- t_ns_bridge_subnet, None)
- # create top N-S bridge gateway port
- t_ns_bridge_gateway_id = self.helper.get_bridge_interface(
- ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'],
- b_router_id, None, False)
- t_ns_bridge_gateway = t_client.get_ports(ctx,
- t_ns_bridge_gateway_id)
- # add external gateway for bottom router
- # add gateway is update operation, can run multiple times
- gateway_ip = t_ns_bridge_gateway['fixed_ips'][0]['ip_address']
- b_client.action_routers(
- ctx, 'add_gateway', b_router_id,
- {'network_id': b_ns_bridge_net_id,
- 'external_fixed_ips': [{'subnet_id': b_ns_bridge_subnet_id,
- 'ip_address': gateway_ip}]})
-
- # attach internal port to bottom router
- t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
- t_net['id'])
- b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
- if b_net_id:
- b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
- b_net_id)
- else:
- b_ports = []
- if not t_ports and b_ports:
- # remove redundant bottom interface
- b_port = b_ports[0]
- request_body = {'port_id': b_port['id']}
- b_client.action_routers(ctx, 'remove_interface', b_router_id,
- request_body)
- elif t_ports and not b_ports:
- # create new bottom interface
- t_port = t_ports[0]
-
- # only consider ipv4 address currently
- t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
- t_subnet = t_client.get_subnets(ctx, t_subnet_id)
-
- (b_net_id,
- subnet_map) = self.helper.prepare_bottom_network_subnets(
- ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
-
- # the gateway ip of bottom subnet is set to the ip of t_port, so
- # we just attach the bottom subnet to the bottom router and neutron
- # server in the bottom pod will create the interface for us, using
- # the gateway ip.
- b_client.action_routers(ctx, 'add_interface', b_router_id,
- {'subnet_id': subnet_map[t_subnet_id]})
-
- if not t_router['external_gateway_info']:
- return
-
- # handle floatingip
- t_ext_net_id = t_router['external_gateway_info']['network_id']
- t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
- 'comparator': 'eq',
- 'value': t_ext_net_id}])
- # skip unbound top floatingip
- t_ip_fip_map = dict([(fip['floating_ip_address'],
- fip) for fip in t_fips if fip['port_id']])
- mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
- constants.RT_NETWORK)
- # bottom external network should exist
- b_ext_pod, b_ext_net_id = mappings[0]
- b_ext_client = self._get_client(b_ext_pod['pod_name'])
- b_fips = b_ext_client.list_floatingips(
- ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
- 'value': b_ext_net_id}])
- # skip unbound bottom floatingip
- b_ip_fip_map = dict([(fip['floating_ip_address'],
- fip) for fip in b_fips if fip['port_id']])
- add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
- del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]
-
- for add_fip in add_fips:
- fip = t_ip_fip_map[add_fip]
- t_int_port_id = fip['port_id']
- b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
- ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
- if not b_int_port_id:
- LOG.warning(_LW('Port %(port_id)s associated with floating ip '
- '%(fip)s is not mapped to bottom pod'),
- {'port_id': t_int_port_id, 'fip': add_fip})
- continue
- t_int_port = t_client.get_ports(ctx, t_int_port_id)
- if t_int_port['network_id'] != t_net['id']:
- # only handle floating ip association for the given top network
- continue
- if need_ns_bridge:
- # create top N-S bridge interface port
- t_ns_bridge_port_id = self.helper.get_bridge_interface(
- ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None,
- b_int_port_id, False)
- t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id)
- b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
- ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
- constants.RT_NETWORK)
- port_body = {
- 'port': {
- 'tenant_id': project_id,
- 'admin_state_up': True,
- 'name': 'ns_bridge_port',
- 'network_id': b_ext_bridge_net_id,
- 'fixed_ips': [{'ip_address': t_ns_bridge_port[
- 'fixed_ips'][0]['ip_address']}]
- }
- }
- _, b_ns_bridge_port_id = self.helper.prepare_bottom_element(
- ctx, project_id, b_ext_pod, t_ns_bridge_port,
- constants.RT_PORT, port_body)
- self._safe_create_bottom_floatingip(
- ctx, b_ext_pod, b_ext_client, b_ext_net_id, add_fip,
- b_ns_bridge_port_id)
- self._safe_create_bottom_floatingip(
- ctx, b_pod, b_client, b_ns_bridge_net_id,
- t_ns_bridge_port['fixed_ips'][0]['ip_address'],
- b_int_port_id)
- else:
- self._safe_create_bottom_floatingip(
- ctx, b_pod, b_client, b_ext_net_id, add_fip,
- b_int_port_id)
-
- for del_fip in del_fips:
- fip = b_ip_fip_map[del_fip]
- if need_ns_bridge:
- b_ns_bridge_port = b_ext_client.get_ports(ctx, fip['port_id'])
- entries = core.query_resource(
- ctx, models.ResourceRouting,
- [{'key': 'bottom_id', 'comparator': 'eq',
- 'value': b_ns_bridge_port['id']},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': b_ext_pod['pod_id']}], [])
- t_ns_bridge_port_id = entries[0]['top_id']
- b_int_fips = b_client.list_floatingips(
- ctx,
- [{'key': 'floating_ip_address',
- 'comparator': 'eq',
- 'value': b_ns_bridge_port['fixed_ips'][0]['ip_address']},
- {'key': 'floating_network_id',
- 'comparator': 'eq',
- 'value': b_ns_bridge_net_id}])
- if b_int_fips:
- b_client.delete_floatingips(ctx, b_int_fips[0]['id'])
- b_ext_client.update_floatingips(
- ctx, fip['id'], {'floatingip': {'port_id': None}})
-
- # for bridge port, we have two resource routing entries, one
- # for bridge port in top pod, another for bridge port in bottom
- # pod. calling t_client.delete_ports will delete bridge port in
- # bottom pod as well as routing entry for it, but we also need
- # to remove routing entry for bridge port in top pod, bridge
- # network will be deleted when deleting router
-
- # first we update the routing entry to set bottom_id to None
- # and expire the entry, so if we succeed to delete the bridge
- # port next, this expired entry will be deleted; otherwise, we
- # fail to delete the bridge port, when the port is accessed via
- # lock_handle module, that module will find the port and update
- # the entry
- with ctx.session.begin():
- core.update_resources(
- ctx, models.ResourceRouting,
- [{'key': 'bottom_id', 'comparator': 'eq',
- 'value': t_ns_bridge_port_id}],
- {'bottom_id': None,
- 'created_at': constants.expire_time,
- 'updated_at': constants.expire_time})
- # delete bridge port
- t_client.delete_ports(ctx, t_ns_bridge_port_id)
- # delete the expired entry, even if this deletion fails, we
- # still have a chance that lock_handle module will delete it
- with ctx.session.begin():
- core.delete_resources(ctx, models.ResourceRouting,
- [{'key': 'bottom_id',
- 'comparator': 'eq',
- 'value': t_ns_bridge_port_id}])
- else:
- b_client.update_floatingips(ctx, fip['id'],
- {'floatingip': {'port_id': None}})
-
- @_job_handle(constants.JT_ROUTER_SETUP)
- def setup_bottom_router(self, ctx, payload):
- (b_pod_id,
- t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')
-
- if b_pod_id == constants.POD_NOT_SPECIFIED:
- mappings = db_api.get_bottom_mappings_by_top_id(
- ctx, t_net_id, constants.RT_NETWORK)
- b_pods = [mapping[0] for mapping in mappings]
- for b_pod in b_pods:
- # NOTE(zhiyuan) we create one job for each pod to avoid
- # conflict caused by different workers operating the same pod
- self.xjob_handler.setup_bottom_router(
- ctx, t_net_id, t_router_id, b_pod['pod_id'])
- return
-
- t_client = self._get_client()
- t_pod = db_api.get_top_pod(ctx)
- t_router = t_client.get_routers(ctx, t_router_id)
- if not t_router:
- # we just end this job if top router no longer exists
- return
- t_net = t_client.get_networks(ctx, t_net_id)
- if not t_net:
- # we just end this job if top network no longer exists
- return
- project_id = t_router['tenant_id']
-
- b_pod = db_api.get_pod(ctx, b_pod_id)
-
- t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id
- t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
- t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
- t_ew_bridge_net_name)
- t_ew_bridge_subnet = self._get_resource_by_name(
- t_client, ctx, 'subnet', t_ew_bridge_subnet_name)
-
- ext_nets = t_client.list_networks(ctx,
- filters=[{'key': 'router:external',
- 'comparator': 'eq',
- 'value': True}])
- ext_net_pod_names = set(
- [ext_net[AZ_HINTS][0] for ext_net in ext_nets])
-
- if not ext_net_pod_names:
- need_ns_bridge = False
- elif b_pod['pod_name'] in ext_net_pod_names:
- need_ns_bridge = False
- else:
- need_ns_bridge = True
- self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net,
- t_router, t_ew_bridge_net,
- t_ew_bridge_subnet, need_ns_bridge)
-
- self.xjob_handler.configure_extra_routes(ctx, t_router_id)
-
- @_job_handle(constants.JT_ROUTER)
- def configure_extra_routes(self, ctx, payload):
- t_router_id = payload[constants.JT_ROUTER]
-
- non_vm_port_types = ['network:router_interface',
- 'network:router_gateway',
- 'network:dhcp']
-
- b_pods, b_router_ids = zip(*db_api.get_bottom_mappings_by_top_id(
- ctx, t_router_id, constants.RT_ROUTER))
-
- router_bridge_ip_map = {}
- router_ips_map = {}
- for i, b_pod in enumerate(b_pods):
- bottom_client = self._get_client(pod_name=b_pod['pod_name'])
- b_interfaces = bottom_client.list_ports(
- ctx, filters=[{'key': 'device_id',
- 'comparator': 'eq',
- 'value': b_router_ids[i]},
- {'key': 'device_owner',
- 'comparator': 'eq',
- 'value': 'network:router_interface'}])
- router_ips_map[b_router_ids[i]] = {}
- for b_interface in b_interfaces:
- ip = b_interface['fixed_ips'][0]['ip_address']
- ew_bridge_cidr = '100.0.0.0/9'
- ns_bridge_cidr = '100.128.0.0/9'
- if netaddr.IPAddress(ip) in netaddr.IPNetwork(ew_bridge_cidr):
- router_bridge_ip_map[b_router_ids[i]] = ip
- continue
- if netaddr.IPAddress(ip) in netaddr.IPNetwork(ns_bridge_cidr):
- continue
- b_net_id = b_interface['network_id']
- b_subnet = bottom_client.get_subnets(
- ctx, b_interface['fixed_ips'][0]['subnet_id'])
- b_ports = bottom_client.list_ports(
- ctx, filters=[{'key': 'network_id',
- 'comparator': 'eq',
- 'value': b_net_id}])
- b_vm_ports = [b_port for b_port in b_ports if b_port.get(
- 'device_owner', '') not in non_vm_port_types]
- ips = [vm_port['fixed_ips'][0][
- 'ip_address'] for vm_port in b_vm_ports]
- router_ips_map[b_router_ids[i]][b_subnet['cidr']] = ips
-
- for i, b_router_id in enumerate(b_router_ids):
- bottom_client = self._get_client(pod_name=b_pods[i]['pod_name'])
- extra_routes = []
- if not router_ips_map[b_router_id]:
- bottom_client.update_routers(
- ctx, b_router_id, {'router': {'routes': extra_routes}})
- continue
- for router_id, cidr_ips_map in router_ips_map.iteritems():
- if router_id == b_router_id:
- continue
- for cidr, ips in cidr_ips_map.iteritems():
- if cidr in router_ips_map[b_router_id]:
- continue
- for ip in ips:
- extra_routes.append(
- {'nexthop': router_bridge_ip_map[router_id],
- 'destination': ip + '/32'})
- bottom_client.update_routers(
- ctx, b_router_id, {'router': {'routes': extra_routes}})
-
- @_job_handle(constants.JT_PORT_DELETE)
- def delete_server_port(self, ctx, payload):
- t_port_id = payload[constants.JT_PORT_DELETE]
- self._get_client().delete_ports(ctx, t_port_id)
diff --git a/tricircle/__init__.py b/trio2o/__init__.py
similarity index 100%
rename from tricircle/__init__.py
rename to trio2o/__init__.py
diff --git a/tricircle/api/__init__.py b/trio2o/api/__init__.py
similarity index 100%
rename from tricircle/api/__init__.py
rename to trio2o/api/__init__.py
diff --git a/tricircle/api/app.py b/trio2o/api/app.py
similarity index 92%
rename from tricircle/api/app.py
rename to trio2o/api/app.py
index 597b2a2..a4cc848 100644
--- a/tricircle/api/app.py
+++ b/trio2o/api/app.py
@@ -17,8 +17,8 @@ import pecan
from oslo_config import cfg
-from tricircle.common.i18n import _
-from tricircle.common import restapp
+from trio2o.common.i18n import _
+from trio2o.common import restapp
common_opts = [
@@ -52,8 +52,8 @@ def setup_app(*args, **kwargs):
'host': cfg.CONF.bind_host
},
'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
+ 'root': 'trio2o.api.controllers.root.RootController',
+ 'modules': ['trio2o.api'],
'errors': {
400: '/error',
'__force_dict__': True
diff --git a/tricircle/api/controllers/__init__.py b/trio2o/api/controllers/__init__.py
similarity index 100%
rename from tricircle/api/controllers/__init__.py
rename to trio2o/api/controllers/__init__.py
diff --git a/tricircle/api/controllers/pod.py b/trio2o/api/controllers/pod.py
similarity index 97%
rename from tricircle/api/controllers/pod.py
rename to trio2o/api/controllers/pod.py
index a99b61e..c9e69f4 100644
--- a/tricircle/api/controllers/pod.py
+++ b/trio2o/api/controllers/pod.py
@@ -22,16 +22,16 @@ import oslo_db.exception as db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
-from tricircle.common import az_ag
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exc
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
+from trio2o.common import az_ag
+import trio2o.common.context as t_context
+import trio2o.common.exceptions as t_exc
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import api as db_api
+from trio2o.db import core
+from trio2o.db import models
LOG = logging.getLogger(__name__)
diff --git a/tricircle/api/controllers/root.py b/trio2o/api/controllers/root.py
similarity index 97%
rename from tricircle/api/controllers/root.py
rename to trio2o/api/controllers/root.py
index 9c81d84..0b65fd9 100644
--- a/tricircle/api/controllers/root.py
+++ b/trio2o/api/controllers/root.py
@@ -18,8 +18,8 @@ import oslo_log.log as logging
import pecan
from pecan import request
-from tricircle.api.controllers import pod
-import tricircle.common.context as t_context
+from trio2o.api.controllers import pod
+import trio2o.common.context as t_context
LOG = logging.getLogger(__name__)
diff --git a/tricircle/db/opts.py b/trio2o/api/opts.py
similarity index 90%
rename from tricircle/db/opts.py
rename to trio2o/api/opts.py
index 3d156d1..fc22480 100644
--- a/tricircle/db/opts.py
+++ b/trio2o/api/opts.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.db.core
+import trio2o.api.app
def list_opts():
return [
- ('DEFAULT', tricircle.db.core.db_opts),
+ ('DEFAULT', trio2o.api.app.common_opts),
]
diff --git a/tricircle/cinder_apigw/__init__.py b/trio2o/cinder_apigw/__init__.py
similarity index 100%
rename from tricircle/cinder_apigw/__init__.py
rename to trio2o/cinder_apigw/__init__.py
diff --git a/tricircle/cinder_apigw/app.py b/trio2o/cinder_apigw/app.py
similarity index 92%
rename from tricircle/cinder_apigw/app.py
rename to trio2o/cinder_apigw/app.py
index 524ee1f..08c35f9 100644
--- a/tricircle/cinder_apigw/app.py
+++ b/trio2o/cinder_apigw/app.py
@@ -17,8 +17,8 @@ import pecan
from oslo_config import cfg
-from tricircle.common.i18n import _
-from tricircle.common import restapp
+from trio2o.common.i18n import _
+from trio2o.common import restapp
common_opts = [
@@ -52,8 +52,8 @@ def setup_app(*args, **kwargs):
'host': cfg.CONF.bind_host
},
'app': {
- 'root': 'tricircle.cinder_apigw.controllers.root.RootController',
- 'modules': ['tricircle.cinder_apigw'],
+ 'root': 'trio2o.cinder_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.cinder_apigw'],
'errors': {
400: '/error',
'__force_dict__': True
diff --git a/tricircle/cinder_apigw/controllers/__init__.py b/trio2o/cinder_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/cinder_apigw/controllers/__init__.py
rename to trio2o/cinder_apigw/controllers/__init__.py
diff --git a/tricircle/cinder_apigw/controllers/root.py b/trio2o/cinder_apigw/controllers/root.py
similarity index 94%
rename from tricircle/cinder_apigw/controllers/root.py
rename to trio2o/cinder_apigw/controllers/root.py
index 083cf42..4833b06 100644
--- a/tricircle/cinder_apigw/controllers/root.py
+++ b/trio2o/cinder_apigw/controllers/root.py
@@ -17,10 +17,10 @@ import pecan
import oslo_log.log as logging
-from tricircle.cinder_apigw.controllers import volume
-from tricircle.cinder_apigw.controllers import volume_actions
-from tricircle.cinder_apigw.controllers import volume_metadata
-from tricircle.cinder_apigw.controllers import volume_type
+from trio2o.cinder_apigw.controllers import volume
+from trio2o.cinder_apigw.controllers import volume_actions
+from trio2o.cinder_apigw.controllers import volume_metadata
+from trio2o.cinder_apigw.controllers import volume_type
LOG = logging.getLogger(__name__)
diff --git a/tricircle/cinder_apigw/controllers/volume.py b/trio2o/cinder_apigw/controllers/volume.py
similarity index 97%
rename from tricircle/cinder_apigw/controllers/volume.py
rename to trio2o/cinder_apigw/controllers/volume.py
index 804c275..670005a 100644
--- a/tricircle/cinder_apigw/controllers/volume.py
+++ b/trio2o/cinder_apigw/controllers/volume.py
@@ -23,17 +23,17 @@ from pecan import rest
from oslo_log import log as logging
from oslo_serialization import jsonutils
-from tricircle.common import az_ag
-from tricircle.common import constants as cons
-import tricircle.common.context as t_context
-from tricircle.common import httpclient as hclient
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
+from trio2o.common import az_ag
+from trio2o.common import constants as cons
+import trio2o.common.context as t_context
+from trio2o.common import httpclient as hclient
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
+import trio2o.db.api as db_api
+from trio2o.db import core
+from trio2o.db import models
LOG = logging.getLogger(__name__)
diff --git a/tricircle/cinder_apigw/controllers/volume_actions.py b/trio2o/cinder_apigw/controllers/volume_actions.py
similarity index 97%
rename from tricircle/cinder_apigw/controllers/volume_actions.py
rename to trio2o/cinder_apigw/controllers/volume_actions.py
index 1c579df..a36ac6b 100644
--- a/tricircle/cinder_apigw/controllers/volume_actions.py
+++ b/trio2o/cinder_apigw/controllers/volume_actions.py
@@ -19,13 +19,13 @@ from pecan import rest
from oslo_log import log as logging
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-import tricircle.db.api as db_api
+import trio2o.common.client as t_client
+from trio2o.common import constants
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+import trio2o.db.api as db_api
LOG = logging.getLogger(__name__)
diff --git a/tricircle/cinder_apigw/controllers/volume_metadata.py b/trio2o/cinder_apigw/controllers/volume_metadata.py
similarity index 97%
rename from tricircle/cinder_apigw/controllers/volume_metadata.py
rename to trio2o/cinder_apigw/controllers/volume_metadata.py
index 18485d5..fce61b5 100644
--- a/tricircle/cinder_apigw/controllers/volume_metadata.py
+++ b/trio2o/cinder_apigw/controllers/volume_metadata.py
@@ -22,13 +22,13 @@ from pecan import rest
from oslo_log import log as logging
from oslo_serialization import jsonutils
-from tricircle.common import constants as cons
-import tricircle.common.context as t_context
-from tricircle.common import httpclient as hclient
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-import tricircle.db.api as db_api
+from trio2o.common import constants as cons
+import trio2o.common.context as t_context
+from trio2o.common import httpclient as hclient
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+import trio2o.db.api as db_api
LOG = logging.getLogger(__name__)
diff --git a/tricircle/cinder_apigw/controllers/volume_type.py b/trio2o/cinder_apigw/controllers/volume_type.py
similarity index 97%
rename from tricircle/cinder_apigw/controllers/volume_type.py
rename to trio2o/cinder_apigw/controllers/volume_type.py
index 2068216..0bf49b9 100644
--- a/tricircle/cinder_apigw/controllers/volume_type.py
+++ b/trio2o/cinder_apigw/controllers/volume_type.py
@@ -20,14 +20,14 @@ from pecan import rest
from oslo_log import log as logging
from oslo_utils import uuidutils
-import tricircle.common.context as t_context
-from tricircle.common import exceptions
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
+import trio2o.common.context as t_context
+from trio2o.common import exceptions
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+import trio2o.db.api as db_api
+from trio2o.db import core
+from trio2o.db import models
LOG = logging.getLogger(__name__)
diff --git a/tricircle/nova_apigw/opts.py b/trio2o/cinder_apigw/opts.py
similarity index 88%
rename from tricircle/nova_apigw/opts.py
rename to trio2o/cinder_apigw/opts.py
index 70355ce..8b0ff32 100644
--- a/tricircle/nova_apigw/opts.py
+++ b/trio2o/cinder_apigw/opts.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.nova_apigw.app
+import trio2o.cinder_apigw.app
def list_opts():
return [
- ('DEFAULT', tricircle.nova_apigw.app.common_opts),
+ ('DEFAULT', trio2o.cinder_apigw.app.common_opts),
]
diff --git a/tricircle/common/__init__.py b/trio2o/common/__init__.py
similarity index 100%
rename from tricircle/common/__init__.py
rename to trio2o/common/__init__.py
diff --git a/tricircle/common/az_ag.py b/trio2o/common/az_ag.py
similarity index 97%
rename from tricircle/common/az_ag.py
rename to trio2o/common/az_ag.py
index 8cabd23..3db6cc0 100644
--- a/tricircle/common/az_ag.py
+++ b/trio2o/common/az_ag.py
@@ -16,11 +16,11 @@
from oslo_log import log as logging
from oslo_utils import uuidutils
-from tricircle.common.i18n import _LE
+from trio2o.common.i18n import _LE
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import api as db_api
+from trio2o.db import core
+from trio2o.db import models
LOG = logging.getLogger(__name__)
diff --git a/tricircle/common/baserpc.py b/trio2o/common/baserpc.py
similarity index 96%
rename from tricircle/common/baserpc.py
rename to trio2o/common/baserpc.py
index 15ec3dd..7a1a1f9 100644
--- a/tricircle/common/baserpc.py
+++ b/trio2o/common/baserpc.py
@@ -23,7 +23,7 @@ from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
-from tricircle.common import rpc
+from trio2o.common import rpc
CONF = cfg.CONF
@@ -44,7 +44,7 @@ class BaseClientAPI(object):
"""
VERSION_ALIASES = {
- # baseapi was added in the first version of Tricircle
+ # baseapi was added in the first version of Trio2o
}
def __init__(self, topic):
diff --git a/tricircle/common/client.py b/trio2o/common/client.py
similarity index 98%
rename from tricircle/common/client.py
rename to trio2o/common/client.py
index 5c50e4a..648b1b4 100644
--- a/tricircle/common/client.py
+++ b/trio2o/common/client.py
@@ -26,11 +26,11 @@ from keystoneclient.v3 import client as keystone_client
from oslo_config import cfg
from oslo_log import log as logging
-import tricircle.common.context as tricircle_context
-from tricircle.common import exceptions
-from tricircle.common import resource_handle
-from tricircle.db import api
-from tricircle.db import models
+import trio2o.common.context as trio2o_context
+from trio2o.common import exceptions
+from trio2o.common import resource_handle
+from trio2o.db import api
+from trio2o.db import models
client_opts = [
@@ -123,7 +123,7 @@ class Client(object):
handle_create in NeutronResourceHandle is called).
Not all kinds of resources support the above five operations(or not
- supported yet by Tricircle), so each service handler has a
+ supported yet by Trio2o), so each service handler has a
support_resource field to specify the resources and operations it
supports, like:
'port': LIST | CREATE | DELETE | GET
@@ -271,7 +271,7 @@ class Client(object):
:return: None
"""
if is_internal:
- admin_context = tricircle_context.Context()
+ admin_context = trio2o_context.Context()
admin_context.auth_token = self._get_admin_token()
endpoint_map = self._get_endpoint_from_keystone(admin_context)
else:
diff --git a/tricircle/common/config.py b/trio2o/common/config.py
similarity index 86%
rename from tricircle/common/config.py
rename to trio2o/common/config.py
index e8f0a03..2d1622e 100644
--- a/tricircle/common/config.py
+++ b/trio2o/common/config.py
@@ -14,7 +14,7 @@
# under the License.
"""
-Routines for configuring tricircle, largely copy from Neutron
+Routines for configuring trio2o, largely copied from Neutron
"""
import sys
@@ -22,11 +22,11 @@ import sys
from oslo_config import cfg
import oslo_log.log as logging
-from tricircle.common.i18n import _LI
+from trio2o.common.i18n import _LI
-# from tricircle import policy
-from tricircle.common import rpc
-from tricircle.common import version
+# from trio2o import policy
+from trio2o.common import rpc
+from trio2o.common import version
LOG = logging.getLogger(__name__)
@@ -40,7 +40,7 @@ def init(opts, args, **kwargs):
# auth.register_conf_options(cfg.CONF)
logging.register_options(cfg.CONF)
- cfg.CONF(args=args, project='tricircle',
+ cfg.CONF(args=args, project='trio2o',
version=version.version_info,
**kwargs)
@@ -51,7 +51,7 @@ def init(opts, args, **kwargs):
def _setup_logging():
"""Sets up the logging options for a log with supplied name."""
- product_name = "tricircle"
+ product_name = "trio2o"
logging.setup(cfg.CONF, product_name)
LOG.info(_LI("Logging enabled!"))
LOG.info(_LI("%(prog)s version %(version)s"),
diff --git a/tricircle/common/constants.py b/trio2o/common/constants.py
similarity index 100%
rename from tricircle/common/constants.py
rename to trio2o/common/constants.py
diff --git a/tricircle/common/context.py b/trio2o/common/context.py
similarity index 98%
rename from tricircle/common/context.py
rename to trio2o/common/context.py
index 3201aa8..88882e3 100644
--- a/tricircle/common/context.py
+++ b/trio2o/common/context.py
@@ -19,9 +19,9 @@ from pecan import request
import oslo_context.context as oslo_ctx
-from tricircle.common import constants
-from tricircle.common.i18n import _
-from tricircle.db import core
+from trio2o.common import constants
+from trio2o.common.i18n import _
+from trio2o.db import core
def get_db_context():
diff --git a/tricircle/common/exceptions.py b/trio2o/common/exceptions.py
similarity index 85%
rename from tricircle/common/exceptions.py
rename to trio2o/common/exceptions.py
index 3055910..979a5af 100644
--- a/tricircle/common/exceptions.py
+++ b/trio2o/common/exceptions.py
@@ -14,20 +14,20 @@
# under the License.
"""
-Tricircle base exception handling.
+Trio2o base exception handling.
"""
import six
from oslo_log import log as logging
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
LOG = logging.getLogger(__name__)
-class TricircleException(Exception):
- """Base Tricircle Exception.
+class Trio2oException(Exception):
+ """Base Trio2o Exception.
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
@@ -83,7 +83,7 @@ class TricircleException(Exception):
message = six.text_type(message)
self.msg = message
- super(TricircleException, self).__init__(message)
+ super(Trio2oException, self).__init__(message)
def _should_format(self):
@@ -97,25 +97,25 @@ class TricircleException(Exception):
return six.text_type(self.msg)
-class BadRequest(TricircleException):
+class BadRequest(Trio2oException):
message = _('Bad %(resource)s request: %(msg)s')
-class NotFound(TricircleException):
+class NotFound(Trio2oException):
message = _("Resource could not be found.")
code = 404
safe = True
-class Conflict(TricircleException):
+class Conflict(Trio2oException):
pass
-class NotAuthorized(TricircleException):
+class NotAuthorized(Trio2oException):
message = _("Not authorized.")
-class ServiceUnavailable(TricircleException):
+class ServiceUnavailable(Trio2oException):
message = _("The service is unavailable")
@@ -123,37 +123,37 @@ class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
-class InUse(TricircleException):
+class InUse(Trio2oException):
message = _("The resource is inuse")
-class InvalidConfigurationOption(TricircleException):
+class InvalidConfigurationOption(Trio2oException):
message = _("An invalid value was provided for %(opt_name)s: "
"%(opt_value)s")
-class EndpointNotAvailable(TricircleException):
+class EndpointNotAvailable(Trio2oException):
message = "Endpoint %(url)s for %(service)s is not available"
def __init__(self, service, url):
super(EndpointNotAvailable, self).__init__(service=service, url=url)
-class EndpointNotUnique(TricircleException):
+class EndpointNotUnique(Trio2oException):
message = "Endpoint for %(service)s in %(pod)s not unique"
def __init__(self, pod, service):
super(EndpointNotUnique, self).__init__(pod=pod, service=service)
-class EndpointNotFound(TricircleException):
+class EndpointNotFound(Trio2oException):
message = "Endpoint for %(service)s in %(pod)s not found"
def __init__(self, pod, service):
super(EndpointNotFound, self).__init__(pod=pod, service=service)
-class ResourceNotFound(TricircleException):
+class ResourceNotFound(Trio2oException):
message = "Could not find %(resource_type)s: %(unique_key)s"
def __init__(self, model, unique_key):
@@ -162,7 +162,7 @@ class ResourceNotFound(TricircleException):
unique_key=unique_key)
-class ResourceNotSupported(TricircleException):
+class ResourceNotSupported(Trio2oException):
message = "%(method)s method not supported for %(resource)s"
def __init__(self, resource, method):
@@ -170,7 +170,7 @@ class ResourceNotSupported(TricircleException):
method=method)
-class Invalid(TricircleException):
+class Invalid(Trio2oException):
message = _("Unacceptable parameters.")
code = 400
@@ -187,7 +187,7 @@ class InvalidMetadataSize(Invalid):
message = _("Invalid metadata size: %(reason)s")
-class MetadataLimitExceeded(TricircleException):
+class MetadataLimitExceeded(Trio2oException):
message = _("Maximum number of metadata items exceeds %(allowed)d")
@@ -224,16 +224,16 @@ class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
-class OverQuota(TricircleException):
+class OverQuota(Trio2oException):
message = _("Quota exceeded for resources: %(overs)s")
-class TooManyInstances(TricircleException):
+class TooManyInstances(Trio2oException):
message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
" but already used %(used)s of %(allowed)s %(overs)s")
-class OnsetFileLimitExceeded(TricircleException):
+class OnsetFileLimitExceeded(Trio2oException):
message = _("Personality file limit exceeded")
@@ -245,7 +245,7 @@ class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
message = _("Personality file content too long")
-class ExternalNetPodNotSpecify(TricircleException):
+class ExternalNetPodNotSpecify(Trio2oException):
message = "Pod for external network not specified"
def __init__(self):
@@ -259,18 +259,18 @@ class PodNotFound(NotFound):
super(PodNotFound, self).__init__(pod_name=pod_name)
-class ChildQuotaNotZero(TricircleException):
+class ChildQuotaNotZero(Trio2oException):
message = _("Child projects having non-zero quota")
# parameter validation error
-class ValidationError(TricircleException):
+class ValidationError(Trio2oException):
message = _("%(msg)s")
code = 400
# parameter validation error
-class HTTPForbiddenError(TricircleException):
+class HTTPForbiddenError(Trio2oException):
message = _("%(msg)s")
code = 403
@@ -289,7 +289,7 @@ class VolumeTypeExtraSpecsNotFound(NotFound):
"key %(extra_specs_key)s.")
-class Duplicate(TricircleException):
+class Duplicate(Trio2oException):
pass
@@ -297,5 +297,5 @@ class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
-class VolumeTypeUpdateFailed(TricircleException):
+class VolumeTypeUpdateFailed(Trio2oException):
message = _("Cannot update volume_type %(id)s")
diff --git a/tricircle/common/httpclient.py b/trio2o/common/httpclient.py
similarity index 94%
rename from tricircle/common/httpclient.py
rename to trio2o/common/httpclient.py
index 1042454..08be77e 100644
--- a/tricircle/common/httpclient.py
+++ b/trio2o/common/httpclient.py
@@ -21,18 +21,18 @@ from requests import Session
from oslo_log import log as logging
-from tricircle.common import client
-from tricircle.common import constants as cons
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-from tricircle.db import api as db_api
+from trio2o.common import client
+from trio2o.common import constants as cons
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+from trio2o.db import api as db_api
LOG = logging.getLogger(__name__)
# the url could be endpoint registered in the keystone
-# or url sent to tricircle service, which is stored in
+# or url sent to trio2o service, which is stored in
# pecan.request.url
def get_version_from_url(url):
@@ -60,7 +60,7 @@ def get_version_from_url(url):
def get_bottom_url(t_ver, t_url, b_ver, b_endpoint):
"""get_bottom_url
- convert url received by Tricircle service to bottom OpenStack
+ convert a url received by the Trio2o service to the bottom OpenStack
request url through the configured endpoint in the KeyStone
:param t_ver: version of top service
diff --git a/tricircle/common/i18n.py b/trio2o/common/i18n.py
similarity index 94%
rename from tricircle/common/i18n.py
rename to trio2o/common/i18n.py
index bb53d28..ca87158 100644
--- a/tricircle/common/i18n.py
+++ b/trio2o/common/i18n.py
@@ -14,7 +14,7 @@
import oslo_i18n
-_translators = oslo_i18n.TranslatorFactory(domain='tricircle')
+_translators = oslo_i18n.TranslatorFactory(domain='trio2o')
# The primary translation function using the well-known name "_"
_ = _translators.primary
diff --git a/tricircle/common/lock_handle.py b/trio2o/common/lock_handle.py
similarity index 98%
rename from tricircle/common/lock_handle.py
rename to trio2o/common/lock_handle.py
index d580b8d..5f96179 100644
--- a/tricircle/common/lock_handle.py
+++ b/trio2o/common/lock_handle.py
@@ -18,8 +18,8 @@ import eventlet
import oslo_db.exception as db_exc
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import core
+from trio2o.db import models
ALL_DONE = 0 # both route and bottom resource exist
diff --git a/tricircle/common/opts.py b/trio2o/common/opts.py
similarity index 79%
rename from tricircle/common/opts.py
rename to trio2o/common/opts.py
index 0b9e973..40c80e2 100644
--- a/tricircle/common/opts.py
+++ b/trio2o/common/opts.py
@@ -13,14 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.common.client
+import trio2o.common.client
# Todo: adding rpc cap negotiation configuration after first release
-# import tricircle.common.xrpcapi
+# import trio2o.common.xrpcapi
def list_opts():
return [
- ('client', tricircle.common.client.client_opts),
- # ('upgrade_levels', tricircle.common.xrpcapi.rpcapi_cap_opt),
+ ('client', trio2o.common.client.client_opts),
+ # ('upgrade_levels', trio2o.common.xrpcapi.rpcapi_cap_opt),
]
diff --git a/tricircle/common/quota.py b/trio2o/common/quota.py
similarity index 99%
rename from tricircle/common/quota.py
rename to trio2o/common/quota.py
index e71cde8..3904333 100644
--- a/tricircle/common/quota.py
+++ b/trio2o/common/quota.py
@@ -14,7 +14,7 @@
# under the License.
"""
-Routines for configuring tricircle, copy and modify from Cinder
+Routines for configuring trio2o, copied and modified from Cinder
"""
import datetime
@@ -28,13 +28,13 @@ from oslo_utils import timeutils
from keystoneclient import exceptions as k_exceptions
-from tricircle.common import client
-from tricircle.common import constants as cons
-from tricircle.common import exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-from tricircle.db import api as db_api
+from trio2o.common import client
+from trio2o.common import constants as cons
+from trio2o.common import exceptions as t_exceptions
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+from trio2o.db import api as db_api
quota_opts = [
cfg.IntOpt('quota_instances',
@@ -124,7 +124,7 @@ quota_opts = [
'they will update on a new reservation if max_age has '
'passed since the last reservation'),
cfg.StrOpt('quota_driver',
- default='tricircle.common.quota.DbQuotaDriver',
+ default='trio2o.common.quota.DbQuotaDriver',
help='Default driver to use for quota checks'),
cfg.BoolOpt('use_default_quota_class',
default=True,
@@ -621,7 +621,7 @@ class DbQuotaDriver(object):
# Yes, the admin may be in the process of reducing
# quotas, but that's a pretty rare thing.
- # NOTE(joehuang): in Tricircle, no embeded sync function here,
+ # NOTE(joehuang): in Trio2o, no embedded sync function here,
# so set has_sync=False.
quotas = self._get_quotas(context, resources, deltas.keys(),
has_sync=False, project_id=project_id)
@@ -999,7 +999,7 @@ class AllQuotaEngine(QuotaEngine):
result = {}
# Global quotas.
- # Set sync_func to None for no sync function in Tricircle
+ # Set sync_func to None for no sync function in Trio2o
reservable_argses = [
('instances', None, 'quota_instances'),
diff --git a/tricircle/common/resource_handle.py b/trio2o/common/resource_handle.py
similarity index 99%
rename from tricircle/common/resource_handle.py
rename to trio2o/common/resource_handle.py
index c3fde33..5e8055a 100644
--- a/tricircle/common/resource_handle.py
+++ b/trio2o/common/resource_handle.py
@@ -27,8 +27,8 @@ from oslo_config import cfg
from oslo_log import log as logging
from requests import exceptions as r_exceptions
-from tricircle.common import constants as cons
-from tricircle.common import exceptions
+from trio2o.common import constants as cons
+from trio2o.common import exceptions
client_opts = [
diff --git a/tricircle/common/restapp.py b/trio2o/common/restapp.py
similarity index 94%
rename from tricircle/common/restapp.py
rename to trio2o/common/restapp.py
index 2844ffb..a0ef795 100644
--- a/tricircle/common/restapp.py
+++ b/trio2o/common/restapp.py
@@ -27,7 +27,7 @@ def auth_app(app):
if cfg.CONF.auth_strategy == 'noauth':
pass
elif cfg.CONF.auth_strategy == 'keystone':
- # NOTE(zhiyuan) pkg_resources will try to load tricircle to get module
+ # NOTE(zhiyuan) pkg_resources will try to load trio2o to get module
# version, passing "project" as empty string to bypass it
app = auth_token.AuthProtocol(app, {'project': ''})
else:
diff --git a/tricircle/common/rpc.py b/trio2o/common/rpc.py
similarity index 95%
rename from tricircle/common/rpc.py
rename to trio2o/common/rpc.py
index 1ac5fd7..4860b5d 100644
--- a/tricircle/common/rpc.py
+++ b/trio2o/common/rpc.py
@@ -31,15 +31,15 @@ from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
-import tricircle.common.context
-import tricircle.common.exceptions
+import trio2o.common.context
+import trio2o.common.exceptions
CONF = cfg.CONF
TRANSPORT = None
NOTIFIER = None
ALLOWED_EXMODS = [
- tricircle.common.exceptions.__name__,
+ trio2o.common.exceptions.__name__,
]
EXTRA_EXMODS = []
@@ -102,7 +102,7 @@ class RequestContextSerializer(messaging.Serializer):
return context.to_dict()
def deserialize_context(self, context):
- return tricircle.common.context.Context.from_dict(context)
+ return trio2o.common.context.Context.from_dict(context)
def get_transport_url(url_str=None):
diff --git a/tricircle/common/serializer.py b/trio2o/common/serializer.py
similarity index 96%
rename from tricircle/common/serializer.py
rename to trio2o/common/serializer.py
index 839cf2b..42adf8f 100644
--- a/tricircle/common/serializer.py
+++ b/trio2o/common/serializer.py
@@ -31,9 +31,9 @@ _SINGLETON_MAPPING = Mapping({
})
-class TricircleSerializer(Serializer):
+class Trio2oSerializer(Serializer):
def __init__(self, base=None):
- super(TricircleSerializer, self).__init__()
+ super(Trio2oSerializer, self).__init__()
self._base = base
def serialize_entity(self, context, entity):
diff --git a/tricircle/common/topics.py b/trio2o/common/topics.py
similarity index 100%
rename from tricircle/common/topics.py
rename to trio2o/common/topics.py
diff --git a/tricircle/common/utils.py b/trio2o/common/utils.py
similarity index 96%
rename from tricircle/common/utils.py
rename to trio2o/common/utils.py
index 7315c8e..0300260 100644
--- a/tricircle/common/utils.py
+++ b/trio2o/common/utils.py
@@ -19,10 +19,10 @@ import pecan
from oslo_log import log as logging
-from tricircle.common import constants as cons
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common.i18n import _
-import tricircle.db.api as db_api
+from trio2o.common import constants as cons
+import trio2o.common.exceptions as t_exceptions
+from trio2o.common.i18n import _
+import trio2o.db.api as db_api
LOG = logging.getLogger(__name__)
diff --git a/tricircle/common/version.py b/trio2o/common/version.py
similarity index 95%
rename from tricircle/common/version.py
rename to trio2o/common/version.py
index cf4331c..2cc5dc7 100644
--- a/tricircle/common/version.py
+++ b/trio2o/common/version.py
@@ -12,4 +12,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-version_info = "tricircle 1.0"
+version_info = "trio2o 1.0"
diff --git a/tricircle/common/xrpcapi.py b/trio2o/common/xrpcapi.py
similarity index 76%
rename from tricircle/common/xrpcapi.py
rename to trio2o/common/xrpcapi.py
index 163be9c..9ce7634 100644
--- a/tricircle/common/xrpcapi.py
+++ b/trio2o/common/xrpcapi.py
@@ -21,10 +21,10 @@ from oslo_log import log as logging
import oslo_messaging as messaging
import rpc
-from serializer import TricircleSerializer as Serializer
+from serializer import Trio2oSerializer as Serializer
import topics
-from tricircle.common import constants
+from trio2o.common import constants
CONF = cfg.CONF
@@ -80,17 +80,3 @@ class XJobAPI(object):
self.client.prepare(exchange='openstack').cast(
ctxt, 'setup_bottom_router',
payload={constants.JT_ROUTER_SETUP: combine_id})
-
- def configure_extra_routes(self, ctxt, router_id):
- # NOTE(zhiyuan) this RPC is called by plugin in Neutron server, whose
- # control exchange is "neutron", however, we starts xjob without
- # specifying its control exchange, so the default value "openstack" is
- # used, thus we need to pass exchange as "openstack" here.
- self.client.prepare(exchange='openstack').cast(
- ctxt, 'configure_extra_routes',
- payload={constants.JT_ROUTER: router_id})
-
- def delete_server_port(self, ctxt, port_id):
- self.client.prepare(exchange='openstack').cast(
- ctxt, 'delete_server_port',
- payload={constants.JT_PORT_DELETE: port_id})
diff --git a/tricircle/db/__init__.py b/trio2o/db/__init__.py
similarity index 100%
rename from tricircle/db/__init__.py
rename to trio2o/db/__init__.py
diff --git a/tricircle/db/api.py b/trio2o/db/api.py
similarity index 99%
rename from tricircle/db/api.py
rename to trio2o/db/api.py
index c1e340c..9d246e6 100644
--- a/tricircle/db/api.py
+++ b/trio2o/db/api.py
@@ -27,14 +27,14 @@ from sqlalchemy import or_, and_
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import literal_column
-from tricircle.common import constants
-from tricircle.common.context import is_admin_context as _is_admin_context
-from tricircle.common import exceptions
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LW
+from trio2o.common import constants
+from trio2o.common.context import is_admin_context as _is_admin_context
+from trio2o.common import exceptions
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LW
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import core
+from trio2o.db import models
CONF = cfg.CONF
diff --git a/tricircle/db/core.py b/trio2o/db/core.py
similarity index 96%
rename from tricircle/db/core.py
rename to trio2o/db/core.py
index 5829790..ceded56 100644
--- a/tricircle/db/core.py
+++ b/trio2o/db/core.py
@@ -26,12 +26,12 @@ import oslo_db.options as db_options
import oslo_db.sqlalchemy.session as db_session
from oslo_utils import strutils
-from tricircle.common import exceptions
+from trio2o.common import exceptions
db_opts = [
- cfg.StrOpt('tricircle_db_connection',
- help='db connection string for tricircle'),
+ cfg.StrOpt('trio2o_db_connection',
+ help='db connection string for trio2o'),
]
cfg.CONF.register_opts(db_opts)
@@ -74,7 +74,7 @@ def _get_engine_facade():
global _engine_facade
if not _engine_facade:
- t_connection = cfg.CONF.tricircle_db_connection
+ t_connection = cfg.CONF.trio2o_db_connection
_engine_facade = db_session.EngineFacade(t_connection,
_conf=cfg.CONF)
return _engine_facade
diff --git a/tricircle/db/migrate_repo/__init__.py b/trio2o/db/migrate_repo/__init__.py
similarity index 100%
rename from tricircle/db/migrate_repo/__init__.py
rename to trio2o/db/migrate_repo/__init__.py
diff --git a/tricircle/db/migrate_repo/migrate.cfg b/trio2o/db/migrate_repo/migrate.cfg
similarity index 98%
rename from tricircle/db/migrate_repo/migrate.cfg
rename to trio2o/db/migrate_repo/migrate.cfg
index 9acd75f..ca7b228 100644
--- a/tricircle/db/migrate_repo/migrate.cfg
+++ b/trio2o/db/migrate_repo/migrate.cfg
@@ -1,7 +1,7 @@
[db_settings]
# Used to identify which repository this database is versioned under.
# You can use the name of your project.
-repository_id=tricircle
+repository_id=trio2o
# The name of the database table used to track the schema version.
# This name shouldn't already be used by your project.
diff --git a/tricircle/db/migrate_repo/versions/001_init.py b/trio2o/db/migrate_repo/versions/001_init.py
similarity index 100%
rename from tricircle/db/migrate_repo/versions/001_init.py
rename to trio2o/db/migrate_repo/versions/001_init.py
diff --git a/tricircle/db/migrate_repo/versions/002_resource.py b/trio2o/db/migrate_repo/versions/002_resource.py
similarity index 100%
rename from tricircle/db/migrate_repo/versions/002_resource.py
rename to trio2o/db/migrate_repo/versions/002_resource.py
diff --git a/tricircle/db/migrate_repo/versions/__init__.py b/trio2o/db/migrate_repo/versions/__init__.py
similarity index 100%
rename from tricircle/db/migrate_repo/versions/__init__.py
rename to trio2o/db/migrate_repo/versions/__init__.py
diff --git a/tricircle/db/migration_helpers.py b/trio2o/db/migration_helpers.py
similarity index 92%
rename from tricircle/db/migration_helpers.py
rename to trio2o/db/migration_helpers.py
index f40976e..7cf46b8 100644
--- a/tricircle/db/migration_helpers.py
+++ b/trio2o/db/migration_helpers.py
@@ -18,9 +18,9 @@ import os
from oslo_db.sqlalchemy import migration
-from tricircle import db
-from tricircle.db import core
-from tricircle.db import migrate_repo
+from trio2o import db
+from trio2o.db import core
+from trio2o.db import migrate_repo
def find_migrate_repo(package=None, repo_name='migrate_repo'):
diff --git a/tricircle/db/models.py b/trio2o/db/models.py
similarity index 99%
rename from tricircle/db/models.py
rename to trio2o/db/models.py
index 0769ad9..818ef0e 100644
--- a/tricircle/db/models.py
+++ b/trio2o/db/models.py
@@ -21,7 +21,7 @@ from sqlalchemy import schema
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
-from tricircle.db import core
+from trio2o.db import core
def MediumText():
diff --git a/tricircle/api/opts.py b/trio2o/db/opts.py
similarity index 89%
rename from tricircle/api/opts.py
rename to trio2o/db/opts.py
index 4621312..59c44b6 100644
--- a/tricircle/api/opts.py
+++ b/trio2o/db/opts.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.api.app
+import trio2o.db.core
def list_opts():
return [
- ('DEFAULT', tricircle.api.app.common_opts),
+ ('DEFAULT', trio2o.db.core.db_opts),
]
diff --git a/tricircle/network/__init__.py b/trio2o/nova_apigw/__init__.py
similarity index 100%
rename from tricircle/network/__init__.py
rename to trio2o/nova_apigw/__init__.py
diff --git a/tricircle/nova_apigw/app.py b/trio2o/nova_apigw/app.py
similarity index 87%
rename from tricircle/nova_apigw/app.py
rename to trio2o/nova_apigw/app.py
index 35f96a1..87b083c 100644
--- a/tricircle/nova_apigw/app.py
+++ b/trio2o/nova_apigw/app.py
@@ -16,11 +16,11 @@ import pecan
from oslo_config import cfg
-from tricircle.common.i18n import _
-from tricircle.common import restapp
-from tricircle.nova_apigw.controllers import micro_versions
-from tricircle.nova_apigw.controllers import root
-from tricircle.nova_apigw.controllers import root_versions
+from trio2o.common.i18n import _
+from trio2o.common import restapp
+from trio2o.nova_apigw.controllers import micro_versions
+from trio2o.nova_apigw.controllers import root
+from trio2o.nova_apigw.controllers import root_versions
common_opts = [
@@ -54,8 +54,8 @@ def setup_app(*args, **kwargs):
'host': cfg.CONF.bind_host
},
'app': {
- 'root': 'tricircle.nova_apigw.controllers.root.RootController',
- 'modules': ['tricircle.nova_apigw'],
+ 'root': 'trio2o.nova_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.nova_apigw'],
'errors': {
400: '/error',
'__force_dict__': True
diff --git a/tricircle/network/drivers/__init__.py b/trio2o/nova_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/network/drivers/__init__.py
rename to trio2o/nova_apigw/controllers/__init__.py
diff --git a/tricircle/nova_apigw/controllers/action.py b/trio2o/nova_apigw/controllers/action.py
similarity index 93%
rename from tricircle/nova_apigw/controllers/action.py
rename to trio2o/nova_apigw/controllers/action.py
index 998244b..220f8c5 100644
--- a/tricircle/nova_apigw/controllers/action.py
+++ b/trio2o/nova_apigw/controllers/action.py
@@ -19,12 +19,12 @@ from pecan import rest
from oslo_log import log as logging
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common import utils
-import tricircle.db.api as db_api
+import trio2o.common.client as t_client
+from trio2o.common import constants
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common import utils
+import trio2o.db.api as db_api
LOG = logging.getLogger(__name__)
diff --git a/tricircle/nova_apigw/controllers/aggregate.py b/trio2o/nova_apigw/controllers/aggregate.py
similarity index 95%
rename from tricircle/nova_apigw/controllers/aggregate.py
rename to trio2o/nova_apigw/controllers/aggregate.py
index 6842d3f..3375d40 100644
--- a/tricircle/nova_apigw/controllers/aggregate.py
+++ b/trio2o/nova_apigw/controllers/aggregate.py
@@ -19,13 +19,13 @@ from pecan import rest
import oslo_db.exception as db_exc
-from tricircle.common import az_ag
-import tricircle.common.context as t_context
-import tricircle.common.exceptions as t_exc
-from tricircle.common.i18n import _
-from tricircle.common import utils
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.common import az_ag
+import trio2o.common.context as t_context
+import trio2o.common.exceptions as t_exc
+from trio2o.common.i18n import _
+from trio2o.common import utils
+from trio2o.db import core
+from trio2o.db import models
class AggregateActionController(rest.RestController):
diff --git a/tricircle/nova_apigw/controllers/flavor.py b/trio2o/nova_apigw/controllers/flavor.py
similarity index 98%
rename from tricircle/nova_apigw/controllers/flavor.py
rename to trio2o/nova_apigw/controllers/flavor.py
index fcd179c..bc332f4 100644
--- a/tricircle/nova_apigw/controllers/flavor.py
+++ b/trio2o/nova_apigw/controllers/flavor.py
@@ -19,11 +19,11 @@ from pecan import rest
import oslo_db.exception as db_exc
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common import utils
-from tricircle.db import core
-from tricircle.db import models
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common import utils
+from trio2o.db import core
+from trio2o.db import models
class FlavorManageController(rest.RestController):
diff --git a/tricircle/nova_apigw/controllers/image.py b/trio2o/nova_apigw/controllers/image.py
similarity index 95%
rename from tricircle/nova_apigw/controllers/image.py
rename to trio2o/nova_apigw/controllers/image.py
index c2c669c..3c045cf 100644
--- a/tricircle/nova_apigw/controllers/image.py
+++ b/trio2o/nova_apigw/controllers/image.py
@@ -18,12 +18,12 @@ from pecan import rest
import re
import urlparse
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common import utils
-import tricircle.db.api as db_api
+import trio2o.common.client as t_client
+from trio2o.common import constants
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common import utils
+import trio2o.db.api as db_api
def url_join(*parts):
diff --git a/tricircle/nova_apigw/controllers/micro_versions.py b/trio2o/nova_apigw/controllers/micro_versions.py
similarity index 99%
rename from tricircle/nova_apigw/controllers/micro_versions.py
rename to trio2o/nova_apigw/controllers/micro_versions.py
index ad1442f..509ab2c 100644
--- a/tricircle/nova_apigw/controllers/micro_versions.py
+++ b/trio2o/nova_apigw/controllers/micro_versions.py
@@ -22,7 +22,7 @@ from oslo_utils import encodeutils
import webob.dec
-from tricircle.common import constants
+from trio2o.common import constants
class MicroVersion(object):
diff --git a/tricircle/nova_apigw/controllers/network.py b/trio2o/nova_apigw/controllers/network.py
similarity index 91%
rename from tricircle/nova_apigw/controllers/network.py
rename to trio2o/nova_apigw/controllers/network.py
index 78b6a0d..771f9ce 100644
--- a/tricircle/nova_apigw/controllers/network.py
+++ b/trio2o/nova_apigw/controllers/network.py
@@ -16,10 +16,10 @@
from pecan import expose
from pecan import rest
-import tricircle.common.client as t_client
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common import utils
+import trio2o.common.client as t_client
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common import utils
class NetworkController(rest.RestController):
diff --git a/tricircle/nova_apigw/controllers/quota_sets.py b/trio2o/nova_apigw/controllers/quota_sets.py
similarity index 98%
rename from tricircle/nova_apigw/controllers/quota_sets.py
rename to trio2o/nova_apigw/controllers/quota_sets.py
index e62647d..3e22794 100644
--- a/tricircle/nova_apigw/controllers/quota_sets.py
+++ b/trio2o/nova_apigw/controllers/quota_sets.py
@@ -24,10 +24,10 @@ from pecan import rest
from oslo_config import cfg
from oslo_log import log as logging
-import tricircle.common.context as t_context
-from tricircle.common import exceptions as t_exceptions
-from tricircle.common.i18n import _
-from tricircle.common import quota
+import trio2o.common.context as t_context
+from trio2o.common import exceptions as t_exceptions
+from trio2o.common.i18n import _
+from trio2o.common import quota
CONF = cfg.CONF
diff --git a/tricircle/nova_apigw/controllers/root.py b/trio2o/nova_apigw/controllers/root.py
similarity index 90%
rename from tricircle/nova_apigw/controllers/root.py
rename to trio2o/nova_apigw/controllers/root.py
index d3c93e3..21b1afa 100644
--- a/tricircle/nova_apigw/controllers/root.py
+++ b/trio2o/nova_apigw/controllers/root.py
@@ -23,17 +23,17 @@ import oslo_log.log as logging
import webob.exc as web_exc
-from tricircle.common import constants
-from tricircle.common import context as ctx
-from tricircle.common import xrpcapi
-from tricircle.nova_apigw.controllers import action
-from tricircle.nova_apigw.controllers import aggregate
-from tricircle.nova_apigw.controllers import flavor
-from tricircle.nova_apigw.controllers import image
-from tricircle.nova_apigw.controllers import network
-from tricircle.nova_apigw.controllers import quota_sets
-from tricircle.nova_apigw.controllers import server
-from tricircle.nova_apigw.controllers import volume
+from trio2o.common import constants
+from trio2o.common import context as ctx
+from trio2o.common import xrpcapi
+from trio2o.nova_apigw.controllers import action
+from trio2o.nova_apigw.controllers import aggregate
+from trio2o.nova_apigw.controllers import flavor
+from trio2o.nova_apigw.controllers import image
+from trio2o.nova_apigw.controllers import network
+from trio2o.nova_apigw.controllers import quota_sets
+from trio2o.nova_apigw.controllers import server
+from trio2o.nova_apigw.controllers import volume
LOG = logging.getLogger(__name__)
diff --git a/tricircle/nova_apigw/controllers/root_versions.py b/trio2o/nova_apigw/controllers/root_versions.py
similarity index 98%
rename from tricircle/nova_apigw/controllers/root_versions.py
rename to trio2o/nova_apigw/controllers/root_versions.py
index 0f96240..735f6f5 100644
--- a/tricircle/nova_apigw/controllers/root_versions.py
+++ b/trio2o/nova_apigw/controllers/root_versions.py
@@ -19,7 +19,7 @@ from oslo_utils import encodeutils
import webob.dec
-from tricircle.common import constants
+from trio2o.common import constants
class Versions(object):
diff --git a/trio2o/nova_apigw/controllers/server.py b/trio2o/nova_apigw/controllers/server.py
new file mode 100644
index 0000000..639b352
--- /dev/null
+++ b/trio2o/nova_apigw/controllers/server.py
@@ -0,0 +1,335 @@
+# Copyright (c) 2015 Huawei Tech. Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pecan
+from pecan import expose
+from pecan import rest
+import six
+
+import oslo_log.log as logging
+
+from trio2o.common import az_ag
+import trio2o.common.client as t_client
+from trio2o.common import constants
+import trio2o.common.context as t_context
+import trio2o.common.exceptions as t_exceptions
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+import trio2o.common.lock_handle as t_lock
+from trio2o.common.quota import QUOTAS
+from trio2o.common import utils
+from trio2o.common import xrpcapi
+import trio2o.db.api as db_api
+from trio2o.db import core
+from trio2o.db import models
+
+LOG = logging.getLogger(__name__)
+
+MAX_METADATA_KEY_LENGTH = 255
+MAX_METADATA_VALUE_LENGTH = 255
+
+
+class ServerController(rest.RestController):
+
+ def __init__(self, project_id):
+ self.project_id = project_id
+ self.clients = {constants.TOP: t_client.Client()}
+ self.xjob_handler = xrpcapi.XJobAPI()
+
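+ # Clients for bottom pods are created on demand and cached by pod name;
+ # the TOP client is created once in __init__.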
+ def _get_client(self, pod_name=constants.TOP):
+ if pod_name not in self.clients:
+ self.clients[pod_name] = t_client.Client(pod_name)
+ return self.clients[pod_name]
+
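+ # Query every pod that is bound to an availability zone and merge the
+ # returned server lists.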
+ def _get_all(self, context, params):
+ filters = [{'key': key,
+ 'comparator': 'eq',
+ 'value': value} for key, value in six.iteritems(params)]
+ ret = []
+ pods = db_api.list_pods(context)
+ for pod in pods:
+ if not pod['az_name']:
+ continue
+ client = self._get_client(pod['pod_name'])
+ servers = client.list_servers(context, filters=filters)
+ ret.extend(servers)
+ return ret
+
+ @staticmethod
+ def _construct_brief_server_entry(server):
+ return {'id': server['id'],
+ 'name': server.get('name'),
+ 'links': server.get('links')}
+
+ @expose(generic=True, template='json')
+ def get_one(self, _id, **kwargs):
+ context = t_context.extract_context_from_environ()
+
+ if _id == 'detail':
+ # return {'servers': [self._construct_brief_server_entry(
+ # server) for server in self._get_all(context, kwargs)]}
+ return {'servers': self._get_all(context, kwargs)}
+
+ mappings = db_api.get_bottom_mappings_by_top_id(
+ context, _id, constants.RT_SERVER)
+ if not mappings:
+ return utils.format_nova_error(
+ 404, _('Instance %s could not be found.') % _id)
+ pod, bottom_id = mappings[0]
+ client = self._get_client(pod['pod_name'])
+ server = client.get_servers(context, bottom_id)
+ if not server:
+ return utils.format_nova_error(
+ 404, _('Instance %s could not be found.') % _id)
+ else:
+ return {'server': server}
+
+ @expose(generic=True, template='json')
+ def get_all(self, **kwargs):
+ context = t_context.extract_context_from_environ()
+ # return {'servers': [self._construct_brief_server_entry(
+ # server) for server in self._get_all(context, kwargs)]}
+ return {'servers': self._get_all(context, kwargs)}
+
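+ # Create a server: pick a pod by availability zone, enforce quotas,
+ # forward the request to that pod and record the top/bottom id mapping.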
+ @expose(generic=True, template='json')
+ def post(self, **kw):
+ context = t_context.extract_context_from_environ()
+
+ if 'server' not in kw:
+ return utils.format_nova_error(
+ 400, _('server is not set'))
+
+ az = kw['server'].get('availability_zone', '')
+
+ pod, b_az = az_ag.get_pod_by_az_tenant(
+ context, az, self.project_id)
+ if not pod:
+ return utils.format_nova_error(
+ 500, _('Pod not configured or scheduling failure'))
+
+ t_server_dict = kw['server']
+ self._process_metadata_quota(context, t_server_dict)
+ self._process_injected_file_quota(context, t_server_dict)
+
+ server_body = self._get_create_server_body(kw['server'], b_az)
+
+ security_groups = []
+ if 'security_groups' not in kw['server']:
+ security_groups = ['default']
+ else:
+ for sg in kw['server']['security_groups']:
+ if 'name' not in sg:
+ return utils.format_nova_error(
+ 400, _('Invalid input for field/attribute'))
+ security_groups.append(sg['name'])
+
+ server_body['networks'] = []
+ if 'networks' in kw['server']:
+ for net_info in kw['server']['networks']:
+ if 'uuid' in net_info:
+ nic = {'net-id': net_info['uuid']}
+ server_body['networks'].append(nic)
+ elif 'port' in net_info:
+ nic = {'port-id': net_info['port']}
+ server_body['networks'].append(nic)
+
+ client = self._get_client(pod['pod_name'])
+ server = client.create_servers(
+ context,
+ name=server_body['name'],
+ image=server_body['imageRef'],
+ flavor=server_body['flavorRef'],
+ nics=server_body['networks'],
+ security_groups=security_groups)
+
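+ # Store the top/bottom id mapping so later lookups can be routed
+ # to the right pod.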
+ with context.session.begin():
+ core.create_resource(context, models.ResourceRouting,
+ {'top_id': server['id'],
+ 'bottom_id': server['id'],
+ 'pod_id': pod['pod_id'],
+ 'project_id': self.project_id,
+ 'resource_type': constants.RT_SERVER})
+ pecan.response.status = 202
+ return {'server': server}
+
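+ # Delete a server: find the owning pod from the id mapping, delete the
+ # bottom server and clean up stale mappings.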
+ @expose(generic=True, template='json')
+ def delete(self, _id):
+ context = t_context.extract_context_from_environ()
+
+ mappings = db_api.get_bottom_mappings_by_top_id(context, _id,
+ constants.RT_SERVER)
+ if not mappings:
+ pecan.response.status = 404
+ return {'Error': {'message': _('Server not found'), 'code': 404}}
+
+ pod, bottom_id = mappings[0]
+ client = self._get_client(pod['pod_name'])
+ try:
+ ret = client.delete_servers(context, bottom_id)
+ # a None return value indicates the server was not found
+ if ret is None:
+ self._remove_stale_mapping(context, _id)
+ pecan.response.status = 404
+ return {'Error': {'message': _('Server not found'),
+ 'code': 404}}
+ except Exception as e:
+ code = 500
+ message = _('Delete server %(server_id)s fails') % {
+ 'server_id': _id}
+ if hasattr(e, 'code'):
+ code = e.code
+ ex_message = str(e)
+ if ex_message:
+ message = ex_message
+ LOG.error(message)
+
+ pecan.response.status = code
+ return {'Error': {'message': message, 'code': code}}
+
+ pecan.response.status = 204
+ return pecan.response
+
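+ # Delegate to the lock_handle helper so concurrent workers do not
+ # create duplicate routing entries.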
+ def _get_or_create_route(self, context, pod, _id, _type):
+ def list_resources(t_ctx, q_ctx, pod_, ele, _type_):
+ client = self._get_client(pod_['pod_name'])
+ return client.list_resources(_type_, t_ctx, [{'key': 'name',
+ 'comparator': 'eq',
+ 'value': ele['id']}])
+
+ return t_lock.get_or_create_route(context, None,
+ self.project_id, pod, {'id': _id},
+ _type, list_resources)
+
+ @staticmethod
+ def _get_create_server_body(origin, bottom_az):
+ body = {}
+ copy_fields = ['name', 'imageRef', 'flavorRef',
+ 'max_count', 'min_count']
+ if bottom_az:
+ body['availability_zone'] = bottom_az
+ for field in copy_fields:
+ if field in origin:
+ body[field] = origin[field]
+ return body
+
+ @staticmethod
+ def _remove_stale_mapping(context, server_id):
+ filters = [{'key': 'top_id', 'comparator': 'eq', 'value': server_id},
+ {'key': 'resource_type',
+ 'comparator': 'eq',
+ 'value': constants.RT_SERVER}]
+ with context.session.begin():
+ core.delete_resources(context,
+ models.ResourceRouting,
+ filters)
+
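+ # Enforce the injected-file quotas at the top layer before the request
+ # reaches the bottom pod.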
+ def _process_injected_file_quota(self, context, t_server_dict):
+ try:
+ ctx = context.elevated()
+ injected_files = t_server_dict.get('injected_files', None)
+ self._check_injected_file_quota(ctx, injected_files)
+ except (t_exceptions.OnsetFileLimitExceeded,
+ t_exceptions.OnsetFilePathLimitExceeded,
+ t_exceptions.OnsetFileContentLimitExceeded) as e:
+ msg = str(e)
+ LOG.exception(_LE('Quota exceeded %(msg)s'),
+ {'msg': msg})
+ return utils.format_nova_error(400, _('Quota exceeded %s') % msg)
+
+ def _check_injected_file_quota(self, context, injected_files):
+ """Enforce quota limits on injected files.
+
+ Raises a QuotaError if any limit is exceeded.
+
+ """
+
+ if injected_files is None:
+ return
+
+ # Check number of files first
+ try:
+ QUOTAS.limit_check(context,
+ injected_files=len(injected_files))
+ except t_exceptions.OverQuota:
+ raise t_exceptions.OnsetFileLimitExceeded()
+
+ # OK, now count path and content lengths; we're looking for
+ # the max...
+ max_path = 0
+ max_content = 0
+ for path, content in injected_files:
+ max_path = max(max_path, len(path))
+ max_content = max(max_content, len(content))
+
+ try:
+ QUOTAS.limit_check(context,
+ injected_file_path_bytes=max_path,
+ injected_file_content_bytes=max_content)
+ except t_exceptions.OverQuota as exc:
+ # Favor path limit over content limit for reporting
+ # purposes
+ if 'injected_file_path_bytes' in exc.kwargs['overs']:
+ raise t_exceptions.OnsetFilePathLimitExceeded()
+ else:
+ raise t_exceptions.OnsetFileContentLimitExceeded()
+
+ def _process_metadata_quota(self, context, t_server_dict):
+ try:
+ ctx = context.elevated()
+ metadata = t_server_dict.get('metadata', None)
+ self._check_metadata_properties_quota(ctx, metadata)
+ except t_exceptions.InvalidMetadata as e1:
+ LOG.exception(_LE('Invalid metadata %(exception)s'),
+ {'exception': str(e1)})
+ return utils.format_nova_error(400, _('Invalid metadata'))
+ except t_exceptions.InvalidMetadataSize as e2:
+ LOG.exception(_LE('Invalid metadata size %(exception)s'),
+ {'exception': str(e2)})
+ return utils.format_nova_error(400, _('Invalid metadata size'))
+ except t_exceptions.MetadataLimitExceeded as e3:
+ LOG.exception(_LE('Quota exceeded %(exception)s'),
+ {'exception': str(e3)})
+ return utils.format_nova_error(400,
+ _('Quota exceeded in metadata'))
+
+ def _check_metadata_properties_quota(self, context, metadata=None):
+ """Enforce quota limits on metadata properties."""
+ if not metadata:
+ metadata = {}
+ if not isinstance(metadata, dict):
+ msg = (_("Metadata type should be dict."))
+ raise t_exceptions.InvalidMetadata(reason=msg)
+ num_metadata = len(metadata)
+ try:
+ QUOTAS.limit_check(context, metadata_items=num_metadata)
+ except t_exceptions.OverQuota as exc:
+ quota_metadata = exc.kwargs['quotas']['metadata_items']
+ raise t_exceptions.MetadataLimitExceeded(allowed=quota_metadata)
+
+ # Because metadata is processed in the bottom pod, we just do
+ # parameter validation here to ensure quota management
+ for k, v in six.iteritems(metadata):
+ try:
+ utils.check_string_length(v)
+ utils.check_string_length(k, min_len=1)
+ except t_exceptions.InvalidInput as e:
+ raise t_exceptions.InvalidMetadata(reason=str(e))
+
+ if len(k) > MAX_METADATA_KEY_LENGTH:
+ msg = _("Metadata property key greater than 255 characters")
+ raise t_exceptions.InvalidMetadataSize(reason=msg)
+ if len(v) > MAX_METADATA_VALUE_LENGTH:
+ msg = _("Metadata property value greater than 255 characters")
+ raise t_exceptions.InvalidMetadataSize(reason=msg)
diff --git a/tricircle/nova_apigw/controllers/volume.py b/trio2o/nova_apigw/controllers/volume.py
similarity index 93%
rename from tricircle/nova_apigw/controllers/volume.py
rename to trio2o/nova_apigw/controllers/volume.py
index 5a8bd9b..71a8f88 100644
--- a/tricircle/nova_apigw/controllers/volume.py
+++ b/trio2o/nova_apigw/controllers/volume.py
@@ -19,13 +19,13 @@ import re
from oslo_log import log as logging
-import tricircle.common.client as t_client
-from tricircle.common import constants
-import tricircle.common.context as t_context
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common import utils
-import tricircle.db.api as db_api
+import trio2o.common.client as t_client
+from trio2o.common import constants
+import trio2o.common.context as t_context
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common import utils
+import trio2o.db.api as db_api
LOG = logging.getLogger(__name__)
diff --git a/tricircle/network/opts.py b/trio2o/nova_apigw/opts.py
similarity index 87%
rename from tricircle/network/opts.py
rename to trio2o/nova_apigw/opts.py
index 0a4265e..15bc085 100644
--- a/tricircle/network/opts.py
+++ b/trio2o/nova_apigw/opts.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.network.plugin
+import trio2o.nova_apigw.app
def list_opts():
return [
- ('DEFAULT', tricircle.network.plugin.tricircle_opts),
+ ('DEFAULT', trio2o.nova_apigw.app.common_opts),
]
diff --git a/trio2o/tempestplugin/README.rst b/trio2o/tempestplugin/README.rst
new file mode 100644
index 0000000..fff7c1c
--- /dev/null
+++ b/trio2o/tempestplugin/README.rst
@@ -0,0 +1,6 @@
+===============================================
+Tempest Integration of Trio2o
+===============================================
+
+This directory contains Tempest tests to cover the Trio2o project.
+
diff --git a/tricircle/nova_apigw/__init__.py b/trio2o/tempestplugin/__init__.py
similarity index 100%
rename from tricircle/nova_apigw/__init__.py
rename to trio2o/tempestplugin/__init__.py
diff --git a/tricircle/tempestplugin/config.py b/trio2o/tempestplugin/config.py
similarity index 92%
rename from tricircle/tempestplugin/config.py
rename to trio2o/tempestplugin/config.py
index 51e4b7e..550e499 100644
--- a/tricircle/tempestplugin/config.py
+++ b/trio2o/tempestplugin/config.py
@@ -13,4 +13,4 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tricircle.common import config as t_config # noqa
+from trio2o.common import config as t_config # noqa
diff --git a/tricircle/tempestplugin/plugin.py b/trio2o/tempestplugin/plugin.py
similarity index 89%
rename from tricircle/tempestplugin/plugin.py
rename to trio2o/tempestplugin/plugin.py
index e0b4838..671ed99 100644
--- a/tricircle/tempestplugin/plugin.py
+++ b/trio2o/tempestplugin/plugin.py
@@ -19,10 +19,10 @@ import os
from tempest import config # noqa
from tempest.test_discover import plugins
-from tricircle.tempestplugin import config as project_config # noqa
+from trio2o.tempestplugin import config as project_config # noqa
-class TricircleTempestPlugin(plugins.TempestPlugin):
+class Trio2oTempestPlugin(plugins.TempestPlugin):
def load_tests(self):
base_path = os.path.split(os.path.dirname(
diff --git a/tricircle/tempestplugin/post_test_hook.sh b/trio2o/tempestplugin/post_test_hook.sh
similarity index 85%
rename from tricircle/tempestplugin/post_test_hook.sh
rename to trio2o/tempestplugin/post_test_hook.sh
index fc784b6..8a75c97 100755
--- a/tricircle/tempestplugin/post_test_hook.sh
+++ b/trio2o/tempestplugin/post_test_hook.sh
@@ -16,13 +16,13 @@
export DEST=$BASE/new
export DEVSTACK_DIR=$DEST/devstack
-export TRICIRCLE_DIR=$DEST/tricircle
-export TRICIRCLE_DEVSTACK_PLUGIN_DIR=$TRICIRCLE_DIR/devstack
-export TRICIRCLE_TEMPEST_PLUGIN_DIR=$TRICIRCLE_DIR/tricircle/tempestplugin
+export TRIO2O_DIR=$DEST/trio2o
+export TRIO2O_DEVSTACK_PLUGIN_DIR=$TRIO2O_DIR/devstack
+export TRIO2O_TEMPEST_PLUGIN_DIR=$TRIO2O_DIR/trio2o/tempestplugin
export TEMPEST_DIR=$DEST/tempest
export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
-# use admin role to create Tricircle top Pod and Pod1
+# use admin role to create Trio2o top Pod and Pod1
source $DEVSTACK_DIR/openrc admin admin
token=$(openstack token issue | awk 'NR==5 {print $4}')
@@ -55,7 +55,7 @@ fi
sudo chown -R jenkins:stack $DEST/tempest
# sudo chown -R jenkins:stack $BASE/data/tempest
-# change the tempest configruation to test Tricircle
+# change the tempest configuration to test Trio2o
env | grep OS_
# import functions needed for the below workaround
@@ -78,17 +78,13 @@ iniset $TEMPEST_CONF volume endpoint_type publicURL
iniset $TEMPEST_CONF volume-feature-enabled api_v1 false
# Run the Compute Tempest tests
-cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
+cd $TRIO2O_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_compute.sh
# Run the Volume Tempest tests
-cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
+cd $TRIO2O_TEMPEST_PLUGIN_DIR
sudo BASE=$BASE ./tempest_volume.sh
-# Run the Network Tempest tests
-cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
-sudo BASE=$BASE ./tempest_network.sh
-
# Run the Scenario Tempest tests
-# cd $TRICIRCLE_TEMPEST_PLUGIN_DIR
+# cd $TRIO2O_TEMPEST_PLUGIN_DIR
# sudo BASE=$BASE ./tempest_scenario.sh
diff --git a/tricircle/tempestplugin/pre_test_hook.sh b/trio2o/tempestplugin/pre_test_hook.sh
similarity index 74%
rename from tricircle/tempestplugin/pre_test_hook.sh
rename to trio2o/tempestplugin/pre_test_hook.sh
index 97cdd1d..54c9c8d 100755
--- a/tricircle/tempestplugin/pre_test_hook.sh
+++ b/trio2o/tempestplugin/pre_test_hook.sh
@@ -15,8 +15,8 @@
# This script is executed inside pre_test_hook function in devstack gate.
export localconf=$BASE/new/devstack/local.conf
-export TRICIRCLE_API_CONF=/etc/tricircle/api.conf
-export TRICIRCLE_CINDER_APIGW_CONF=/etc/tricircle/cinder_apigw.conf
-export TRICIRCLE_NOVA_APIGW_CONF=/etc/tricircle/nova_apigw.conf
-export TRICIRCLE_XJOB_CONF=/etc/tricircle/xjob.conf
+export TRIO2O_API_CONF=/etc/trio2o/api.conf
+export TRIO2O_CINDER_APIGW_CONF=/etc/trio2o/cinder_apigw.conf
+export TRIO2O_NOVA_APIGW_CONF=/etc/trio2o/nova_apigw.conf
+export TRIO2O_XJOB_CONF=/etc/trio2o/xjob.conf
diff --git a/tricircle/nova_apigw/controllers/__init__.py b/trio2o/tempestplugin/services/__init__.py
similarity index 100%
rename from tricircle/nova_apigw/controllers/__init__.py
rename to trio2o/tempestplugin/services/__init__.py
diff --git a/tricircle/tempestplugin/tempest_compute.sh b/trio2o/tempestplugin/tempest_compute.sh
similarity index 99%
rename from tricircle/tempestplugin/tempest_compute.sh
rename to trio2o/tempestplugin/tempest_compute.sh
index 4e8d4a2..9f77706 100755
--- a/tricircle/tempestplugin/tempest_compute.sh
+++ b/trio2o/tempestplugin/tempest_compute.sh
@@ -21,7 +21,7 @@ export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
cd $TEMPEST_DIR
# Run functional test
-echo "Running Tricircle functional test suite..."
+echo "Running Trio2o functional test suite..."
# all test cases with following prefix
TESTCASES="(tempest.api.compute.test_versions"
diff --git a/tricircle/tempestplugin/tempest_scenario.sh b/trio2o/tempestplugin/tempest_scenario.sh
similarity index 100%
rename from tricircle/tempestplugin/tempest_scenario.sh
rename to trio2o/tempestplugin/tempest_scenario.sh
diff --git a/tricircle/tempestplugin/tempest_volume.sh b/trio2o/tempestplugin/tempest_volume.sh
similarity index 99%
rename from tricircle/tempestplugin/tempest_volume.sh
rename to trio2o/tempestplugin/tempest_volume.sh
index 15c7d6c..fb6f49e 100755
--- a/tricircle/tempestplugin/tempest_volume.sh
+++ b/trio2o/tempestplugin/tempest_volume.sh
@@ -21,7 +21,7 @@ export TEMPEST_CONF=$TEMPEST_DIR/etc/tempest.conf
cd $TEMPEST_DIR
# Run functional test
-echo "Running Tricircle functional test suite..."
+echo "Running Trio2o functional test suite..."
# all test cases with following prefix
TESTCASES="(tempest.api.volume.test_volumes_list"
diff --git a/tricircle/tempestplugin/__init__.py b/trio2o/tempestplugin/tests/__init__.py
similarity index 100%
rename from tricircle/tempestplugin/__init__.py
rename to trio2o/tempestplugin/tests/__init__.py
diff --git a/tricircle/tempestplugin/services/__init__.py b/trio2o/tempestplugin/tests/api/__init__.py
similarity index 100%
rename from tricircle/tempestplugin/services/__init__.py
rename to trio2o/tempestplugin/tests/api/__init__.py
diff --git a/tricircle/tempestplugin/tests/api/base.py b/trio2o/tempestplugin/tests/api/base.py
similarity index 94%
rename from tricircle/tempestplugin/tests/api/base.py
rename to trio2o/tempestplugin/tests/api/base.py
index 7e093e0..59f6755 100644
--- a/tricircle/tempestplugin/tests/api/base.py
+++ b/trio2o/tempestplugin/tests/api/base.py
@@ -22,7 +22,7 @@ CONF = config.CONF
LOG = logging.getLogger(__name__)
-class BaseTricircleTest(test.BaseTestCase):
+class BaseTrio2oTest(test.BaseTestCase):
@classmethod
def skip_checks(cls):
diff --git a/tricircle/tempestplugin/tests/api/test_sample.py b/trio2o/tempestplugin/tests/api/test_sample.py
similarity index 73%
rename from tricircle/tempestplugin/tests/api/test_sample.py
rename to trio2o/tempestplugin/tests/api/test_sample.py
index 397f181..6dc122d 100644
--- a/tricircle/tempestplugin/tests/api/test_sample.py
+++ b/trio2o/tempestplugin/tests/api/test_sample.py
@@ -14,19 +14,19 @@
# under the License.
from tempest import test
-from tricircle.tempestplugin.tests.api import base
+from trio2o.tempestplugin.tests.api import base
-class TestTricircleSample(base.BaseTricircleTest):
+class TestTrio2oSample(base.BaseTrio2oTest):
@classmethod
def resource_setup(cls):
- super(TestTricircleSample, cls).resource_setup()
+ super(TestTrio2oSample, cls).resource_setup()
@test.attr(type="smoke")
def test_sample(self):
- self.assertEqual('Tricircle Sample Test!', 'Tricircle Sample Test!')
+ self.assertEqual('Trio2o Sample Test!', 'Trio2o Sample Test!')
@classmethod
def resource_cleanup(cls):
- super(TestTricircleSample, cls).resource_cleanup()
+ super(TestTrio2oSample, cls).resource_cleanup()
diff --git a/tricircle/tempestplugin/tests/__init__.py b/trio2o/tempestplugin/tests/scenario/__init__.py
similarity index 100%
rename from tricircle/tempestplugin/tests/__init__.py
rename to trio2o/tempestplugin/tests/scenario/__init__.py
diff --git a/tricircle/tempestplugin/tests/api/__init__.py b/trio2o/tests/__init__.py
similarity index 100%
rename from tricircle/tempestplugin/tests/api/__init__.py
rename to trio2o/tests/__init__.py
diff --git a/tricircle/tests/base.py b/trio2o/tests/base.py
similarity index 58%
rename from tricircle/tests/base.py
rename to trio2o/tests/base.py
index b6d2276..a5465e6 100644
--- a/tricircle/tests/base.py
+++ b/trio2o/tests/base.py
@@ -13,28 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from neutron.conf import common as n_conf
-from oslo_config import cfg
from oslotest import base
-CONFLICT_OPT_NAMES = [
- 'api_extensions_path',
- 'bind_port',
- 'bind_host',
- 'allow_pagination',
- 'allow_sorting'
-]
-
-
class TestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
def setUp(self):
- # neutron has configuration options "api_extensions_path",
- # "bind_port" and "bind_host"which conflicts with tricircle
- # configuration option, so unregister this option before
- # running tricircle tests
- for opt in n_conf.core_opts:
- if opt.name in CONFLICT_OPT_NAMES:
- cfg.CONF.unregister_opt(opt)
super(TestCase, self).setUp()
diff --git a/tricircle/tempestplugin/tests/scenario/__init__.py b/trio2o/tests/functional/__init__.py
similarity index 100%
rename from tricircle/tempestplugin/tests/scenario/__init__.py
rename to trio2o/tests/functional/__init__.py
diff --git a/tricircle/tests/__init__.py b/trio2o/tests/functional/api/__init__.py
similarity index 100%
rename from tricircle/tests/__init__.py
rename to trio2o/tests/functional/api/__init__.py
diff --git a/tricircle/tests/functional/__init__.py b/trio2o/tests/functional/api/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/functional/__init__.py
rename to trio2o/tests/functional/api/controllers/__init__.py
diff --git a/tricircle/tests/functional/api/controllers/test_pod.py b/trio2o/tests/functional/api/controllers/test_pod.py
similarity index 97%
rename from tricircle/tests/functional/api/controllers/test_pod.py
rename to trio2o/tests/functional/api/controllers/test_pod.py
index a581f9e..344fc00 100644
--- a/tricircle/tests/functional/api/controllers/test_pod.py
+++ b/trio2o/tests/functional/api/controllers/test_pod.py
@@ -21,12 +21,12 @@ from oslo_config import cfg
from oslo_config import fixture as fixture_config
import oslo_db.exception as db_exc
-from tricircle.api import app
-from tricircle.common import az_ag
-from tricircle.common import context
-from tricircle.common import utils
-from tricircle.db import core
-from tricircle.tests import base
+from trio2o.api import app
+from trio2o.common import az_ag
+from trio2o.common import context
+from trio2o.common import utils
+from trio2o.db import core
+from trio2o.tests import base
OPT_GROUP_NAME = 'keystone_authtoken'
@@ -49,7 +49,7 @@ class API_FunctionalTest(base.TestCase):
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF.set_override('auth_strategy', 'noauth')
- self.CONF.set_override('tricircle_db_connection', 'sqlite:///:memory:')
+ self.CONF.set_override('trio2o_db_connection', 'sqlite:///:memory:')
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
@@ -61,8 +61,8 @@ class API_FunctionalTest(base.TestCase):
def _make_app(self, enable_acl=False):
self.config = {
'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
+ 'root': 'trio2o.api.controllers.root.RootController',
+ 'modules': ['trio2o.api'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/api/controllers/test_root.py b/trio2o/tests/functional/api/controllers/test_root.py
similarity index 96%
rename from tricircle/tests/functional/api/controllers/test_root.py
rename to trio2o/tests/functional/api/controllers/test_root.py
index db07152..0eb512a 100644
--- a/tricircle/tests/functional/api/controllers/test_root.py
+++ b/trio2o/tests/functional/api/controllers/test_root.py
@@ -22,8 +22,8 @@ from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from tricircle.api import app
-from tricircle.tests import base
+from trio2o.api import app
+from trio2o.tests import base
OPT_GROUP_NAME = 'keystone_authtoken'
@@ -48,8 +48,8 @@ class API_FunctionalTest(base.TestCase):
def _make_app(self, enable_acl=False):
self.config = {
'app': {
- 'root': 'tricircle.api.controllers.root.RootController',
- 'modules': ['tricircle.api'],
+ 'root': 'trio2o.api.controllers.root.RootController',
+ 'modules': ['trio2o.api'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/api/__init__.py b/trio2o/tests/functional/cinder_apigw/__init__.py
similarity index 100%
rename from tricircle/tests/functional/api/__init__.py
rename to trio2o/tests/functional/cinder_apigw/__init__.py
diff --git a/tricircle/tests/functional/api/controllers/__init__.py b/trio2o/tests/functional/cinder_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/functional/api/controllers/__init__.py
rename to trio2o/tests/functional/cinder_apigw/controllers/__init__.py
diff --git a/tricircle/tests/functional/cinder_apigw/controllers/test_root.py b/trio2o/tests/functional/cinder_apigw/controllers/test_root.py
similarity index 96%
rename from tricircle/tests/functional/cinder_apigw/controllers/test_root.py
rename to trio2o/tests/functional/cinder_apigw/controllers/test_root.py
index 3ba0619..f5afde3 100644
--- a/tricircle/tests/functional/cinder_apigw/controllers/test_root.py
+++ b/trio2o/tests/functional/cinder_apigw/controllers/test_root.py
@@ -22,8 +22,8 @@ from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from tricircle.cinder_apigw import app
-from tricircle.tests import base
+from trio2o.cinder_apigw import app
+from trio2o.tests import base
OPT_GROUP_NAME = 'keystone_authtoken'
@@ -49,8 +49,8 @@ class Cinder_API_GW_FunctionalTest(base.TestCase):
self.config = {
'app': {
'root':
- 'tricircle.cinder_apigw.controllers.root.RootController',
- 'modules': ['tricircle.cinder_apigw'],
+ 'trio2o.cinder_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.cinder_apigw'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/cinder_apigw/controllers/test_volume.py b/trio2o/tests/functional/cinder_apigw/controllers/test_volume.py
similarity index 98%
rename from tricircle/tests/functional/cinder_apigw/controllers/test_volume.py
rename to trio2o/tests/functional/cinder_apigw/controllers/test_volume.py
index 7d9e687..b275944 100644
--- a/tricircle/tests/functional/cinder_apigw/controllers/test_volume.py
+++ b/trio2o/tests/functional/cinder_apigw/controllers/test_volume.py
@@ -27,16 +27,16 @@ from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from tricircle.cinder_apigw import app
+from trio2o.cinder_apigw import app
-from tricircle.common import constants as cons
-from tricircle.common import context
-from tricircle.common import httpclient as hclient
+from trio2o.common import constants as cons
+from trio2o.common import context
+from trio2o.common import httpclient as hclient
-from tricircle.db import api as db_api
-from tricircle.db import core
+from trio2o.db import api as db_api
+from trio2o.db import core
-from tricircle.tests import base
+from trio2o.tests import base
OPT_GROUP_NAME = 'keystone_authtoken'
@@ -155,8 +155,8 @@ class CinderVolumeFunctionalTest(base.TestCase):
self.config = {
'app': {
'root':
- 'tricircle.cinder_apigw.controllers.root.RootController',
- 'modules': ['tricircle.cinder_apigw'],
+ 'trio2o.cinder_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.cinder_apigw'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/cinder_apigw/__init__.py b/trio2o/tests/functional/nova_apigw/__init__.py
similarity index 100%
rename from tricircle/tests/functional/cinder_apigw/__init__.py
rename to trio2o/tests/functional/nova_apigw/__init__.py
diff --git a/tricircle/tests/functional/cinder_apigw/controllers/__init__.py b/trio2o/tests/functional/nova_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/functional/cinder_apigw/controllers/__init__.py
rename to trio2o/tests/functional/nova_apigw/controllers/__init__.py
diff --git a/tricircle/tests/functional/nova_apigw/controllers/test_microversion.py b/trio2o/tests/functional/nova_apigw/controllers/test_microversion.py
similarity index 91%
rename from tricircle/tests/functional/nova_apigw/controllers/test_microversion.py
rename to trio2o/tests/functional/nova_apigw/controllers/test_microversion.py
index 979554e..61f3a23 100644
--- a/tricircle/tests/functional/nova_apigw/controllers/test_microversion.py
+++ b/trio2o/tests/functional/nova_apigw/controllers/test_microversion.py
@@ -20,15 +20,15 @@ import pecan
from pecan.configuration import set_config
from pecan.testing import load_test_app
-from tricircle.common import constants
-from tricircle.common import constants as cons
-from tricircle.common import context
-from tricircle.common import resource_handle
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.nova_apigw import app
-from tricircle.nova_apigw.controllers import server
-from tricircle.tests import base
+from trio2o.common import constants
+from trio2o.common import constants as cons
+from trio2o.common import context
+from trio2o.common import resource_handle
+from trio2o.db import api as db_api
+from trio2o.db import core
+from trio2o.nova_apigw import app
+from trio2o.nova_apigw.controllers import server
+from trio2o.tests import base
from oslo_config import cfg
from oslo_config import fixture as fixture_config
@@ -36,11 +36,11 @@ from oslo_config import fixture as fixture_config
FAKE_AZ = 'fake_az'
-def get_tricircle_client(self, pod):
- return FakeTricircleClient()
+def get_trio2o_client(self, pod):
+ return FakeTrio2oClient()
-class FakeTricircleClient(object):
+class FakeTrio2oClient(object):
def __init__(self):
pass
@@ -90,7 +90,7 @@ class MicroVersionFunctionTest(base.TestCase):
self.CONF = self.useFixture(fixture_config.Config()).conf
self.CONF.set_override('auth_strategy', 'noauth')
- self.CONF.set_override('tricircle_db_connection', 'sqlite:///:memory:')
+ self.CONF.set_override('trio2o_db_connection', 'sqlite:///:memory:')
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
@@ -102,8 +102,8 @@ class MicroVersionFunctionTest(base.TestCase):
def _make_app(self, enable_acl=False):
self.config = {
'app': {
- 'root': 'tricircle.nova_apigw.controllers.root.RootController',
- 'modules': ['tricircle.nova_apigw'],
+ 'root': 'trio2o.nova_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.nova_apigw'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
@@ -216,7 +216,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
return headers
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_no_header(self, mock_client):
headers = self._make_headers(None)
@@ -231,7 +231,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_vaild_version(self, mock_client):
headers = self._make_headers(self.vaild_version)
@@ -245,7 +245,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_vaild_leagcy_version(self, mock_client):
headers = self._make_headers(self.vaild_leagcy_version, 'leagcy')
@@ -259,7 +259,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_latest_version(self, mock_client):
headers = self._make_headers(self.latest_version)
@@ -274,7 +274,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_min_version(self, mock_client):
headers = self._make_headers(self.min_version, 'leagecy')
@@ -288,7 +288,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_max_version(self, mock_client):
headers = self._make_headers(self.max_version)
@@ -302,7 +302,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_major(self, mock_client):
headers = self._make_headers(self.invaild_major)
@@ -312,7 +312,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_major2(self, mock_client):
headers = self._make_headers(self.invaild_major2, 'leagecy')
@@ -322,7 +322,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_major3(self, mock_client):
headers = self._make_headers(self.invaild_major3)
@@ -332,7 +332,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_minor(self, mock_client):
headers = self._make_headers(self.invaild_minor)
@@ -342,7 +342,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_lower_boundary(self, mock_client):
headers = self._make_headers(self.lower_boundary)
@@ -352,7 +352,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_upper_boundary(self, mock_client):
headers = self._make_headers(self.upper_boundary)
@@ -362,7 +362,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_compute_format(self, mock_client):
headers = self._make_headers(self.invaild_compute_format)
@@ -372,7 +372,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_only_major(self, mock_client):
headers = self._make_headers(self.only_major, 'leagecy')
@@ -382,7 +382,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_version(self, mock_client):
headers = self._make_headers(self.invaild_version)
@@ -392,7 +392,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_leagecy_version(self, mock_client):
headers = self._make_headers(self.invaild_leagecy_version, 'leagecy')
@@ -402,7 +402,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_both_version(self, mock_client):
headers = self._make_headers(self.vaild_version, 'both')
@@ -417,7 +417,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
timeout=60, username=None, api_key=None)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_version2(self, mock_client):
headers = self._make_headers(self.invaild_version2)
@@ -427,7 +427,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_version3(self, mock_client):
headers = self._make_headers(self.invaild_version3)
@@ -437,7 +437,7 @@ class MicroversionsTest(MicroVersionFunctionTest):
self.assertEqual(406, res.status_int)
@mock.patch.object(server.ServerController, '_get_client',
- new=get_tricircle_client)
+ new=get_trio2o_client)
@mock.patch.object(n_client, 'Client')
def test_microversions_invaild_version4(self, mock_client):
headers = self._make_headers(self.invaild_version4)
diff --git a/tricircle/tests/functional/nova_apigw/controllers/test_quota_sets.py b/trio2o/tests/functional/nova_apigw/controllers/test_quota_sets.py
similarity index 97%
rename from tricircle/tests/functional/nova_apigw/controllers/test_quota_sets.py
rename to trio2o/tests/functional/nova_apigw/controllers/test_quota_sets.py
index 1be1ba9..1257e3e 100644
--- a/tricircle/tests/functional/nova_apigw/controllers/test_quota_sets.py
+++ b/trio2o/tests/functional/nova_apigw/controllers/test_quota_sets.py
@@ -23,15 +23,15 @@ from oslo_config import cfg
from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
-from tricircle.nova_apigw import app
-from tricircle.nova_apigw.controllers import quota_sets
+from trio2o.nova_apigw import app
+from trio2o.nova_apigw.controllers import quota_sets
-from tricircle.common import context
-from tricircle.common import exceptions as t_exceptions
-from tricircle.common import quota
-from tricircle.db import core
+from trio2o.common import context
+from trio2o.common import exceptions as t_exceptions
+from trio2o.common import quota
+from trio2o.db import core
-from tricircle.tests.unit.common import test_quota
+from trio2o.tests.unit.common import test_quota
QUOTAS = quota.QUOTAS
@@ -112,8 +112,8 @@ class QuotaControllerTest(test_quota.QuotaSetsOperationTest):
self.config = {
'app': {
'root':
- 'tricircle.nova_apigw.controllers.root.RootController',
- 'modules': ['tricircle.nova_apigw'],
+ 'trio2o.nova_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.nova_apigw'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/nova_apigw/controllers/test_root.py b/trio2o/tests/functional/nova_apigw/controllers/test_root.py
similarity index 96%
rename from tricircle/tests/functional/nova_apigw/controllers/test_root.py
rename to trio2o/tests/functional/nova_apigw/controllers/test_root.py
index 26e557c..dcfadfb 100644
--- a/tricircle/tests/functional/nova_apigw/controllers/test_root.py
+++ b/trio2o/tests/functional/nova_apigw/controllers/test_root.py
@@ -22,8 +22,8 @@ from oslo_config import fixture as fixture_config
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
-from tricircle.nova_apigw import app
-from tricircle.tests import base
+from trio2o.nova_apigw import app
+from trio2o.tests import base
OPT_GROUP_NAME = 'keystone_authtoken'
@@ -48,8 +48,8 @@ class Nova_API_GW_FunctionalTest(base.TestCase):
def _make_app(self, enable_acl=False):
self.config = {
'app': {
- 'root': 'tricircle.nova_apigw.controllers.root.RootController',
- 'modules': ['tricircle.nova_apigw'],
+ 'root': 'trio2o.nova_apigw.controllers.root.RootController',
+ 'modules': ['trio2o.nova_apigw'],
'enable_acl': enable_acl,
'errors': {
400: '/error',
diff --git a/tricircle/tests/functional/nova_apigw/__init__.py b/trio2o/tests/unit/__init__.py
similarity index 100%
rename from tricircle/tests/functional/nova_apigw/__init__.py
rename to trio2o/tests/unit/__init__.py
diff --git a/tricircle/tests/functional/nova_apigw/controllers/__init__.py b/trio2o/tests/unit/api/__init__.py
similarity index 100%
rename from tricircle/tests/functional/nova_apigw/controllers/__init__.py
rename to trio2o/tests/unit/api/__init__.py
diff --git a/tricircle/tests/unit/__init__.py b/trio2o/tests/unit/api/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/unit/__init__.py
rename to trio2o/tests/unit/api/controllers/__init__.py
diff --git a/tricircle/tests/unit/api/controllers/test_pod.py b/trio2o/tests/unit/api/controllers/test_pod.py
similarity index 97%
rename from tricircle/tests/unit/api/controllers/test_pod.py
rename to trio2o/tests/unit/api/controllers/test_pod.py
index 5a45208..2c7daed 100644
--- a/tricircle/tests/unit/api/controllers/test_pod.py
+++ b/trio2o/tests/unit/api/controllers/test_pod.py
@@ -19,11 +19,11 @@ import unittest
import pecan
-from tricircle.api.controllers import pod
-from tricircle.common import context
-from tricircle.common import utils
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.api.controllers import pod
+from trio2o.common import context
+from trio2o.common import utils
+from trio2o.db import core
+from trio2o.db import models
class PodsControllerTest(unittest.TestCase):
diff --git a/tricircle/tests/unit/api/__init__.py b/trio2o/tests/unit/cinder_apigw/__init__.py
similarity index 100%
rename from tricircle/tests/unit/api/__init__.py
rename to trio2o/tests/unit/cinder_apigw/__init__.py
diff --git a/tricircle/tests/unit/api/controllers/__init__.py b/trio2o/tests/unit/cinder_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/unit/api/controllers/__init__.py
rename to trio2o/tests/unit/cinder_apigw/controllers/__init__.py
diff --git a/tricircle/tests/unit/cinder_apigw/controllers/test_volume.py b/trio2o/tests/unit/cinder_apigw/controllers/test_volume.py
similarity index 98%
rename from tricircle/tests/unit/cinder_apigw/controllers/test_volume.py
rename to trio2o/tests/unit/cinder_apigw/controllers/test_volume.py
index c8aa07a..c68be30 100644
--- a/tricircle/tests/unit/cinder_apigw/controllers/test_volume.py
+++ b/trio2o/tests/unit/cinder_apigw/controllers/test_volume.py
@@ -17,10 +17,10 @@ from mock import patch
import pecan
import unittest
-from tricircle.cinder_apigw.controllers import volume_type
-from tricircle.common import context
-from tricircle.db import api as db_api
-from tricircle.db import core
+from trio2o.cinder_apigw.controllers import volume_type
+from trio2o.common import context
+from trio2o.db import api as db_api
+from trio2o.db import core
class FakeResponse(object):
diff --git a/tricircle/tests/unit/cinder_apigw/controllers/test_volume_actions.py b/trio2o/tests/unit/cinder_apigw/controllers/test_volume_actions.py
similarity index 97%
rename from tricircle/tests/unit/cinder_apigw/controllers/test_volume_actions.py
rename to trio2o/tests/unit/cinder_apigw/controllers/test_volume_actions.py
index ebe3657..9e6e39a 100644
--- a/tricircle/tests/unit/cinder_apigw/controllers/test_volume_actions.py
+++ b/trio2o/tests/unit/cinder_apigw/controllers/test_volume_actions.py
@@ -20,13 +20,13 @@ import unittest
from cinderclient.client import HTTPClient
from oslo_utils import uuidutils
-from tricircle.cinder_apigw.controllers import volume_actions as action
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.cinder_apigw.controllers import volume_actions as action
+from trio2o.common import constants
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
class FakeResponse(object):
diff --git a/tricircle/tests/unit/cinder_apigw/__init__.py b/trio2o/tests/unit/common/__init__.py
similarity index 100%
rename from tricircle/tests/unit/cinder_apigw/__init__.py
rename to trio2o/tests/unit/common/__init__.py
diff --git a/tricircle/tests/unit/common/test_az_ag.py b/trio2o/tests/unit/common/test_az_ag.py
similarity index 97%
rename from tricircle/tests/unit/common/test_az_ag.py
rename to trio2o/tests/unit/common/test_az_ag.py
index f811b23..a93c9ed 100644
--- a/tricircle/tests/unit/common/test_az_ag.py
+++ b/trio2o/tests/unit/common/test_az_ag.py
@@ -16,12 +16,12 @@
import unittest
-from tricircle.common import az_ag
-from tricircle.common import context
+from trio2o.common import az_ag
+from trio2o.common import context
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
FAKE_AZ = 'fake_az'
diff --git a/tricircle/tests/unit/common/test_client.py b/trio2o/tests/unit/common/test_client.py
similarity index 98%
rename from tricircle/tests/unit/common/test_client.py
rename to trio2o/tests/unit/common/test_client.py
index 56d798e..736fc64 100644
--- a/tricircle/tests/unit/common/test_client.py
+++ b/trio2o/tests/unit/common/test_client.py
@@ -21,12 +21,12 @@ import mock
from mock import patch
from oslo_config import cfg
-from tricircle.common import client
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import resource_handle
-from tricircle.db import api
-from tricircle.db import core
+from trio2o.common import client
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.common import resource_handle
+from trio2o.db import api
+from trio2o.db import core
FAKE_AZ = 'fake_az'
diff --git a/tricircle/tests/unit/common/test_exception.py b/trio2o/tests/unit/common/test_exception.py
similarity index 67%
rename from tricircle/tests/unit/common/test_exception.py
rename to trio2o/tests/unit/common/test_exception.py
index 99be2ce..b5c57f1 100644
--- a/tricircle/tests/unit/common/test_exception.py
+++ b/trio2o/tests/unit/common/test_exception.py
@@ -18,97 +18,97 @@
import six
import unittest
-from tricircle.common import exceptions
+from trio2o.common import exceptions
-class TricircleExceptionTestCase(unittest.TestCase):
+class Trio2oExceptionTestCase(unittest.TestCase):
def test_default_error_msg(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = "default message"
- exc = FakeTricircleException()
+ exc = FakeTrio2oException()
self.assertEqual('default message', six.text_type(exc))
def test_error_msg(self):
self.assertEqual('test',
- six.text_type(exceptions.TricircleException('test')))
+ six.text_type(exceptions.Trio2oException('test')))
def test_default_error_msg_with_kwargs(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = "default message: %(code)s"
- exc = FakeTricircleException(code=500)
+ exc = FakeTrio2oException(code=500)
self.assertEqual('default message: 500', six.text_type(exc))
def test_error_msg_exception_with_kwargs(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = "default message: %(misspelled_code)s"
- exc = FakeTricircleException(code=500)
+ exc = FakeTrio2oException(code=500)
self.assertEqual('default message: %(misspelled_code)s',
six.text_type(exc))
def test_default_error_code(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
code = 404
- exc = FakeTricircleException()
+ exc = FakeTrio2oException()
self.assertEqual(404, exc.kwargs['code'])
def test_error_code_from_kwarg(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
code = 500
- exc = FakeTricircleException(code=404)
+ exc = FakeTrio2oException(code=404)
self.assertEqual(404, exc.kwargs['code'])
def test_error_msg_is_exception_to_string(self):
msg = 'test message'
exc1 = Exception(msg)
- exc2 = exceptions.TricircleException(exc1)
+ exc2 = exceptions.Trio2oException(exc1)
self.assertEqual(msg, exc2.msg)
def test_exception_kwargs_to_string(self):
msg = 'test message'
exc1 = Exception(msg)
- exc2 = exceptions.TricircleException(kwarg1=exc1)
+ exc2 = exceptions.Trio2oException(kwarg1=exc1)
self.assertEqual(msg, exc2.kwargs['kwarg1'])
def test_message_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = 'FakeCinderException: %(message)s'
- exc = FakeTricircleException(message='message')
+ exc = FakeTrio2oException(message='message')
self.assertEqual('FakeCinderException: message', six.text_type(exc))
def test_message_and_kwarg_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = 'Error %(code)d: %(message)s'
- exc = FakeTricircleException(message='message', code=404)
+ exc = FakeTrio2oException(message='message', code=404)
self.assertEqual('Error 404: message', six.text_type(exc))
def test_message_is_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = 'Exception: %(message)s'
msg = 'test message'
exc1 = Exception(msg)
- exc2 = FakeTricircleException(message=exc1)
+ exc2 = FakeTrio2oException(message=exc1)
self.assertEqual('Exception: test message', six.text_type(exc2))
def test_no_message_input_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = 'Error: %(message)s'
- exc = FakeTricircleException()
+ exc = FakeTrio2oException()
out_message = six.text_type(exc)
self.assertEqual('Error: None', out_message)
def test_no_kwarg_input_exception_in_format_string(self):
- class FakeTricircleException(exceptions.TricircleException):
+ class FakeTrio2oException(exceptions.Trio2oException):
message = 'No Kwarg Error: %(why)s, %(reason)s'
- exc = FakeTricircleException(why='why')
+ exc = FakeTrio2oException(why='why')
out_message = six.text_type(exc)
self.assertEqual('No Kwarg Error: %(why)s, %(reason)s', out_message)
diff --git a/tricircle/tests/unit/common/test_httpclient.py b/trio2o/tests/unit/common/test_httpclient.py
similarity index 97%
rename from tricircle/tests/unit/common/test_httpclient.py
rename to trio2o/tests/unit/common/test_httpclient.py
index 72255d5..8a175ec 100644
--- a/tricircle/tests/unit/common/test_httpclient.py
+++ b/trio2o/tests/unit/common/test_httpclient.py
@@ -17,12 +17,12 @@ from mock import patch
import unittest
-from tricircle.common import constants as cons
-from tricircle.common import context
-from tricircle.common import httpclient as hclient
+from trio2o.common import constants as cons
+from trio2o.common import context
+from trio2o.common import httpclient as hclient
-from tricircle.db import api
-from tricircle.db import core
+from trio2o.db import api
+from trio2o.db import core
def fake_get_pod_service_endpoint(ctx, pod_name, st):
diff --git a/tricircle/tests/unit/common/test_quota.py b/trio2o/tests/unit/common/test_quota.py
similarity index 99%
rename from tricircle/tests/unit/common/test_quota.py
rename to trio2o/tests/unit/common/test_quota.py
index 0d80a1a..be0804c 100644
--- a/tricircle/tests/unit/common/test_quota.py
+++ b/trio2o/tests/unit/common/test_quota.py
@@ -25,14 +25,14 @@ from oslo_utils import timeutils
from oslo_utils import uuidutils
from oslotest import moxstubout
-from tricircle.common import constants as cons
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import quota
-from tricircle.db import api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.tests import base
+from trio2o.common import constants as cons
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.common import quota
+from trio2o.db import api as db_api
+from trio2o.db import core
+from trio2o.db import models
+from trio2o.tests import base
CONF = cfg.CONF
@@ -1389,7 +1389,7 @@ class QuotaReserveTestCase(QuotaTestBase, base.TestCase):
result = db_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, 0)
- # no sync function in Tricircle
+ # no sync function in Trio2o
# self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages_created,
[dict(resource='volumes',
@@ -1423,7 +1423,7 @@ class QuotaReserveTestCase(QuotaTestBase, base.TestCase):
result = db_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
- # no sync function in Tricircle
+ # no sync function in Trio2o
# self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
@@ -1454,7 +1454,7 @@ class QuotaReserveTestCase(QuotaTestBase, base.TestCase):
result = db_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 5, 0)
- # no sync function in Tricircle
+ # no sync function in Trio2o
# self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
@@ -1490,7 +1490,7 @@ class QuotaReserveTestCase(QuotaTestBase, base.TestCase):
result = db_api.quota_reserve(context, self.resources, quotas,
deltas, self.expire, 0, max_age)
- # no sync function in Tricircle
+ # no sync function in Trio2o
# self.assertEqual(set(['volumes', 'gigabytes']), self.sync_called)
self.compare_usage(self.usages, [dict(resource='volumes',
project_id='test_project',
diff --git a/tricircle/tests/unit/cinder_apigw/controllers/__init__.py b/trio2o/tests/unit/db/__init__.py
similarity index 100%
rename from tricircle/tests/unit/cinder_apigw/controllers/__init__.py
rename to trio2o/tests/unit/db/__init__.py
diff --git a/tricircle/tests/unit/db/test_api.py b/trio2o/tests/unit/db/test_api.py
similarity index 99%
rename from tricircle/tests/unit/db/test_api.py
rename to trio2o/tests/unit/db/test_api.py
index 988b4e1..dec1bae 100644
--- a/tricircle/tests/unit/db/test_api.py
+++ b/trio2o/tests/unit/db/test_api.py
@@ -17,13 +17,13 @@ import datetime
import six
import unittest
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.common import quota
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.common import quota
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
class APITest(unittest.TestCase):
diff --git a/tricircle/tests/unit/db/test_models.py b/trio2o/tests/unit/db/test_models.py
similarity index 98%
rename from tricircle/tests/unit/db/test_models.py
rename to trio2o/tests/unit/db/test_models.py
index 47ad566..3d39608 100644
--- a/tricircle/tests/unit/db/test_models.py
+++ b/trio2o/tests/unit/db/test_models.py
@@ -21,11 +21,11 @@ import unittest
import oslo_db.exception
import sqlalchemy as sql
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
def _get_field_value(column):
diff --git a/tricircle/tests/unit/common/__init__.py b/trio2o/tests/unit/nova_apigw/__init__.py
similarity index 100%
rename from tricircle/tests/unit/common/__init__.py
rename to trio2o/tests/unit/nova_apigw/__init__.py
diff --git a/tricircle/tests/unit/db/__init__.py b/trio2o/tests/unit/nova_apigw/controllers/__init__.py
similarity index 100%
rename from tricircle/tests/unit/db/__init__.py
rename to trio2o/tests/unit/nova_apigw/controllers/__init__.py
diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_action.py b/trio2o/tests/unit/nova_apigw/controllers/test_action.py
similarity index 95%
rename from tricircle/tests/unit/nova_apigw/controllers/test_action.py
rename to trio2o/tests/unit/nova_apigw/controllers/test_action.py
index 1c1666b..70114dd 100644
--- a/tricircle/tests/unit/nova_apigw/controllers/test_action.py
+++ b/trio2o/tests/unit/nova_apigw/controllers/test_action.py
@@ -19,14 +19,14 @@ import unittest
from oslo_utils import uuidutils
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.common import exceptions
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.nova_apigw.controllers import action
+from trio2o.common import client
+from trio2o.common import constants
+from trio2o.common import context
+from trio2o.common import exceptions
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
+from trio2o.nova_apigw.controllers import action
class FakeResponse(object):
diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_aggregate.py b/trio2o/tests/unit/nova_apigw/controllers/test_aggregate.py
similarity index 95%
rename from tricircle/tests/unit/nova_apigw/controllers/test_aggregate.py
rename to trio2o/tests/unit/nova_apigw/controllers/test_aggregate.py
index c313372..60d1d53 100644
--- a/tricircle/tests/unit/nova_apigw/controllers/test_aggregate.py
+++ b/trio2o/tests/unit/nova_apigw/controllers/test_aggregate.py
@@ -16,9 +16,9 @@
from mock import patch
import unittest
-from tricircle.common import context
-from tricircle.db import core
-from tricircle.nova_apigw.controllers import aggregate
+from trio2o.common import context
+from trio2o.db import core
+from trio2o.nova_apigw.controllers import aggregate
class AggregateTest(unittest.TestCase):
diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_flavor.py b/trio2o/tests/unit/nova_apigw/controllers/test_flavor.py
similarity index 93%
rename from tricircle/tests/unit/nova_apigw/controllers/test_flavor.py
rename to trio2o/tests/unit/nova_apigw/controllers/test_flavor.py
index 3ef481e..403064b 100644
--- a/tricircle/tests/unit/nova_apigw/controllers/test_flavor.py
+++ b/trio2o/tests/unit/nova_apigw/controllers/test_flavor.py
@@ -16,9 +16,9 @@
from mock import patch
import unittest
-from tricircle.common import context
-from tricircle.db import core
-from tricircle.nova_apigw.controllers import flavor
+from trio2o.common import context
+from trio2o.db import core
+from trio2o.nova_apigw.controllers import flavor
class FlavorTest(unittest.TestCase):
diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_server.py b/trio2o/tests/unit/nova_apigw/controllers/test_server.py
similarity index 61%
rename from tricircle/tests/unit/nova_apigw/controllers/test_server.py
rename to trio2o/tests/unit/nova_apigw/controllers/test_server.py
index 8640fc5..1f9ad5c 100644
--- a/tricircle/tests/unit/nova_apigw/controllers/test_server.py
+++ b/trio2o/tests/unit/nova_apigw/controllers/test_server.py
@@ -20,19 +20,17 @@ from mock import patch
import pecan
import unittest
-import neutronclient.common.exceptions as q_exceptions
from oslo_utils import uuidutils
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.common.exceptions as t_exceptions
-from tricircle.common import lock_handle
-from tricircle.common import xrpcapi
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.network import helper
-from tricircle.nova_apigw.controllers import server
+from trio2o.common import constants
+from trio2o.common import context
+import trio2o.common.exceptions as t_exceptions
+from trio2o.common import lock_handle
+from trio2o.common import xrpcapi
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
+from trio2o.nova_apigw.controllers import server
TOP_NETS = []
@@ -83,7 +81,6 @@ class FakeServerController(server.ServerController):
def __init__(self, project_id):
self.clients = {'t_region': FakeClient('t_region')}
self.project_id = project_id
- self.helper = FakeHelper()
self.xjob_handler = xrpcapi.XJobAPI()
def _get_client(self, pod_name=None):
@@ -95,11 +92,6 @@ class FakeServerController(server.ServerController):
return self.clients[pod_name]
-class FakeHelper(helper.NetworkHelper):
- def _get_client(self, pod_name=None):
- return FakeClient(pod_name)
-
-
class FakeClient(object):
_res_map = {'top': {'network': TOP_NETS,
@@ -253,19 +245,6 @@ class FakeClient(object):
ret_servers = []
for b_server in self.list_resources('server', ctx, filters):
ret_server = copy.deepcopy(b_server)
- for nic in ret_server['nics']:
- ports = self.list_ports(
- ctx, [{'key': 'id', 'comparator': 'eq',
- 'value': nic['port-id']}])
- nets = self.list_resources(
- 'network', ctx, [{'key': 'id', 'comparator': 'eq',
- 'value': ports[0]['network_id']}])
- ret_server['addresses'] = {
- nets[0]['name']: [
- {'OS-EXT-IPS-MAC:mac_addr': ports[0]['mac_address'],
- 'version': 4,
- 'addr': ports[0]['fixed_ips'][0]['ip_address'],
- 'OS-EXT-IPS:type': 'fixed'}]}
ret_servers.append(ret_server)
return ret_servers
@@ -289,14 +268,10 @@ class FakeClient(object):
'security_group', ctx,
[{'key': 'id', 'comparator': 'eq', 'value': sg_id}])[0]
new_rule = copy.copy(_rule)
- match_found = False
for rule in sg['security_group_rules']:
old_rule = copy.copy(rule)
if new_rule == old_rule:
- match_found = True
break
- if match_found:
- raise q_exceptions.Conflict()
sg['security_group_rules'].append(new_rule)
def delete_security_group_rules(self, ctx, rule_id):
@@ -395,202 +370,6 @@ class ServerTest(unittest.TestCase):
self.assertEqual('port', new_route['resource_type'])
self.assertEqual(self.project_id, new_route['project_id'])
- def test_prepare_neutron_element(self):
- t_pod, b_pod = self._prepare_pod()
- net = {'id': 'top_net_id'}
- body = {'network': {'name': 'top_net_id'}}
- is_new, bottom_port_id = self.controller.helper.prepare_bottom_element(
- self.context, self.project_id, b_pod, net, 'network', body)
- mappings = api.get_bottom_mappings_by_top_id(self.context,
- 'top_net_id', 'network')
- self.assertEqual(bottom_port_id, mappings[0][1])
-
- @patch.object(FakeClient, 'create_resources')
- def test_prepare_neutron_element_create_res_exception(self, mock_method):
- mock_method.side_effect = FakeException()
- t_pod, b_pod = self._prepare_pod()
- net = {'id': 'top_net_id'}
- body = {'network': {'name': 'top_net_id'}}
- self.assertRaises(FakeException,
- self.controller.helper.prepare_bottom_element,
- self.context, self.project_id, b_pod, net,
- 'network', body)
- mappings = api.get_bottom_mappings_by_top_id(self.context,
- 'top_net_id', 'network')
- self.assertEqual(0, len(mappings))
-
- def _check_routes(self, b_pod):
- for res in (TOP_NETS, TOP_SUBNETS, BOTTOM_NETS, BOTTOM_SUBNETS):
- self.assertEqual(1, len(res))
- enable_dhcp = TOP_SUBNETS[0]['enable_dhcp']
- self.assertEqual(enable_dhcp, BOTTOM_SUBNETS[0]['enable_dhcp'])
- # top vm port, top interface port, top dhcp port
- t_port_num = 3 if enable_dhcp else 2
- # bottom vm port, bottom dhcp port
- b_port_num = 2 if enable_dhcp else 1
- self.assertEqual(t_port_num, len(TOP_PORTS))
- self.assertEqual(b_port_num, len(BOTTOM_PORTS))
-
- with self.context.session.begin():
- routes = core.query_resource(self.context,
- models.ResourceRouting, [], [])
- # bottom network, bottom subnet, bottom port, no top dhcp and bottom
- # dhcp if dhcp disabled
- entry_num = 6 if enable_dhcp else 4
- self.assertEqual(entry_num, len(routes))
- actual = [[], [], [], []]
- actual[3].append(constants.interface_port_name % (
- b_pod['pod_id'], TOP_SUBNETS[0]['id']))
- if entry_num > 4:
- actual.extend([[], []])
- actual[5].append(constants.dhcp_port_name % TOP_SUBNETS[0]['id'])
-
- for region in ('t_region', 'b_region'):
- actual[0].append(self.controller._get_client(
- region).list_resources('network', self.context, [])[0]['id'])
- actual[1].append(self.controller._get_client(
- region).list_resources('subnet', self.context, [])[0]['id'])
- ports = self.controller._get_client(
- region).list_resources('port', self.context, [])
-
- for port in ports:
- if port.get('device_id'):
- dhcp_port_id = port['id']
- elif port.get('device_owner'):
- gateway_port_id = port['id']
- else:
- vm_port_id = port['id']
-
- actual[2].append(vm_port_id)
- if region == 't_region':
- actual[3].append(gateway_port_id)
- if entry_num > 4:
- actual[4].append(dhcp_port_id)
- if region == 't_region':
- actual[5].append(dhcp_port_id)
-
- expect = [[route['top_id'], route['bottom_id']] for route in routes]
- self.assertItemsEqual(expect, actual)
-
- def test_handle_network(self):
- t_pod, b_pod = self._prepare_pod()
- net = {'id': 'top_net_id', 'name': 'net'}
- subnet = {'id': 'top_subnet_id',
- 'network_id': 'top_net_id',
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': True}
- TOP_NETS.append(net)
- TOP_SUBNETS.append(subnet)
- self.controller._handle_network(self.context, b_pod, net, [subnet])
- self._check_routes(b_pod)
-
- def test_handle_network_dhcp_disable(self):
- t_pod, b_pod = self._prepare_pod()
- net = {'id': 'top_net_id', 'name': 'net'}
- subnet = {'id': 'top_subnet_id',
- 'network_id': 'top_net_id',
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': False}
- TOP_NETS.append(net)
- TOP_SUBNETS.append(subnet)
- self.controller._handle_network(self.context, b_pod, net, [subnet])
- self._check_routes(b_pod)
-
- def test_handle_port(self):
- t_pod, b_pod = self._prepare_pod()
- net = {'id': 'top_net_id', 'name': 'net'}
- subnet = {'id': 'top_subnet_id',
- 'network_id': 'top_net_id',
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': True}
- port = {
- 'id': 'top_port_id',
- 'network_id': 'top_net_id',
- 'mac_address': 'fa:16:3e:96:41:07',
- 'fixed_ips': [{'subnet_id': 'top_subnet_id',
- 'ip_address': '10.0.0.7'}]
- }
- TOP_NETS.append(net)
- TOP_SUBNETS.append(subnet)
- TOP_PORTS.append(port)
- self.controller._handle_port(self.context, b_pod, port)
- self._check_routes(b_pod)
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(FakeClient, 'create_servers')
- @patch.object(context, 'extract_context_from_environ')
- def test_post_with_network_az(self, mock_ctx, mock_create):
- t_pod, b_pod = self._prepare_pod()
- top_net_id = 'top_net_id'
- top_subnet_id = 'top_subnet_id'
- top_sg_id = 'top_sg_id'
- t_net = {'id': top_net_id, 'name': 'net'}
- t_subnet = {'id': top_subnet_id,
- 'network_id': top_net_id,
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': True}
- t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
- 'tenant_id': self.project_id,
- 'security_group_rules': [
- {'remote_group_id': top_sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- {'remote_group_id': None,
- 'direction': 'egress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- ]}
- TOP_NETS.append(t_net)
- TOP_SUBNETS.append(t_subnet)
- TOP_SGS.append(t_sg)
-
- server_name = 'test_server'
- image_id = 'image_id'
- flavor_id = 1
- body = {
- 'server': {
- 'name': server_name,
- 'imageRef': image_id,
- 'flavorRef': flavor_id,
- 'availability_zone': b_pod['az_name'],
- 'networks': [{'uuid': top_net_id}]
- }
- }
- mock_create.return_value = {'id': 'bottom_server_id'}
- mock_ctx.return_value = self.context
-
- # update top net for test purpose, correct az
- TOP_NETS[0]['availability_zone_hints'] = ['b_az']
- self.controller.post(**body)
-
- # update top net for test purpose, wrong az
- TOP_NETS[0]['availability_zone_hints'] = ['fake_az']
- res = self.controller.post(**body)
- self._validate_error_code(res, 400)
-
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(FakeClient, 'create_servers')
@patch.object(context, 'extract_context_from_environ')
@@ -648,22 +427,10 @@ class ServerTest(unittest.TestCase):
server_dict = self.controller.post(**body)['server']
- for port in BOTTOM_PORTS:
- if 'device_id' not in port:
- bottom_port_id = port['id']
- for sg in BOTTOM_SGS:
- if sg['name'] == top_sg_id:
- bottom_sg = sg
-
mock_create.assert_called_with(self.context, name=server_name,
image=image_id, flavor=flavor_id,
- nics=[{'port-id': bottom_port_id}],
- security_groups=[bottom_sg['id']])
- # make sure remote group is extended to ip addresses
- for rule in bottom_sg['security_group_rules']:
- if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress':
- self.assertIsNone(rule['remote_group_id'])
- self.assertEqual('10.0.0.0/24', rule['remote_ip_prefix'])
+ nics=[{'net-id': top_net_id}],
+ security_groups=['default'])
with self.context.session.begin():
routes = core.query_resource(self.context, models.ResourceRouting,
@@ -676,105 +443,6 @@ class ServerTest(unittest.TestCase):
self.assertEqual(b_pod['pod_id'], routes[0]['pod_id'])
self.assertEqual(self.project_id, routes[0]['project_id'])
- # make sure security group mapping is built
- routes = core.query_resource(self.context, models.ResourceRouting,
- [{'key': 'resource_type',
- 'comparator': 'eq',
- 'value': 'security_group'}], [])
- self.assertEqual(1, len(routes))
- self.assertEqual(top_sg_id, routes[0]['top_id'])
- self.assertEqual(bottom_sg['id'], routes[0]['bottom_id'])
- self.assertEqual(b_pod['pod_id'], routes[0]['pod_id'])
- self.assertEqual(self.project_id, routes[0]['project_id'])
-
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(FakeClient, 'create_servers')
- @patch.object(context, 'extract_context_from_environ')
- def test_post_exception_retry(self, mock_ctx, mock_server):
- t_pod, b_pod = self._prepare_pod()
- top_net_id = 'top_net_id'
- top_subnet_id = 'top_subnet_id'
- top_sg_id = 'top_sg_id'
-
- t_net = {'id': top_net_id, 'name': 'net'}
- t_subnet = {'id': top_subnet_id,
- 'network_id': top_net_id,
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': True}
- t_sg = {'id': top_sg_id, 'name': 'test_sg', 'description': '',
- 'tenant_id': self.project_id,
- 'security_group_rules': [
- {'remote_group_id': None,
- 'direction': 'ingress',
- 'remote_ip_prefix': '10.0.1.0/24',
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- {'remote_group_id': None,
- 'direction': 'egress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- ]}
- TOP_NETS.append(t_net)
- TOP_SUBNETS.append(t_subnet)
- TOP_SGS.append(t_sg)
-
- server_name = 'test_server'
- image_id = 'image_id'
- flavor_id = 1
- body = {
- 'server': {
- 'name': server_name,
- 'imageRef': image_id,
- 'flavorRef': flavor_id,
- 'availability_zone': b_pod['az_name'],
- 'networks': [{'uuid': top_net_id}],
- 'security_groups': [{'name': 'test_sg'}]
- }
- }
- mock_server.return_value = {'id': 'bottom_server_id'}
- mock_ctx.return_value = self.context
-
- create_security_group_rules = FakeClient.create_security_group_rules
- FakeClient.create_security_group_rules = mock.Mock()
- FakeClient.create_security_group_rules.side_effect = \
- q_exceptions.ConnectionFailed
-
- self.assertRaises(q_exceptions.ConnectionFailed, self.controller.post,
- **body)
- with self.context.session.begin():
- routes = core.query_resource(
- self.context, models.ResourceRouting,
- [{'key': 'top_sg_id', 'comparator': 'eq',
- 'value': t_sg['id']},
- {'key': 'pod_id', 'comparator': 'eq',
- 'value': 'b_pod_uuid'}], [])
- self.assertIsNone(routes[0]['bottom_id'])
-
- # test we can redo after exception
- FakeClient.create_security_group_rules = create_security_group_rules
- self.controller.post(**body)
-
- for port in BOTTOM_PORTS:
- if 'device_id' not in port:
- bottom_port_id = port['id']
- for sg in BOTTOM_SGS:
- if sg['name'] == top_sg_id:
- bottom_sg = sg
-
- mock_server.assert_called_with(self.context, name=server_name,
- image=image_id, flavor=flavor_id,
- nics=[{'port-id': bottom_port_id}],
- security_groups=[bottom_sg['id']])
-
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(FakeClient, 'create_servers')
@patch.object(context, 'extract_context_from_environ')
@@ -859,49 +527,20 @@ class ServerTest(unittest.TestCase):
mock_create.return_value = {'id': 'bottom_server2_id'}
self.controller.post(**body)['server']
- for port in BOTTOM1_PORTS:
- if 'device_id' not in port:
- bottom_port1_id = port['id']
- for port in BOTTOM2_PORTS:
- if 'device_id' not in port:
- bottom_port2_id = port['id']
- for sg in BOTTOM1_SGS:
- if sg['name'] == top_sg_id:
- bottom_sg1 = sg
- for sg in BOTTOM2_SGS:
- if sg['name'] == top_sg_id:
- bottom_sg2 = sg
-
calls = [mock.call(self.context, name='test_server1', image=image_id,
flavor=flavor_id,
- nics=[{'port-id': bottom_port1_id}],
- security_groups=[bottom_sg1['id']]),
+ nics=[{'net-id': top_net1_id}],
+ security_groups=['default']),
mock.call(self.context, name='test_server2', image=image_id,
flavor=flavor_id,
- nics=[{'port-id': bottom_port2_id}],
- security_groups=[bottom_sg2['id']])]
+ nics=[{'net-id': top_net2_id}],
+ security_groups=['default'])]
mock_create.assert_has_calls(calls)
- # make sure remote group is extended to ip addresses
- expected_ips = ['10.0.1.0/24', '10.0.2.0/24']
- ips = []
- for rule in bottom_sg1['security_group_rules']:
- if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress':
- self.assertIsNone(rule['remote_group_id'])
- ips.append(rule['remote_ip_prefix'])
- self.assertEqual(expected_ips, ips)
- ips = []
- for rule in bottom_sg2['security_group_rules']:
- if rule['ethertype'] == 'IPv4' and rule['direction'] == 'ingress':
- self.assertIsNone(rule['remote_group_id'])
- ips.append(rule['remote_ip_prefix'])
- self.assertEqual(expected_ips, ips)
-
- @patch.object(xrpcapi.XJobAPI, 'delete_server_port')
@patch.object(FakeClient, 'delete_servers')
@patch.object(pecan, 'response', new=FakeResponse)
@patch.object(context, 'extract_context_from_environ')
- def test_delete(self, mock_ctx, mock_delete, mock_delete_port):
+ def test_delete(self, mock_ctx, mock_delete):
t_pod, b_pod = self._prepare_pod()
mock_ctx.return_value = self.context
t_server_id = 't_server_id'
@@ -923,7 +562,6 @@ class ServerTest(unittest.TestCase):
mock_delete.return_value = ()
res = self.controller.delete(t_server_id)
- mock_delete_port.assert_called_once_with(self.context, port_id)
mock_delete.assert_called_once_with(self.context, b_server_id)
self.assertEqual(204, res.status)
@@ -973,77 +611,6 @@ class ServerTest(unittest.TestCase):
res['Error']['message'])
self.assertEqual(404, res['Error']['code'])
- @patch.object(pecan, 'response', new=FakeResponse)
- @patch.object(xrpcapi.XJobAPI, 'setup_bottom_router')
- @patch.object(FakeClient, 'create_servers')
- @patch.object(context, 'extract_context_from_environ')
- def test_post_l3_involved(self, mock_ctx, mock_create, mock_setup):
- t_pod, b_pod = self._prepare_pod(1)
-
- top_net_id = 'top_net_id'
- top_subnet_id = 'top_subnet_id'
- top_port_id = 'top_port_id'
- top_sg_id = 'top_sg_id'
- top_router_id = 'top_router_id'
-
- t_net = {'id': top_net_id, 'name': 'net'}
- t_subnet = {'id': top_subnet_id,
- 'network_id': top_net_id,
- 'ip_version': 4,
- 'cidr': '10.0.0.0/24',
- 'gateway_ip': '10.0.0.1',
- 'allocation_pools': [{'start': '10.0.0.2',
- 'end': '10.0.0.254'}],
- 'enable_dhcp': True}
- t_port = {'id': top_port_id,
- 'network_id': top_net_id,
- 'device_id': top_router_id,
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': top_subnet_id,
- 'ip_address': '10.0.0.1'}],
- 'mac_address': 'fa:16:3e:96:41:03'}
- t_sg = {'id': top_sg_id, 'name': 'default', 'description': '',
- 'tenant_id': self.project_id,
- 'security_group_rules': [
- {'remote_group_id': top_sg_id,
- 'direction': 'ingress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- {'remote_group_id': None,
- 'direction': 'egress',
- 'remote_ip_prefix': None,
- 'protocol': None,
- 'port_range_max': None,
- 'port_range_min': None,
- 'ethertype': 'IPv4'},
- ]}
- TOP_NETS.append(t_net)
- TOP_SUBNETS.append(t_subnet)
- TOP_PORTS.append(t_port)
- TOP_SGS.append(t_sg)
-
- server_name = 'test_server'
- image_id = 'image_id'
- flavor_id = 1
- body = {
- 'server': {
- 'name': server_name,
- 'imageRef': image_id,
- 'flavorRef': flavor_id,
- 'availability_zone': b_pod['az_name'],
- 'networks': [{'port': top_port_id}]
- }
- }
- mock_create.return_value = {'id': 'bottom_server_id'}
- mock_ctx.return_value = self.context
-
- self.controller.post(**body)['server']
- mock_setup.assert_called_with(self.context, top_net_id, top_router_id,
- b_pod['pod_id'])
-
@patch.object(pecan, 'response', new=FakeResponse)
def test_process_injected_file_quota(self):
ctx = self.context.elevated()
@@ -1247,13 +814,11 @@ class ServerTest(unittest.TestCase):
self.assertEqual(server_name, ret_server['name'])
self.assertEqual(image_id, ret_server['image'])
self.assertEqual(flavor_id, ret_server['flavor'])
- self.assertEqual(t_net['name'], ret_server['addresses'].keys()[0])
ret_server = self.controller.get_one('detail')['servers'][0]
self.assertEqual(server_name, ret_server['name'])
self.assertEqual(image_id, ret_server['image'])
self.assertEqual(flavor_id, ret_server['flavor'])
- self.assertEqual(t_net['name'], ret_server['addresses'].keys()[0])
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())
diff --git a/tricircle/tests/unit/nova_apigw/controllers/test_volume.py b/trio2o/tests/unit/nova_apigw/controllers/test_volume.py
similarity index 95%
rename from tricircle/tests/unit/nova_apigw/controllers/test_volume.py
rename to trio2o/tests/unit/nova_apigw/controllers/test_volume.py
index f4f8c22..5bdce20 100644
--- a/tricircle/tests/unit/nova_apigw/controllers/test_volume.py
+++ b/trio2o/tests/unit/nova_apigw/controllers/test_volume.py
@@ -20,13 +20,13 @@ import unittest
from oslo_utils import uuidutils
-from tricircle.common import client
-from tricircle.common import constants
-from tricircle.common import context
-from tricircle.db import api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.nova_apigw.controllers import volume
+from trio2o.common import client
+from trio2o.common import constants
+from trio2o.common import context
+from trio2o.db import api
+from trio2o.db import core
+from trio2o.db import models
+from trio2o.nova_apigw.controllers import volume
class FakeResponse(object):
diff --git a/tricircle/tests/unit/network/__init__.py b/trio2o/tests/unit/xjob/__init__.py
similarity index 100%
rename from tricircle/tests/unit/network/__init__.py
rename to trio2o/tests/unit/xjob/__init__.py
diff --git a/tricircle/tests/unit/xjob/test_xmanager.py b/trio2o/tests/unit/xjob/test_xmanager.py
similarity index 61%
rename from tricircle/tests/unit/xjob/test_xmanager.py
rename to trio2o/tests/unit/xjob/test_xmanager.py
index b5c096c..e6f7425 100644
--- a/tricircle/tests/unit/xjob/test_xmanager.py
+++ b/trio2o/tests/unit/xjob/test_xmanager.py
@@ -14,20 +14,19 @@
# limitations under the License.
import datetime
-import mock
from mock import patch
import unittest
from oslo_config import cfg
from oslo_utils import uuidutils
-from tricircle.common import constants
-from tricircle.common import context
-import tricircle.db.api as db_api
-from tricircle.db import core
-from tricircle.db import models
-from tricircle.xjob import xmanager
-from tricircle.xjob import xservice
+from trio2o.common import constants
+from trio2o.common import context
+import trio2o.db.api as db_api
+from trio2o.db import core
+from trio2o.db import models
+from trio2o.xjob import xmanager
+from trio2o.xjob import xservice
BOTTOM1_NETWORK = []
@@ -107,106 +106,6 @@ class XManagerTest(unittest.TestCase):
self.context = context.Context()
self.xmanager = FakeXManager()
- @patch.object(FakeClient, 'update_routers')
- def test_configure_extra_routes(self, mock_update):
- top_router_id = 'router_id'
- for i in xrange(1, 3):
- pod_dict = {'pod_id': 'pod_id_%d' % i,
- 'pod_name': 'pod_%d' % i,
- 'az_name': 'az_name_%d' % i}
- db_api.create_pod(self.context, pod_dict)
-
- network = {'id': 'network_%d_id' % i}
- bridge_network = {'id': 'bridge_network_%d_id' % i}
- router = {'id': 'router_%d_id' % i}
- subnet = {
- 'id': 'subnet_%d_id' % i,
- 'network_id': network['id'],
- 'cidr': '10.0.%d.0/24' % i,
- 'gateway_ip': '10.0.%d.1' % i,
- }
- bridge_subnet = {
- 'id': 'bridge_subnet_%d_id' % i,
- 'network_id': bridge_network['id'],
- 'cidr': '100.0.1.0/24',
- 'gateway_ip': '100.0.1.%d' % i,
- }
- port = {
- 'network_id': network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': subnet['id'],
- 'ip_address': subnet['gateway_ip']}]
- }
- vm_port = {
- 'network_id': network['id'],
- 'device_id': 'vm%d_id' % i,
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': subnet['id'],
- 'ip_address': '10.0.%d.3' % i}]
- }
- bridge_port = {
- 'network_id': bridge_network['id'],
- 'device_id': router['id'],
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': bridge_subnet['id'],
- 'ip_address': bridge_subnet['gateway_ip']}]
- }
- pod_name = 'pod_%d' % i
- RES_MAP[pod_name]['network'].append(network)
- RES_MAP[pod_name]['network'].append(bridge_network)
- RES_MAP[pod_name]['subnet'].append(subnet)
- RES_MAP[pod_name]['subnet'].append(bridge_subnet)
- RES_MAP[pod_name]['port'].append(port)
- RES_MAP[pod_name]['port'].append(vm_port)
- RES_MAP[pod_name]['port'].append(bridge_port)
- RES_MAP[pod_name]['router'].append(router)
-
- route = {'top_id': top_router_id, 'bottom_id': router['id'],
- 'pod_id': pod_dict['pod_id'], 'resource_type': 'router'}
- with self.context.session.begin():
- core.create_resource(self.context, models.ResourceRouting,
- route)
- BOTTOM1_NETWORK.append({'id': 'network_3_id'})
- BOTTOM1_SUBNET.append({'id': 'subnet_3_id',
- 'network_id': 'network_3_id',
- 'cidr': '10.0.3.0/24',
- 'gateway_ip': '10.0.3.1'})
- BOTTOM1_PORT.append({'network_id': 'network_3_id',
- 'device_id': 'router_1_id',
- 'device_owner': 'network:router_interface',
- 'fixed_ips': [{'subnet_id': 'subnet_3_id',
- 'ip_address': '10.0.3.1'}]})
- BOTTOM1_PORT.append({'network_id': 'network_3_id',
- 'device_id': 'vm3_id',
- 'device_owner': 'compute:None',
- 'fixed_ips': [{'subnet_id': 'subnet_3_id',
- 'ip_address': '10.0.3.3'}]})
-
- self.xmanager.configure_extra_routes(self.context,
- payload={'router': top_router_id})
- calls = [mock.call(self.context, 'router_1_id',
- {'router': {
- 'routes': [{'nexthop': '100.0.1.2',
- 'destination': '10.0.2.3/32'}]}}),
- mock.call(self.context, 'router_2_id',
- {'router': {
- 'routes': [{'nexthop': '100.0.1.1',
- 'destination': '10.0.1.3/32'},
- {'nexthop': '100.0.1.1',
- 'destination': '10.0.3.3/32'}]}}),
- mock.call(self.context, 'router_2_id',
- {'router': {
- 'routes': [{'nexthop': '100.0.1.1',
- 'destination': '10.0.3.3/32'},
- {'nexthop': '100.0.1.1',
- 'destination': '10.0.1.3/32'}]}})]
-
- called = mock_update.call_args_list[1] == calls[1]
- called = called or (mock_update.call_args_list[1] == calls[2])
- called = called and (mock_update.call_args_list[0] == calls[0])
- self.assertTrue(called)
-
def test_job_handle(self):
@xmanager._job_handle('fake_resource')
def fake_handle(self, ctx, payload):
diff --git a/tricircle/tests/unit/nova_apigw/__init__.py b/trio2o/xjob/__init__.py
similarity index 100%
rename from tricircle/tests/unit/nova_apigw/__init__.py
rename to trio2o/xjob/__init__.py
diff --git a/tricircle/cinder_apigw/opts.py b/trio2o/xjob/opts.py
similarity index 83%
rename from tricircle/cinder_apigw/opts.py
rename to trio2o/xjob/opts.py
index 6313838..5655723 100644
--- a/tricircle/cinder_apigw/opts.py
+++ b/trio2o/xjob/opts.py
@@ -13,10 +13,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-import tricircle.cinder_apigw.app
+import trio2o.xjob.xservice
def list_opts():
return [
- ('DEFAULT', tricircle.cinder_apigw.app.common_opts),
+ ('DEFAULT', trio2o.xjob.xservice.common_opts),
+ ('DEFAULT', trio2o.xjob.xservice.service_opts),
]
diff --git a/trio2o/xjob/xmanager.py b/trio2o/xjob/xmanager.py
new file mode 100644
index 0000000..247a966
--- /dev/null
+++ b/trio2o/xjob/xmanager.py
@@ -0,0 +1,244 @@
+# Copyright 2015 Huawei Technologies Co., Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import eventlet
+import random
+import six
+
+from oslo_config import cfg
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_service import periodic_task
+
+from trio2o.common import client
+from trio2o.common import constants
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common.i18n import _LI
+from trio2o.common.i18n import _LW
+from trio2o.common import xrpcapi
+import trio2o.db.api as db_api
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+
+IN_TEST = False
+AZ_HINTS = 'availability_zone_hints'
+
+
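+# Decorator factory for job handlers: it records a new job, then loops until
+# the job has succeeded, the wrapper has been running longer than
+# CONF.worker_handle_timeout, or another worker is confirmed to be handling
+# the same job. The per-resource lock is taken via db_api.register_job and
+# the outcome is recorded via db_api.finish_job. Usage is roughly as follows
+# (handler name illustrative):
+#     @_job_handle('fake_resource')
+#     def handle_fake_resource(self, ctx, payload):
+#         ...
+# where payload[job_type] carries the id of the resource to process.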
+def _job_handle(job_type):
+ def handle_func(func):
+ @six.wraps(func)
+ def handle_args(*args, **kwargs):
+ if IN_TEST:
+                # NOTE(zhiyuan) the job mechanism causes unpredictable
+                # results in unit tests, so we would like to bypass it.
+                # However, we have trouble mocking a decorator that
+                # decorates member functions, which is why we use this
+                # flag; not an elegant way, though.
+ func(*args, **kwargs)
+ return
+ ctx = args[1]
+ payload = kwargs['payload']
+
+ resource_id = payload[job_type]
+ db_api.new_job(ctx, job_type, resource_id)
+ start_time = datetime.datetime.now()
+
+ while True:
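+                # each iteration first checks whether the job has already
+                # succeeded or the handling window has expired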
+ current_time = datetime.datetime.now()
+ delta = current_time - start_time
+ if delta.seconds >= CONF.worker_handle_timeout:
+                    # quit when this handler has been running for too long
+ break
+ time_new = db_api.get_latest_timestamp(ctx, constants.JS_New,
+ job_type, resource_id)
+ time_success = db_api.get_latest_timestamp(
+ ctx, constants.JS_Success, job_type, resource_id)
+ if time_success and time_success >= time_new:
+ break
+ job = db_api.register_job(ctx, job_type, resource_id)
+ if not job:
+                    # failed to obtain the lock; let another worker handle it
+ running_job = db_api.get_running_job(ctx, job_type,
+ resource_id)
+ if not running_job:
+                        # there are two reasons running_job can be None:
+                        # either the running job has just finished, or all
+                        # workers failed to register the job because of a
+                        # deadlock exception, so we sleep and try again
+ eventlet.sleep(CONF.worker_sleep_time)
+ continue
+ job_time = running_job['timestamp']
+ current_time = datetime.datetime.now()
+ delta = current_time - job_time
+ if delta.seconds > CONF.job_run_expire:
+                        # the previous running job has expired, so we set
+                        # its status to Fail and try to obtain the lock again
+ db_api.finish_job(ctx, running_job['id'], False,
+ time_new)
+ LOG.warning(_LW('Job %(job)s of type %(job_type)s for '
+ 'resource %(resource)s expires, set '
+ 'its state to Fail'),
+ {'job': running_job['id'],
+ 'job_type': job_type,
+ 'resource': resource_id})
+ eventlet.sleep(CONF.worker_sleep_time)
+ continue
+ else:
+                        # the previous running job is still valid, so we
+                        # leave the job to the worker holding the lock
+ break
+                # lock successfully obtained, start executing the handler
+ try:
+ func(*args, **kwargs)
+ except Exception:
+ db_api.finish_job(ctx, job['id'], False, time_new)
+ LOG.error(_LE('Job %(job)s of type %(job_type)s for '
+ 'resource %(resource)s fails'),
+ {'job': job['id'],
+ 'job_type': job_type,
+ 'resource': resource_id})
+ break
+ db_api.finish_job(ctx, job['id'], True, time_new)
+ eventlet.sleep(CONF.worker_sleep_time)
+ return handle_args
+ return handle_func
+
+
+class PeriodicTasks(periodic_task.PeriodicTasks):
+ def __init__(self):
+ super(PeriodicTasks, self).__init__(CONF)
+
+
+class XManager(PeriodicTasks):
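+    # manager serving as the RPC endpoint of the XJob service (see
+    # xservice.py); it also runs periodic tasks such as redo_failed_job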
+
+ target = messaging.Target(version='1.0')
+
+ def __init__(self, host=None, service_name='xjob'):
+
+ LOG.debug(_('XManager initialization...'))
+
+ if not host:
+ host = CONF.host
+ self.host = host
+ self.service_name = service_name
+ # self.notifier = rpc.get_notifier(self.service_name, self.host)
+ self.additional_endpoints = []
+ self.clients = {constants.TOP: client.Client()}
+ self.job_handles = {}
+ self.xjob_handler = xrpcapi.XJobAPI()
+ super(XManager, self).__init__()
+
+ def _get_client(self, pod_name=None):
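+        # without a pod name, return the client for the top pod; per-pod
+        # clients are created lazily and cached by pod name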
+ if not pod_name:
+ return self.clients[constants.TOP]
+ if pod_name not in self.clients:
+ self.clients[pod_name] = client.Client(pod_name)
+ return self.clients[pod_name]
+
+ def periodic_tasks(self, context, raise_on_error=False):
+ """Tasks to be run at a periodic interval."""
+ return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
+
+ def init_host(self):
+
+ """init_host
+
+ Hook to do additional manager initialization when one requests
+ the service be started. This is called before any service record
+ is created.
+ Child classes should override this method.
+ """
+
+ LOG.debug(_('XManager init_host...'))
+
+ pass
+
+ def cleanup_host(self):
+
+ """cleanup_host
+
+ Hook to do cleanup work when the service shuts down.
+ Child classes should override this method.
+ """
+
+ LOG.debug(_('XManager cleanup_host...'))
+
+ pass
+
+ def pre_start_hook(self):
+
+ """pre_start_hook
+
+ Hook to provide the manager the ability to do additional
+ start-up work before any RPC queues/consumers are created. This is
+ called after other initialization has succeeded and a service
+ record is created.
+ Child classes should override this method.
+ """
+
+ LOG.debug(_('XManager pre_start_hook...'))
+
+ pass
+
+ def post_start_hook(self):
+
+ """post_start_hook
+
+ Hook to provide the manager the ability to do additional
+ start-up work immediately after a service creates RPC consumers
+ and starts 'running'.
+ Child classes should override this method.
+ """
+
+ LOG.debug(_('XManager post_start_hook...'))
+
+ pass
+
+ # rpc message endpoint handling
+ def test_rpc(self, ctx, payload):
+
+ LOG.info(_LI("xmanager receive payload: %s"), payload)
+
+ info_text = "xmanager receive payload: %s" % payload
+
+ return info_text
+
+ @staticmethod
+ def _get_resource_by_name(cli, cxt, _type, name):
+ return cli.list_resources(_type, cxt, filters=[{'key': 'name',
+ 'comparator': 'eq',
+ 'value': name}])[0]
+
+ @periodic_task.periodic_task
+ def redo_failed_job(self, ctx):
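+        # periodic task: pick one random failed job whose type this manager
+        # can handle and re-run its handler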
+ failed_jobs = db_api.get_latest_failed_jobs(ctx)
+ failed_jobs = [
+ job for job in failed_jobs if job['type'] in self.job_handles]
+ if not failed_jobs:
+ return
+ # in one run we only pick one job to handle
+ job_index = random.randint(0, len(failed_jobs) - 1)
+ failed_job = failed_jobs[job_index]
+ job_type = failed_job['type']
+ payload = {job_type: failed_job['resource_id']}
+ LOG.debug(_('Redo failed job for %(resource_id)s of type '
+ '%(job_type)s'),
+ {'resource_id': failed_job['resource_id'],
+ 'job_type': job_type})
+ self.job_handles[job_type](ctx, payload=payload)
diff --git a/tricircle/xjob/xservice.py b/trio2o/xjob/xservice.py
similarity index 93%
rename from tricircle/xjob/xservice.py
rename to trio2o/xjob/xservice.py
index dc1a39d..b3683b3 100644
--- a/tricircle/xjob/xservice.py
+++ b/trio2o/xjob/xservice.py
@@ -24,27 +24,27 @@ from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import service as srv
-from tricircle.common.i18n import _
-from tricircle.common.i18n import _LE
-from tricircle.common.i18n import _LI
+from trio2o.common.i18n import _
+from trio2o.common.i18n import _LE
+from trio2o.common.i18n import _LI
-from tricircle.common import baserpc
-from tricircle.common import context
-from tricircle.common import rpc
-from tricircle.common import version
+from trio2o.common import baserpc
+from trio2o.common import context
+from trio2o.common import rpc
+from trio2o.common import version
-from tricircle.common.serializer import TricircleSerializer as Serializer
+from trio2o.common.serializer import Trio2oSerializer as Serializer
-from tricircle.common import topics
-from tricircle.xjob.xmanager import XManager
+from trio2o.common import topics
+from trio2o.xjob.xmanager import XManager
_TIMER_INTERVAL = 30
_TIMER_INTERVAL_MAX = 60
common_opts = [
- cfg.StrOpt('host', default='tricircle.xhost',
+ cfg.StrOpt('host', default='trio2o.xhost',
help=_("The host name for RPC server")),
cfg.IntOpt('workers', default=1,
help=_("Number of workers")),
@@ -167,10 +167,10 @@ class XService(srv.Service):
if not binary:
binary = os.path.basename(sys.argv[0])
if not topic:
- topic = binary.rpartition('tricircle-')[2]
+ topic = binary.rpartition('trio2o-')[2]
if not manager:
manager_cls = ('%s_manager' %
- binary.rpartition('tricircle-')[2])
+ binary.rpartition('trio2o-')[2])
manager = CONF.get(manager_cls, None)
if report_interval is None:
report_interval = CONF.report_interval