Move all nsxlib code and tests to vmware_nsxlib
Change-Id: I75533e713a680674368d16f0a7aeb4fdbffe3608
This commit is contained in:
parent
f32c46d037
commit
e9ddc3dd33
@ -1,7 +1,4 @@
|
||||
[DEFAULT]
|
||||
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
|
||||
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
|
||||
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
|
||||
${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
|
||||
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 OS_LOG_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./vmware_nsxlib/tests/unit} $LISTOPT $IDOPTION
|
||||
test_id_option=--load-list $IDFILE
|
||||
test_list_option=--list
|
@ -3,3 +3,15 @@
|
||||
# process, which may cause wedges in the gate later.
|
||||
|
||||
pbr>=1.6 # Apache-2.0
|
||||
|
||||
enum34;python_version=='2.7' or python_version=='2.6' or python_version=='3.3' # BSD
|
||||
eventlet!=0.18.3,>=0.18.2 # MIT
|
||||
netaddr!=0.7.16,>=0.7.13 # BSD
|
||||
retrying!=1.3.0,>=1.2.3 # Apache-2.0
|
||||
six>=1.9.0 # MIT
|
||||
neutron-lib>=0.4.0 # Apache-2.0
|
||||
oslo.i18n>=2.1.0 # Apache-2.0
|
||||
oslo.log>=3.11.0 # Apache-2.0
|
||||
oslo.serialization>=1.10.0 # Apache-2.0
|
||||
oslo.service>=1.10.0 # Apache-2.0
|
||||
oslo.utils>=3.16.0 # Apache-2.0
|
||||
|
260
run_tests.sh
Normal file
260
run_tests.sh
Normal file
@ -0,0 +1,260 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -eu
|
||||
|
||||
function usage {
|
||||
echo "Usage: $0 [OPTION]..."
|
||||
echo "Run Neutron's test suite(s)"
|
||||
echo ""
|
||||
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
|
||||
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
|
||||
echo " -s, --no-site-packages Isolate the virtualenv from the global Python environment"
|
||||
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
|
||||
echo " -n, --no-recreate-db Don't recreate the test database."
|
||||
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
|
||||
echo " -u, --update Update the virtual environment with any newer package versions"
|
||||
echo " -p, --pep8 Just run PEP8 and HACKING compliance check"
|
||||
echo " -8, --pep8-only-changed [<basecommit>]"
|
||||
echo " Just run PEP8 and HACKING compliance check on files changed since HEAD~1 (or <basecommit>)"
|
||||
echo " -P, --no-pep8 Don't run static code checks"
|
||||
echo " -c, --coverage Generate coverage report"
|
||||
echo " -d, --debug Run tests with testtools instead of testr. This allows you to use the debugger."
|
||||
echo " -h, --help Print this usage message"
|
||||
echo " --virtual-env-path <path> Location of the virtualenv directory"
|
||||
echo " Default: \$(pwd)"
|
||||
echo " --virtual-env-name <name> Name of the virtualenv directory"
|
||||
echo " Default: .venv"
|
||||
echo " --tools-path <dir> Location of the tools directory"
|
||||
echo " Default: \$(pwd)"
|
||||
echo ""
|
||||
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
|
||||
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
|
||||
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
|
||||
exit
|
||||
}
|
||||
|
||||
function process_options {
|
||||
i=1
|
||||
while [ $i -le $# ]; do
|
||||
case "${!i}" in
|
||||
-h|--help) usage;;
|
||||
-V|--virtual-env) always_venv=1; never_venv=0;;
|
||||
-N|--no-virtual-env) always_venv=0; never_venv=1;;
|
||||
-s|--no-site-packages) no_site_packages=1;;
|
||||
-r|--recreate-db) recreate_db=1;;
|
||||
-n|--no-recreate-db) recreate_db=0;;
|
||||
-f|--force) force=1;;
|
||||
-u|--update) update=1;;
|
||||
-p|--pep8) just_pep8=1;;
|
||||
-8|--pep8-only-changed) just_pep8_changed=1;;
|
||||
-P|--no-pep8) no_pep8=1;;
|
||||
-c|--coverage) coverage=1;;
|
||||
-d|--debug) debug=1;;
|
||||
--virtual-env-path)
|
||||
(( i++ ))
|
||||
venv_path=${!i}
|
||||
;;
|
||||
--virtual-env-name)
|
||||
(( i++ ))
|
||||
venv_dir=${!i}
|
||||
;;
|
||||
--tools-path)
|
||||
(( i++ ))
|
||||
tools_path=${!i}
|
||||
;;
|
||||
-*) testopts="$testopts ${!i}";;
|
||||
*) testargs="$testargs ${!i}"
|
||||
esac
|
||||
(( i++ ))
|
||||
done
|
||||
}
|
||||
|
||||
tool_path=${tools_path:-$(pwd)}
|
||||
venv_path=${venv_path:-$(pwd)}
|
||||
venv_dir=${venv_name:-.venv}
|
||||
with_venv=tools/with_venv.sh
|
||||
always_venv=0
|
||||
never_venv=0
|
||||
force=0
|
||||
no_site_packages=0
|
||||
installvenvopts=
|
||||
testargs=
|
||||
testopts=
|
||||
wrapper=""
|
||||
just_pep8=0
|
||||
just_pep8_changed=0
|
||||
no_pep8=0
|
||||
coverage=0
|
||||
debug=0
|
||||
recreate_db=1
|
||||
update=0
|
||||
|
||||
LANG=en_US.UTF-8
|
||||
LANGUAGE=en_US:en
|
||||
LC_ALL=C
|
||||
|
||||
process_options $@
|
||||
# Make our paths available to other scripts we call
|
||||
export venv_path
|
||||
export venv_dir
|
||||
export venv_name
|
||||
export tools_dir
|
||||
export venv=${venv_path}/${venv_dir}
|
||||
|
||||
if [ $no_site_packages -eq 1 ]; then
|
||||
installvenvopts="--no-site-packages"
|
||||
fi
|
||||
|
||||
|
||||
function run_tests {
|
||||
# Cleanup *pyc
|
||||
${wrapper} find . -type f -name "*.pyc" -delete
|
||||
|
||||
if [ $debug -eq 1 ]; then
|
||||
if [ "$testopts" = "" ] && [ "$testargs" = "" ]; then
|
||||
# Default to running all tests if specific test is not
|
||||
# provided.
|
||||
testargs="discover ./vmware_nsxlib/tests"
|
||||
fi
|
||||
${wrapper} python -m testtools.run $testopts $testargs
|
||||
|
||||
# Short circuit because all of the testr and coverage stuff
|
||||
# below does not make sense when running testtools.run for
|
||||
# debugging purposes.
|
||||
return $?
|
||||
fi
|
||||
|
||||
if [ $coverage -eq 1 ]; then
|
||||
TESTRTESTS="$TESTRTESTS --coverage"
|
||||
else
|
||||
TESTRTESTS="$TESTRTESTS --slowest"
|
||||
fi
|
||||
|
||||
# Just run the test suites in current environment
|
||||
set +e
|
||||
testargs=`echo "$testargs" | sed -e's/^\s*\(.*\)\s*$/\1/'`
|
||||
TESTRTESTS="$TESTRTESTS --testr-args='--subunit $testopts $testargs'"
|
||||
OS_TEST_PATH=`echo $testargs|grep -o 'vmware_nsxlib\neutron\.tests[^[:space:]:]\+'|tr . /`
|
||||
if [ -n "$OS_TEST_PATH" ]; then
|
||||
os_test_dir=$(dirname "$OS_TEST_PATH")
|
||||
else
|
||||
os_test_dir=''
|
||||
fi
|
||||
if [ -d "$OS_TEST_PATH" ]; then
|
||||
wrapper="OS_TEST_PATH=$OS_TEST_PATH $wrapper"
|
||||
elif [ -d "$os_test_dir" ]; then
|
||||
wrapper="OS_TEST_PATH=$os_test_dir $wrapper"
|
||||
fi
|
||||
echo "Running \`${wrapper} $TESTRTESTS\`"
|
||||
bash -c "${wrapper} $TESTRTESTS | ${wrapper} subunit2pyunit"
|
||||
RESULT=$?
|
||||
set -e
|
||||
|
||||
copy_subunit_log
|
||||
|
||||
if [ $coverage -eq 1 ]; then
|
||||
echo "Generating coverage report in covhtml/"
|
||||
# Don't compute coverage for common code, which is tested elsewhere
|
||||
${wrapper} coverage combine
|
||||
${wrapper} coverage html --include='neutron/*' --omit='neutron/openstack/common/*' -d covhtml -i
|
||||
fi
|
||||
|
||||
return $RESULT
|
||||
}
|
||||
|
||||
function copy_subunit_log {
|
||||
LOGNAME=`cat .testrepository/next-stream`
|
||||
LOGNAME=$(($LOGNAME - 1))
|
||||
LOGNAME=".testrepository/${LOGNAME}"
|
||||
cp $LOGNAME subunit.log
|
||||
}
|
||||
|
||||
function warn_on_flake8_without_venv {
|
||||
if [ $never_venv -eq 1 ]; then
|
||||
echo "**WARNING**:"
|
||||
echo "Running flake8 without virtual env may miss OpenStack HACKING detection"
|
||||
fi
|
||||
}
|
||||
|
||||
function run_pep8 {
|
||||
echo "Running flake8 ..."
|
||||
warn_on_flake8_without_venv
|
||||
${wrapper} flake8
|
||||
}
|
||||
|
||||
function run_pep8_changed {
|
||||
# NOTE(gilliard) We want use flake8 to check the entirety of every file that has
|
||||
# a change in it. Unfortunately the --filenames argument to flake8 only accepts
|
||||
# file *names* and there are no files named (eg) "nova/compute/manager.py". The
|
||||
# --diff argument behaves surprisingly as well, because although you feed it a
|
||||
# diff, it actually checks the file on disk anyway.
|
||||
local target=${testargs:-HEAD~1}
|
||||
local files=$(git diff --name-only $target | tr '\n' ' ')
|
||||
echo "Running flake8 on ${files}"
|
||||
warn_on_flake8_without_venv
|
||||
diff -u --from-file /dev/null ${files} | ${wrapper} flake8 --diff
|
||||
}
|
||||
|
||||
|
||||
TESTRTESTS="python setup.py testr"
|
||||
|
||||
if [ $never_venv -eq 0 ]
|
||||
then
|
||||
# Remove the virtual environment if --force used
|
||||
if [ $force -eq 1 ]; then
|
||||
echo "Cleaning virtualenv..."
|
||||
rm -rf ${venv}
|
||||
fi
|
||||
if [ $update -eq 1 ]; then
|
||||
echo "Updating virtualenv..."
|
||||
python tools/install_venv.py $installvenvopts
|
||||
fi
|
||||
if [ -e ${venv} ]; then
|
||||
wrapper="${with_venv}"
|
||||
else
|
||||
if [ $always_venv -eq 1 ]; then
|
||||
# Automatically install the virtualenv
|
||||
python tools/install_venv.py $installvenvopts
|
||||
wrapper="${with_venv}"
|
||||
else
|
||||
echo -e "No virtual environment found...create one? (Y/n) \c"
|
||||
read use_ve
|
||||
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
|
||||
# Install the virtualenv and run the test suite in it
|
||||
python tools/install_venv.py $installvenvopts
|
||||
wrapper=${with_venv}
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Delete old coverage data from previous runs
|
||||
if [ $coverage -eq 1 ]; then
|
||||
${wrapper} coverage erase
|
||||
fi
|
||||
|
||||
if [ $just_pep8 -eq 1 ]; then
|
||||
run_pep8
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ $just_pep8_changed -eq 1 ]; then
|
||||
run_pep8_changed
|
||||
exit
|
||||
fi
|
||||
|
||||
if [ $recreate_db -eq 1 ]; then
|
||||
rm -f tests.sqlite
|
||||
fi
|
||||
|
||||
run_tests
|
||||
|
||||
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
|
||||
# not when we're running tests individually. To handle this, we need to
|
||||
# distinguish between options (testopts), which begin with a '-', and
|
||||
# arguments (testargs).
|
||||
if [ -z "$testargs" ]; then
|
||||
if [ $no_pep8 -eq 0 ]; then
|
||||
run_pep8
|
||||
fi
|
||||
fi
|
@ -5,13 +5,21 @@
|
||||
hacking<0.12,>=0.11.0 # Apache-2.0
|
||||
|
||||
coverage>=3.6 # Apache-2.0
|
||||
fixtures>=3.0.0 # Apache-2.0/BSD
|
||||
mock>=2.0 # BSD
|
||||
python-subunit>=0.0.18 # Apache-2.0/BSD
|
||||
sphinx!=1.3b1,<1.3,>=1.2.1 # BSD
|
||||
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
|
||||
sphinx!=1.3b1,<1.4,>=1.2.1 # BSD
|
||||
oslosphinx>=4.7.0 # Apache-2.0
|
||||
oslotest>=1.10.0 # Apache-2.0
|
||||
testrepository>=0.0.18 # Apache-2.0/BSD
|
||||
testscenarios>=0.4 # Apache-2.0/BSD
|
||||
testresources>=0.2.4 # Apache-2.0/BSD
|
||||
testtools>=1.4.0 # MIT
|
||||
|
||||
# releasenotes
|
||||
testscenarios>=0.4 # Apache-2.0/BSD
|
||||
WebTest>=2.0 # MIT
|
||||
# This is needed for subunit-trace
|
||||
tempest-lib>=0.14.0 # Apache-2.0
|
||||
reno>=1.8.0 # Apache2
|
||||
bandit>=1.1.0 # Apache-2.0
|
||||
tempest>=12.1.0 # Apache-2.0
|
||||
pylint==1.4.5 # GPLv2
|
||||
requests-mock>=1.1 # Apache-2.0
|
||||
|
0
tools/__init__.py
Normal file
0
tools/__init__.py
Normal file
9
tools/ostestr_compat_shim.sh
Executable file
9
tools/ostestr_compat_shim.sh
Executable file
@ -0,0 +1,9 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Copied from neutron/tools. Otherwise no units tests are found.
|
||||
# preserve old behavior of using an arg as a regex when '--' is not present
|
||||
case $@ in
|
||||
(*--*) ostestr $@;;
|
||||
('') ostestr;;
|
||||
(*) ostestr --regex "$@"
|
||||
esac
|
118
tox.ini
118
tox.ini
@ -1,39 +1,109 @@
|
||||
[tox]
|
||||
minversion = 2.0
|
||||
envlist = py34,py27,pypy,pep8
|
||||
envlist = py35,py34,py27,pep8,docs
|
||||
minversion = 1.6
|
||||
skipsdist = True
|
||||
|
||||
[testenv]
|
||||
setenv = VIRTUAL_ENV={envdir}
|
||||
PYTHONWARNINGS=default::DeprecationWarning
|
||||
passenv = TRACE_FAILONLY GENERATE_HASHES http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
|
||||
usedevelop = True
|
||||
install_command = pip install -U {opts} {packages}
|
||||
setenv =
|
||||
VIRTUAL_ENV={envdir}
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = python setup.py test --slowest --testr-args='{posargs}'
|
||||
install_command =
|
||||
pip install -U -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
|
||||
deps = -r{toxinidir}/requirements.txt
|
||||
-r{toxinidir}/test-requirements.txt
|
||||
whitelist_externals = sh
|
||||
commands =
|
||||
{toxinidir}/tools/ostestr_compat_shim.sh {posargs}
|
||||
# there is also secret magic in ostestr which lets you run in a fail only
|
||||
# mode. To do this define the TRACE_FAILONLY environmental variable.
|
||||
|
||||
[testenv:common]
|
||||
# Fake job to define environment variables shared between dsvm/non-dsvm jobs
|
||||
setenv = OS_TEST_TIMEOUT=180
|
||||
commands = false
|
||||
|
||||
[testenv:functional]
|
||||
basepython = python2.7
|
||||
setenv = {[testenv]setenv}
|
||||
{[testenv:common]setenv}
|
||||
OS_TEST_PATH=./vmware_nsxlib/tests/functional
|
||||
OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs}
|
||||
deps =
|
||||
{[testenv]deps}
|
||||
-r{toxinidir}/vmware_nsxlib/tests/functional/requirements.txt
|
||||
|
||||
[testenv:dsvm-functional]
|
||||
basepython = python2.7
|
||||
setenv = OS_SUDO_TESTING=1
|
||||
OS_FAIL_ON_MISSING_DEPS=1
|
||||
OS_TEST_TIMEOUT=180
|
||||
sitepackages=True
|
||||
deps =
|
||||
{[testenv:functional]deps}
|
||||
commands =
|
||||
|
||||
[tox:jenkins]
|
||||
sitepackages = True
|
||||
|
||||
[testenv:releasenotes]
|
||||
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
|
||||
|
||||
[testenv:pep8]
|
||||
commands = flake8 {posargs}
|
||||
basepython = python2.7
|
||||
deps =
|
||||
{[testenv]deps}
|
||||
commands =
|
||||
# Checks for coding and style guidelines
|
||||
flake8
|
||||
{[testenv:genconfig]commands}
|
||||
whitelist_externals =
|
||||
sh
|
||||
bash
|
||||
|
||||
[testenv:bandit]
|
||||
deps = -r{toxinidir}/test-requirements.txt
|
||||
commands = bandit -r vmware_nsxlib -n 5 -ll
|
||||
|
||||
[testenv:cover]
|
||||
basepython = python2.7
|
||||
commands =
|
||||
python setup.py testr --coverage --testr-args='{posargs}'
|
||||
coverage report
|
||||
|
||||
[testenv:venv]
|
||||
commands = {posargs}
|
||||
|
||||
[testenv:cover]
|
||||
commands = python setup.py test --coverage --testr-args='{posargs}'
|
||||
|
||||
[testenv:docs]
|
||||
commands = python setup.py build_sphinx
|
||||
|
||||
[testenv:releasenotes]
|
||||
commands =
|
||||
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
|
||||
|
||||
[testenv:debug]
|
||||
commands = oslo_debug_helper {posargs}
|
||||
commands = sphinx-build -W -b html doc/source doc/build/html
|
||||
|
||||
[flake8]
|
||||
# E123, E125 skipped as they are invalid PEP-8.
|
||||
|
||||
show-source = True
|
||||
ignore = E123,E125
|
||||
# E125 continuation line does not distinguish itself from next logical line
|
||||
# E126 continuation line over-indented for hanging indent
|
||||
# E128 continuation line under-indented for visual indent
|
||||
# E129 visually indented line with same indent as next logical line
|
||||
# E265 block comment should start with ‘# ‘
|
||||
# H305 imports not grouped correctly
|
||||
# H307 like imports should be grouped together
|
||||
# H402 one line docstring needs punctuation
|
||||
# H404 multi line docstring should start with a summary
|
||||
# H405 multi line docstring summary not separated with an empty line
|
||||
# H904 Wrap long lines in parentheses instead of a backslash
|
||||
# TODO(dougwig) -- uncomment this to test for remaining linkages
|
||||
# N530 direct neutron imports not allowed
|
||||
ignore = E125,E126,E128,E129,E265,H305,H307,H402,H404,H405,H904,N530
|
||||
show-source = true
|
||||
builtins = _
|
||||
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
|
||||
exclude = .venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,.ropeproject
|
||||
|
||||
[hacking]
|
||||
import_exceptions = vmware_nsxlib._i18n,
|
||||
vmware_nsxlib_tempest._i18n
|
||||
local-check-factory = neutron_lib.hacking.checks.factory
|
||||
|
||||
[testenv:genconfig]
|
||||
commands =
|
||||
|
||||
[testenv:uuidgen]
|
||||
commands =
|
||||
check-uuid --fix
|
||||
|
43
vmware_nsxlib/_i18n.py
Normal file
43
vmware_nsxlib/_i18n.py
Normal file
@ -0,0 +1,43 @@
|
||||
# Copyright (c) 2015 VMware, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import oslo_i18n
|
||||
|
||||
DOMAIN = "vmware_nsxlib"
|
||||
|
||||
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
|
||||
|
||||
# The primary translation function using the well-known name "_"
|
||||
_ = _translators.primary
|
||||
|
||||
# The contextual translation function using the name "_C"
|
||||
_C = _translators.contextual_form
|
||||
|
||||
# The plural translation function using the name "_P"
|
||||
_P = _translators.plural_form
|
||||
|
||||
# Translators for log levels.
|
||||
#
|
||||
# The abbreviated names are meant to reflect the usual use of a short
|
||||
# name like '_'. The "L" is for "log" and the other letter comes from
|
||||
# the level.
|
||||
_LI = _translators.log_info
|
||||
_LW = _translators.log_warning
|
||||
_LE = _translators.log_error
|
||||
_LC = _translators.log_critical
|
||||
|
||||
|
||||
def get_available_languages():
|
||||
return oslo_i18n.get_available_languages(DOMAIN)
|
@ -1,28 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
test_vmware_nsxlib
|
||||
----------------------------------
|
||||
|
||||
Tests for `vmware_nsxlib` module.
|
||||
"""
|
||||
|
||||
from vmware_nsxlib.tests import base
|
||||
|
||||
|
||||
class TestVmware_nsxlib(base.TestCase):
|
||||
|
||||
def test_something(self):
|
||||
pass
|
0
vmware_nsxlib/tests/unit/__init__.py
Normal file
0
vmware_nsxlib/tests/unit/__init__.py
Normal file
0
vmware_nsxlib/tests/unit/v3/__init__.py
Normal file
0
vmware_nsxlib/tests/unit/v3/__init__.py
Normal file
231
vmware_nsxlib/tests/unit/v3/mocks.py
Normal file
231
vmware_nsxlib/tests/unit/v3/mocks.py
Normal file
@ -0,0 +1,231 @@
|
||||
# Copyright (c) 2015 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import requests
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from vmware_nsxlib.v3 import nsx_constants
|
||||
|
||||
|
||||
FAKE_NAME = "fake_name"
|
||||
DEFAULT_TIER0_ROUTER_UUID = "efad0078-9204-4b46-a2d8-d4dd31ed448f"
|
||||
NSX_BRIDGE_CLUSTER_NAME = 'default bridge cluster'
|
||||
FAKE_MANAGER = "fake_manager_ip"
|
||||
|
||||
|
||||
def make_fake_switch(switch_uuid=None, tz_uuid=None, name=FAKE_NAME):
|
||||
if not switch_uuid:
|
||||
switch_uuid = uuidutils.generate_uuid()
|
||||
if not tz_uuid:
|
||||
tz_uuid = uuidutils.generate_uuid()
|
||||
|
||||
fake_switch = {
|
||||
"id": switch_uuid,
|
||||
"display_name": name,
|
||||
"resource_type": "LogicalSwitch",
|
||||
"address_bindings": [],
|
||||
"transport_zone_id": tz_uuid,
|
||||
"replication_mode": nsx_constants.MTEP,
|
||||
"admin_state": nsx_constants.ADMIN_STATE_UP,
|
||||
"vni": 50056,
|
||||
"switching_profile_ids": [
|
||||
{
|
||||
"value": "64814784-7896-3901-9741-badeff705639",
|
||||
"key": "IpDiscoverySwitchingProfile"
|
||||
},
|
||||
{
|
||||
"value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1",
|
||||
"key": "SpoofGuardSwitchingProfile"
|
||||
},
|
||||
{
|
||||
"value": "93b4b7e8-f116-415d-a50c-3364611b5d09",
|
||||
"key": "PortMirroringSwitchingProfile"
|
||||
},
|
||||
{
|
||||
"value": "fbc4fb17-83d9-4b53-a286-ccdf04301888",
|
||||
"key": "SwitchSecuritySwitchingProfile"
|
||||
},
|
||||
{
|
||||
"value": "f313290b-eba8-4262-bd93-fab5026e9495",
|
||||
"key": "QosSwitchingProfile"
|
||||
}
|
||||
],
|
||||
}
|
||||
return fake_switch
|
||||
|
||||
|
||||
def make_fake_dhcp_profile():
|
||||
return {"id": uuidutils.generate_uuid(),
|
||||
"edge_cluster_id": uuidutils.generate_uuid(),
|
||||
"edge_cluster_member_indexes": [0, 1]}
|
||||
|
||||
|
||||
def make_fake_metadata_proxy():
|
||||
return {"id": uuidutils.generate_uuid(),
|
||||
"metadata_server_url": "http://1.2.3.4",
|
||||
"secret": "my secret",
|
||||
"edge_cluster_id": uuidutils.generate_uuid(),
|
||||
"edge_cluster_member_indexes": [0, 1]}
|
||||
|
||||
|
||||
class MockRequestsResponse(object):
|
||||
def __init__(self, status_code, content=None):
|
||||
self.status_code = status_code
|
||||
self.content = content
|
||||
|
||||
def json(self):
|
||||
return jsonutils.loads(self.content)
|
||||
|
||||
|
||||
class MockRequestSessionApi(object):
|
||||
|
||||
def __init__(self):
|
||||
self._store = {}
|
||||
|
||||
def _format_uri(self, uri):
|
||||
uri = urlparse.urlparse(uri).path
|
||||
while uri.endswith('/'):
|
||||
uri = uri[:-1]
|
||||
while uri.startswith('/'):
|
||||
uri = uri[1:]
|
||||
if not self._is_uuid_uri(uri):
|
||||
uri = "%s/" % uri
|
||||
return uri
|
||||
|
||||
def _is_uuid_uri(self, uri):
|
||||
return uuidutils.is_uuid_like(
|
||||
urlparse.urlparse(uri).path.split('/')[-1])
|
||||
|
||||
def _query(self, search_key, copy=True):
|
||||
items = []
|
||||
for uri, obj in self._store.items():
|
||||
if uri.startswith(search_key):
|
||||
items.append(obj.copy() if copy else obj)
|
||||
return items
|
||||
|
||||
def _build_response(self, url, content=None,
|
||||
status=requests.codes.ok, **kwargs):
|
||||
if isinstance(content, list):
|
||||
content = {
|
||||
'result_count': len(content),
|
||||
'results': content
|
||||
}
|
||||
|
||||
if (content is not None and kwargs.get('headers', {}).get(
|
||||
'Content-Type') == 'application/json'):
|
||||
content = jsonutils.dumps(content)
|
||||
|
||||
return MockRequestsResponse(status, content=content)
|
||||
|
||||
def _get_content(self, **kwargs):
|
||||
content = kwargs.get('data', None)
|
||||
if content and kwargs.get('headers', {}).get(
|
||||
'Content-Type') == 'application/json':
|
||||
content = jsonutils.loads(content)
|
||||
return content
|
||||
|
||||
def get(self, url, **kwargs):
|
||||
url = self._format_uri(url)
|
||||
|
||||
if self._is_uuid_uri(url):
|
||||
item = self._store.get(url)
|
||||
code = requests.codes.ok if item else requests.codes.not_found
|
||||
return self._build_response(
|
||||
url, content=item, status=code, **kwargs)
|
||||
|
||||
return self._build_response(
|
||||
url, content=self._query(url), status=requests.codes.ok, **kwargs)
|
||||
|
||||
def _create(self, url, content, **kwargs):
|
||||
resource_id = content.get('id')
|
||||
if resource_id and self._store.get("%s%s" % (url, resource_id)):
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.bad, **kwargs)
|
||||
|
||||
resource_id = resource_id or uuidutils.generate_uuid()
|
||||
content['id'] = resource_id
|
||||
|
||||
self._store["%s%s" % (url, resource_id)] = content.copy()
|
||||
return content
|
||||
|
||||
def post(self, url, **kwargs):
|
||||
parsed_url = urlparse.urlparse(url)
|
||||
url = self._format_uri(url)
|
||||
|
||||
if self._is_uuid_uri(url):
|
||||
if self._store.get(url) is None:
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.bad, **kwargs)
|
||||
|
||||
body = self._get_content(**kwargs)
|
||||
if body is None:
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.bad, **kwargs)
|
||||
|
||||
response_content = None
|
||||
|
||||
url_queries = urlparse.parse_qs(parsed_url.query)
|
||||
if 'create_multiple' in url_queries.get('action', []):
|
||||
response_content = {}
|
||||
for resource_name, resource_body in body.items():
|
||||
for new_resource in resource_body:
|
||||
created_resource = self._create(
|
||||
url, new_resource, **kwargs)
|
||||
if response_content.get(resource_name, None) is None:
|
||||
response_content[resource_name] = []
|
||||
response_content[resource_name].append(created_resource)
|
||||
else:
|
||||
response_content = self._create(url, body, **kwargs)
|
||||
|
||||
if isinstance(response_content, MockRequestsResponse):
|
||||
return response_content
|
||||
|
||||
return self._build_response(
|
||||
url, content=response_content,
|
||||
status=requests.codes.created, **kwargs)
|
||||
|
||||
def put(self, url, **kwargs):
|
||||
url = self._format_uri(url)
|
||||
|
||||
item = {}
|
||||
if self._is_uuid_uri(url):
|
||||
item = self._store.get(url, None)
|
||||
if item is None:
|
||||
return self._build_response(
|
||||
url, content=None,
|
||||
status=requests.codes.not_found, **kwargs)
|
||||
|
||||
body = self._get_content(**kwargs)
|
||||
if body is None:
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.bad, **kwargs)
|
||||
|
||||
item.update(body)
|
||||
self._store[url] = item
|
||||
return self._build_response(
|
||||
url, content=item, status=requests.codes.ok, **kwargs)
|
||||
|
||||
def delete(self, url, **kwargs):
|
||||
url = self._format_uri(url)
|
||||
|
||||
if not self._store.get(url):
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.not_found, **kwargs)
|
||||
|
||||
del self._store[url]
|
||||
return self._build_response(
|
||||
url, content=None, status=requests.codes.ok, **kwargs)
|
322
vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py
Normal file
322
vmware_nsxlib/tests/unit/v3/nsxlib_testcase.py
Normal file
@ -0,0 +1,322 @@
|
||||
# Copyright (c) 2015 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import copy
|
||||
import mock
|
||||
import unittest
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
from requests import exceptions as requests_exceptions
|
||||
|
||||
from vmware_nsxlib import v3
|
||||
from vmware_nsxlib.v3 import client as nsx_client
|
||||
from vmware_nsxlib.v3 import cluster as nsx_cluster
|
||||
from vmware_nsxlib.v3 import config
|
||||
|
||||
NSX_USER = 'admin'
|
||||
NSX_PASSWORD = 'default'
|
||||
NSX_MANAGER = '1.2.3.4'
|
||||
NSX_INSECURE = False
|
||||
NSX_CERT = '/opt/stack/certs/nsx.pem'
|
||||
NSX_HTTP_RETRIES = 10
|
||||
NSX_HTTP_TIMEOUT = 10
|
||||
NSX_HTTP_READ_TIMEOUT = 180
|
||||
NSX_CONCURENT_CONN = 10
|
||||
NSX_CONN_IDLE_TIME = 10
|
||||
NSX_MAX_ATTEMPTS = 10
|
||||
|
||||
PLUGIN_SCOPE = "plugin scope"
|
||||
PLUGIN_TAG = "plugin tag"
|
||||
PLUGIN_VER = "plugin ver"
|
||||
|
||||
|
||||
def _mock_nsxlib():
|
||||
def _return_id_key(*args, **kwargs):
|
||||
return {'id': uuidutils.generate_uuid()}
|
||||
|
||||
def _mock_add_rules_in_section(*args):
|
||||
# NOTE(arosen): the code in the neutron plugin expects the
|
||||
# neutron rule id as the display_name.
|
||||
rules = args[0]
|
||||
return {
|
||||
'rules': [
|
||||
{'display_name': rule['display_name'],
|
||||
'id': uuidutils.generate_uuid()}
|
||||
for rule in rules
|
||||
]}
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.cluster.NSXRequestsHTTPProvider"
|
||||
".validate_connection").start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.security.NsxLibNsGroup.create",
|
||||
side_effect=_return_id_key
|
||||
).start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.security.NsxLibFirewallSection.create_empty",
|
||||
side_effect=_return_id_key).start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.security.NsxLibFirewallSection.init_default",
|
||||
side_effect=_return_id_key).start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.security.NsxLibNsGroup.list").start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.security.NsxLibFirewallSection.add_rules",
|
||||
side_effect=_mock_add_rules_in_section).start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.NsxLibTransportZone.get_id_by_name_or_id",
|
||||
side_effect=_return_id_key).start()
|
||||
|
||||
mock.patch(
|
||||
"vmware_nsxlib.v3.NsxLib.get_version",
|
||||
return_value='1.1.0').start()
|
||||
|
||||
|
||||
def get_default_nsxlib_config():
|
||||
return config.NsxLibConfig(
|
||||
username=NSX_USER,
|
||||
password=NSX_PASSWORD,
|
||||
retries=NSX_HTTP_RETRIES,
|
||||
insecure=NSX_INSECURE,
|
||||
ca_file=NSX_CERT,
|
||||
concurrent_connections=NSX_CONCURENT_CONN,
|
||||
http_timeout=NSX_HTTP_TIMEOUT,
|
||||
http_read_timeout=NSX_HTTP_READ_TIMEOUT,
|
||||
conn_idle_timeout=NSX_CONN_IDLE_TIME,
|
||||
http_provider=None,
|
||||
nsx_api_managers=[],
|
||||
plugin_scope=PLUGIN_SCOPE,
|
||||
plugin_tag=PLUGIN_TAG,
|
||||
plugin_ver=PLUGIN_VER)
|
||||
|
||||
|
||||
class NsxLibTestCase(unittest.TestCase):
|
||||
|
||||
def setUp(self, *args, **kwargs):
|
||||
super(NsxLibTestCase, self).setUp()
|
||||
_mock_nsxlib()
|
||||
|
||||
nsxlib_config = get_default_nsxlib_config()
|
||||
self.nsxlib = v3.NsxLib(nsxlib_config)
|
||||
|
||||
# print diffs when assert comparisons fail
|
||||
self.maxDiff = None
|
||||
|
||||
|
||||
class MemoryMockAPIProvider(nsx_cluster.AbstractHTTPProvider):
    """Acts as a HTTP provider for mocking which is backed

    by a MockRequestSessionApi.
    """

    def __init__(self, mock_session_api):
        # Single shared in-memory store handed out for every connection.
        self._store = mock_session_api

    @property
    def provider_id(self):
        return "Memory mock API"

    def validate_connection(self, cluster_api, endpoint, conn):
        # The in-memory backend is always considered healthy.
        return

    def new_connection(self, cluster_api, provider):
        # all callers use the same backing
        return self._store

    def is_connection_exception(self, exception):
        # Mirror the real provider: only requests' ConnectionError
        # marks an endpoint as down.
        return isinstance(exception, requests_exceptions.ConnectionError)
|
||||
|
||||
|
||||
class NsxClientTestCase(NsxLibTestCase):
    """Test base that provides mocked clustered-API / client factories.

    The nested MockNSXClusteredAPI and MockHTTPProvider intercept HTTP
    traffic at the requests session/adapter level and record every call
    into a mock.Mock so tests can assert on the exact request shape.
    """

    class MockNSXClusteredAPI(nsx_cluster.NSXClusteredAPI):
        """Clustered API whose HTTP layer records instead of sending.

        Any keyword left as None falls back to the module-level test
        default (NSX_USER, NSX_HTTP_RETRIES, ...).
        """

        def __init__(
            self, session_response=None,
            username=None,
            password=None,
            retries=None,
            insecure=None,
            ca_file=None,
            concurrent_connections=None,
            http_timeout=None,
            http_read_timeout=None,
            conn_idle_timeout=None,
            nsx_api_managers=None):

            nsxlib_config = config.NsxLibConfig(
                username=username or NSX_USER,
                password=password or NSX_PASSWORD,
                retries=retries or NSX_HTTP_RETRIES,
                # insecure may legitimately be False, so test identity
                # with None rather than truthiness.
                insecure=insecure if insecure is not None else NSX_INSECURE,
                ca_file=ca_file or NSX_CERT,
                concurrent_connections=(concurrent_connections or
                                        NSX_CONCURENT_CONN),
                http_timeout=http_timeout or NSX_HTTP_TIMEOUT,
                http_read_timeout=http_read_timeout or NSX_HTTP_READ_TIMEOUT,
                conn_idle_timeout=conn_idle_timeout or NSX_CONN_IDLE_TIME,
                http_provider=NsxClientTestCase.MockHTTPProvider(
                    session_response=session_response),
                nsx_api_managers=nsx_api_managers or [NSX_MANAGER],
                plugin_scope=PLUGIN_SCOPE,
                plugin_tag=PLUGIN_TAG,
                plugin_ver=PLUGIN_VER)

            super(NsxClientTestCase.MockNSXClusteredAPI, self).__init__(
                nsxlib_config)
            # Every intercepted HTTP call is replayed onto this mock.
            self._record = mock.Mock()

        def record_call(self, request, **kwargs):
            """Normalize an outgoing request and record it on self._record."""
            verb = request.method.lower()

            # filter out requests specific attributes
            checked_kwargs = copy.copy(kwargs)
            del checked_kwargs['proxies']
            del checked_kwargs['stream']
            if 'allow_redirects' in checked_kwargs:
                del checked_kwargs['allow_redirects']

            for attr in ['url', 'body']:
                checked_kwargs[attr] = getattr(request, attr, None)

            # remove headers we don't need to verify
            checked_kwargs['headers'] = copy.copy(request.headers)
            for header in ['Accept-Encoding', 'User-Agent',
                           'Connection', 'Authorization',
                           'Content-Length']:
                if header in checked_kwargs['headers']:
                    del checked_kwargs['headers'][header]

            # NOTE(review): this overwrites the filtered copy built just
            # above with the raw request headers, making the filtering
            # loop a no-op — looks unintentional; confirm before relying
            # on header assertions here.
            checked_kwargs['headers'] = request.headers

            # record the call in the mock object
            method = getattr(self._record, verb)
            method(**checked_kwargs)

        def assert_called_once(self, verb, **kwargs):
            # Delegate to the per-verb child mock's standard assertion.
            mock_call = getattr(self._record, verb.lower())
            mock_call.assert_called_once_with(**kwargs)

        @property
        def recorded_calls(self):
            # Raw mock holding every intercepted call for inspection.
            return self._record

    class MockHTTPProvider(nsx_cluster.NSXRequestsHTTPProvider):
        """Real requests-based provider with send/adapter intercepted."""

        def __init__(self, session_response=None):
            super(NsxClientTestCase.MockHTTPProvider, self).__init__()
            # Optional canned response (value or zero-arg callable) that
            # short-circuits the session-level send.
            self._session_response = session_response

        def new_connection(self, cluster_api, provider):
            # wrapper the session so we can intercept and record calls
            session = super(NsxClientTestCase.MockHTTPProvider,
                            self).new_connection(cluster_api, provider)

            mock_adapter = mock.Mock()
            session_send = session.send

            def _adapter_send(request, **kwargs):
                # record calls at the requests HTTP adapter level
                mock_response = mock.Mock()
                mock_response.history = None
                # needed to bypass requests internal checks for mock
                mock_response.raw._original_response = {}

                # record the request for later verification
                cluster_api.record_call(request, **kwargs)
                return mock_response

            def _session_send(request, **kwargs):
                # calls at the Session level
                if self._session_response:
                    # consumer has setup a response for the session
                    cluster_api.record_call(request, **kwargs)
                    return (self._session_response()
                            if hasattr(self._session_response, '__call__')
                            else self._session_response)

                # bypass requests redirect handling for mock
                kwargs['allow_redirects'] = False

                # session send will end up calling adapter send
                return session_send(request, **kwargs)

            mock_adapter.send = _adapter_send
            session.send = _session_send

            def _mock_adapter(*args, **kwargs):
                # use our mock adapter rather than requests adapter
                return mock_adapter

            session.get_adapter = _mock_adapter
            return session

        def validate_connection(self, cluster_api, endpoint, conn):
            # Mock endpoints are always valid as long as a conn exists.
            assert conn is not None

    def mock_nsx_clustered_api(self, session_response=None, **kwargs):
        """Factory for a recording MockNSXClusteredAPI instance."""
        return NsxClientTestCase.MockNSXClusteredAPI(
            session_response=session_response, **kwargs)

    def mocked_resource(self, resource_class, mock_validate=True,
                        session_response=None):
        """Build a resource wired to a mocked NSX3Client.

        When mock_validate is True, result validation on the underlying
        client is patched out so canned responses pass unchecked.
        """
        mocked = resource_class(nsx_client.NSX3Client(
            self.mock_nsx_clustered_api(session_response=session_response),
            max_attempts=NSX_MAX_ATTEMPTS))
        if mock_validate:
            mock.patch.object(mocked._client, '_validate_result').start()

        return mocked

    def new_mocked_client(self, client_class, mock_validate=True,
                          session_response=None, mock_cluster=None,
                          **kwargs):
        """Build a client of client_class on a mocked (or given) cluster.

        Also wraps new_client_for() so every derived sub-client gets the
        same _validate_result patching as the parent.
        """
        client = client_class(mock_cluster or self.mock_nsx_clustered_api(
            session_response=session_response), **kwargs)

        if mock_validate:
            mock.patch.object(client, '_validate_result').start()

        new_client_for = client.new_client_for

        def _new_client_for(*args, **kwargs):
            sub_client = new_client_for(*args, **kwargs)
            if mock_validate:
                mock.patch.object(sub_client, '_validate_result').start()
            return sub_client

        client.new_client_for = _new_client_for

        return client

    def new_mocked_cluster(self, conf_managers, validate_conn_func,
                           concurrent_connections=None):
        """Build a real NSXClusteredAPI over a mock HTTP provider.

        validate_conn_func drives the per-endpoint health outcome used
        by the cluster health tests.
        """
        mock_provider = mock.Mock()
        mock_provider.default_scheme = 'https'
        mock_provider.validate_connection = validate_conn_func

        nsxlib_config = get_default_nsxlib_config()
        if concurrent_connections:
            nsxlib_config.concurrent_connections = concurrent_connections
        nsxlib_config.http_provider = mock_provider
        nsxlib_config.nsx_api_managers = conf_managers

        return nsx_cluster.NSXClusteredAPI(nsxlib_config)
|
308
vmware_nsxlib/tests/unit/v3/test_client.py
Normal file
308
vmware_nsxlib/tests/unit/v3/test_client.py
Normal file
@ -0,0 +1,308 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import copy
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import mocks
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.v3 import client
|
||||
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
CLIENT_PKG = 'vmware_nsxlib.v3.client'
|
||||
|
||||
DFT_ACCEPT_HEADERS = {
|
||||
'Accept': '*/*'
|
||||
}
|
||||
|
||||
|
||||
def _headers(**kwargs):
|
||||
headers = copy.copy(DFT_ACCEPT_HEADERS)
|
||||
headers.update(kwargs)
|
||||
return headers
|
||||
|
||||
|
||||
def assert_call(verb, client_or_resource,
                url, verify=nsxlib_testcase.NSX_CERT,
                data=None, headers=DFT_ACCEPT_HEADERS,
                timeout=(nsxlib_testcase.NSX_HTTP_TIMEOUT,
                         nsxlib_testcase.NSX_HTTP_READ_TIMEOUT)):
    """Assert that exactly one HTTP call of the given shape was recorded.

    Accepts either a raw client or a resource wrapping one; unwraps to
    the client and checks its recording cluster.
    """
    nsx_client = client_or_resource
    if getattr(nsx_client, '_client', None) is not None:
        nsx_client = nsx_client._client
    recording_cluster = nsx_client._conn
    expected = {
        'url': url,
        'verify': verify,
        'body': data,
        'headers': headers,
        'cert': None,
        'timeout': timeout,
    }
    recording_cluster.assert_called_once(verb, **expected)
|
||||
|
||||
|
||||
def assert_json_call(verb, client_or_resource, url,
                     verify=nsxlib_testcase.NSX_CERT,
                     data=None,
                     headers=client.JSONRESTClient._DEFAULT_HEADERS):
    """Like assert_call, but defaulting to the JSON client's headers."""
    return assert_call(
        verb, client_or_resource, url,
        verify=verify,
        data=data,
        headers=headers)
|
||||
|
||||
|
||||
class NsxV3RESTClientTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for client.RESTClient URL building, verbs, headers and
    response-code validation, asserted against recorded HTTP calls."""

    def test_client_url_prefix(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='/cloud/api')

        api.list()

        assert_call(
            'get', api,
            'https://1.2.3.4/cloud/api')

        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='/cloud/api')

        api.url_list('v1/ports')

        # relative url is appended under the configured prefix
        assert_call(
            'get', api,
            'https://1.2.3.4/cloud/api/v1/ports')

    def test_client_headers(self):
        default_headers = {'Content-Type': 'application/golang'}
        api = self.new_mocked_client(
            client.RESTClient, default_headers=default_headers,
            url_prefix='/v1/api')

        api.list()

        assert_call(
            'get', api,
            'https://1.2.3.4/v1/api',
            headers=_headers(**default_headers))

        api = self.new_mocked_client(
            client.RESTClient,
            default_headers=default_headers,
            url_prefix='/v1/api')

        # per-call headers are merged with the client defaults
        method_headers = {'X-API-Key': 'strong-crypt'}
        api.url_list('ports/33', headers=method_headers)
        method_headers.update(default_headers)
        assert_call(
            'get', api,
            'https://1.2.3.4/v1/api/ports/33',
            headers=_headers(**method_headers))

    def test_client_for(self):
        api = self.new_mocked_client(client.RESTClient, url_prefix='api/v1/')
        sub_api = api.new_client_for('switch/ports')

        sub_api.get('11a2b')

        assert_call(
            'get', sub_api,
            'https://1.2.3.4/api/v1/switch/ports/11a2b')

    def test_client_list(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.list()

        assert_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports')

    def test_client_get(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.get('unique-id')

        assert_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports/unique-id')

    def test_client_delete(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.delete('unique-id')

        assert_call(
            'delete', api,
            'https://1.2.3.4/api/v1/ports/unique-id')

    def test_client_update(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.update('unique-id', jsonutils.dumps({'name': 'a-new-name'}))

        assert_call(
            'put', api,
            'https://1.2.3.4/api/v1/ports/unique-id',
            data=jsonutils.dumps({'name': 'a-new-name'}))

    def test_client_create(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.create(body=jsonutils.dumps({'resource-name': 'port1'}))

        assert_call(
            'post', api,
            'https://1.2.3.4/api/v1/ports',
            data=jsonutils.dumps({'resource-name': 'port1'}))

    def test_client_url_list(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')

        json_headers = {'Content-Type': 'application/json'}

        api.url_list('/connections', json_headers)

        assert_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports/connections',
            headers=_headers(**json_headers))

    def test_client_url_get(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.url_get('connections/1')

        assert_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports/connections/1')

    def test_client_url_delete(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.url_delete('1')

        assert_call(
            'delete', api,
            'https://1.2.3.4/api/v1/ports/1')

    def test_client_url_put(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.url_put('connections/1', jsonutils.dumps({'name': 'conn1'}))

        assert_call(
            'put', api,
            'https://1.2.3.4/api/v1/ports/connections/1',
            data=jsonutils.dumps({'name': 'conn1'}))

    def test_client_url_post(self):
        api = self.new_mocked_client(client.RESTClient,
                                     url_prefix='api/v1/ports')
        api.url_post('1/connections', jsonutils.dumps({'name': 'conn1'}))

        assert_call(
            'post', api,
            'https://1.2.3.4/api/v1/ports/1/connections',
            data=jsonutils.dumps({'name': 'conn1'}))

    def test_client_validate_result(self):
        # Every verb must accept each of its expected response codes and
        # raise ManagerError on an unexpected 500.

        def _verb_response_code(http_verb, status_code):
            response = mocks.MockRequestsResponse(
                status_code, None)

            client_api = self.new_mocked_client(
                client.RESTClient, mock_validate=False,
                session_response=response)

            client_call = getattr(client_api, "url_%s" % http_verb)
            client_call('', None)

        for verb in ['get', 'post', 'put', 'delete']:
            for code in client.RESTClient._VERB_RESP_CODES.get(verb):
                _verb_response_code(verb, code)
            self.assertRaises(
                nsxlib_exc.ManagerError,
                _verb_response_code, verb, 500)
|
||||
|
||||
|
||||
class NsxV3JSONClientTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for client.JSONRESTClient body encoding and JSON decoding."""

    def test_json_request(self):
        # Canned 200 response with a JSON body the client must decode.
        resp = mocks.MockRequestsResponse(
            200, jsonutils.dumps({'result': {'ok': 200}}))

        api = self.new_mocked_client(client.JSONRESTClient,
                                     session_response=resp,
                                     url_prefix='api/v2/nat')

        resp = api.create(body={'name': 'mgmt-egress'})

        # the dict body must have been serialized to JSON on the wire
        assert_json_call(
            'post', api,
            'https://1.2.3.4/api/v2/nat',
            data=jsonutils.dumps({'name': 'mgmt-egress'}))

        self.assertEqual(resp, {'result': {'ok': 200}})
|
||||
|
||||
|
||||
class NsxV3APIClientTestCase(nsxlib_testcase.NsxClientTestCase):
    """Smoke test for the NSX3Client default 'api/v1' prefixing."""

    def test_api_call(self):
        api = self.new_mocked_client(client.NSX3Client)
        api.get('ports')

        assert_json_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports')
|
||||
|
||||
|
||||
# NOTE(boden): remove this when tmp bridging removed
class NsxV3APIClientBridgeTestCase(nsxlib_testcase.NsxClientTestCase):
    """CRUD round-trip tests for the temporary NSX3Client bridge API."""

    def test_get_resource(self):
        api = self.new_mocked_client(client.NSX3Client)
        api.get('ports')

        assert_json_call(
            'get', api,
            'https://1.2.3.4/api/v1/ports')

    def test_create_resource(self):
        api = self.new_mocked_client(client.NSX3Client)
        api.create('ports', {'resource-name': 'port1'})

        assert_json_call(
            'post', api,
            'https://1.2.3.4/api/v1/ports',
            data=jsonutils.dumps({'resource-name': 'port1'}))

    def test_update_resource(self):
        api = self.new_mocked_client(client.NSX3Client)
        api.update('ports/1', {'name': 'a-new-name'})

        assert_json_call(
            'put', api,
            'https://1.2.3.4/api/v1/ports/1',
            data=jsonutils.dumps({'name': 'a-new-name'}))

    def test_delete_resource(self):
        api = self.new_mocked_client(client.NSX3Client)
        api.delete('ports/11')

        assert_json_call(
            'delete', api,
            'https://1.2.3.4/api/v1/ports/11')
|
205
vmware_nsxlib/tests/unit/v3/test_cluster.py
Normal file
205
vmware_nsxlib/tests/unit/v3/test_cluster.py
Normal file
@ -0,0 +1,205 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import mock
|
||||
import six.moves.urllib.parse as urlparse
|
||||
import unittest
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from requests import exceptions as requests_exceptions
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import mocks
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.v3 import client
|
||||
from vmware_nsxlib.v3 import cluster
|
||||
from vmware_nsxlib.v3 import exceptions as nsxlib_exc
|
||||
|
||||
|
||||
def _validate_conn_up(*args, **kwargs):
|
||||
return
|
||||
|
||||
|
||||
def _validate_conn_down(*args, **kwargs):
    """Health-check stub: every endpoint fails with a connection error."""
    raise requests_exceptions.ConnectionError()
|
||||
|
||||
|
||||
class RequestsHTTPProviderTestCase(unittest.TestCase):
    """Direct unit tests for cluster.NSXRequestsHTTPProvider.

    These run without the mocked clustered API; they exercise the
    provider's session construction and (skipped) validation logic.
    """

    def test_new_connection(self):
        """new_connection() must copy each config value onto the session."""
        mock_api = mock.Mock()
        mock_api.nsxlib_config = mock.Mock()
        mock_api.nsxlib_config.username = 'nsxuser'
        mock_api.nsxlib_config.password = 'nsxpassword'
        mock_api.nsxlib_config.retries = 100
        mock_api.nsxlib_config.insecure = True
        mock_api.nsxlib_config.ca_file = None
        mock_api.nsxlib_config.http_timeout = 99
        mock_api.nsxlib_config.conn_idle_timeout = 39
        provider = cluster.NSXRequestsHTTPProvider()
        session = provider.new_connection(
            mock_api, cluster.Provider('9.8.7.6', 'https://9.8.7.6',
                                       'nsxuser', 'nsxpassword', None))

        self.assertEqual(session.auth, ('nsxuser', 'nsxpassword'))
        # insecure=True means TLS verification is disabled
        self.assertEqual(session.verify, False)
        self.assertEqual(session.cert, None)
        self.assertEqual(session.adapters['https://'].max_retries.total, 100)
        self.assertEqual(session.timeout, 99)

    def test_validate_connection(self):
        # Parked pending a rework of connection validation; the body
        # below is intentionally unreachable until the skip is removed.
        self.skipTest("Revisit")
        mock_conn = mocks.MockRequestSessionApi()
        mock_ep = mock.Mock()
        mock_ep.provider.url = 'https://1.2.3.4'
        provider = cluster.NSXRequestsHTTPProvider()
        # no transport zones yet -> validation must fail
        self.assertRaises(nsxlib_exc.ResourceNotFound,
                          provider.validate_connection,
                          mock.Mock(), mock_ep, mock_conn)

        # once a transport zone exists, validation must pass
        mock_conn.post('api/v1/transport-zones',
                       data=jsonutils.dumps({'id': 'dummy-tz'}),
                       headers=client.JSONRESTClient._DEFAULT_HEADERS)
        provider.validate_connection(mock.Mock(), mock_ep, mock_conn)
|
||||
|
||||
|
||||
class NsxV3ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for endpoint/provider construction and per-endpoint settings."""

    def _assert_providers(self, cluster_api, provider_tuples):
        # provider_tuples: iterable of (provider_id, provider_url)
        self.assertEqual(len(cluster_api.providers), len(provider_tuples))

        def _assert_provider(pid, purl):
            for provider in cluster_api.providers:
                if provider.id == pid and provider.url == purl:
                    return
            self.fail("Provider: %s not found" % pid)

        for provider_tuple in provider_tuples:
            _assert_provider(provider_tuple[0], provider_tuple[1])

    def test_conf_providers_no_scheme(self):
        # scheme-less manager addresses default to https
        conf_managers = ['8.9.10.11', '9.10.11.12:4433']
        api = self.new_mocked_cluster(conf_managers, _validate_conn_up)

        self._assert_providers(
            api, [(p, "https://%s" % p) for p in conf_managers])

    def test_conf_providers_with_scheme(self):
        # explicit schemes are preserved as-is
        conf_managers = ['http://8.9.10.11:8080', 'https://9.10.11.12:4433']
        api = self.new_mocked_cluster(conf_managers, _validate_conn_up)

        self._assert_providers(
            api, [(urlparse.urlparse(p).netloc, p) for p in conf_managers])

    def test_http_retries(self):
        api = self.mock_nsx_clustered_api(retries=9)
        with api.endpoints['1.2.3.4'].pool.item() as session:
            self.assertEqual(
                session.adapters['https://'].max_retries.total, 9)

    def test_conns_per_pool(self):
        conf_managers = ['8.9.10.11', '9.10.11.12:4433']
        api = self.new_mocked_cluster(
            conf_managers, _validate_conn_up,
            concurrent_connections=11)

        # the configured limit applies to every endpoint pool
        for ep_id, ep in api.endpoints.items():
            self.assertEqual(ep.pool.max_size, 11)

    def test_timeouts(self):
        api = self.mock_nsx_clustered_api(http_read_timeout=37, http_timeout=7)
        api.get('logical-ports')
        mock_call = api.recorded_calls.method_calls[0]
        name, args, kwargs = mock_call
        # requests-style (connect, read) timeout tuple
        self.assertEqual(kwargs['timeout'], (7, 37))
|
||||
|
||||
|
||||
class ClusteredAPITestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for cluster health, failure propagation and round-robin
    endpoint scheduling."""

    def _test_health(self, validate_fn, expected_health):
        conf_managers = ['8.9.10.11', '9.10.11.12']
        api = self.new_mocked_cluster(conf_managers, validate_fn)

        self.assertEqual(api.health, expected_health)

    def test_orange_health(self):
        # one endpoint down, one up -> degraded (ORANGE)

        def _validate(cluster_api, endpoint, conn):
            if endpoint.provider.id == '8.9.10.11':
                raise Exception()

        self._test_health(_validate, cluster.ClusterHealth.ORANGE)

    def test_green_health(self):
        self._test_health(_validate_conn_up, cluster.ClusterHealth.GREEN)

    def test_red_health(self):
        self._test_health(_validate_conn_down, cluster.ClusterHealth.RED)

    def test_cluster_validate_with_exception(self):
        conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13']
        api = self.new_mocked_cluster(conf_managers, _validate_conn_down)

        self.assertEqual(len(api.endpoints), 3)
        # with every endpoint down, any call must surface the outage
        self.assertRaises(nsxlib_exc.ServiceClusterUnavailable,
                          api.get, 'api/v1/transport-zones')

    def test_cluster_proxy_stale_revision(self):
        # StaleRevision is a caller error, not an endpoint failure,
        # so it must propagate rather than trigger failover.

        def stale_revision():
            raise nsxlib_exc.StaleRevision(manager='1.1.1.1',
                                           operation='whatever')

        api = self.mock_nsx_clustered_api(session_response=stale_revision)
        self.assertRaises(nsxlib_exc.StaleRevision,
                          api.get, 'api/v1/transport-zones')

    def test_cluster_proxy_connection_error(self):

        def connect_timeout():
            raise requests_exceptions.ConnectTimeout()

        api = self.mock_nsx_clustered_api(session_response=connect_timeout)
        api._validate = mock.Mock()
        self.assertRaises(nsxlib_exc.ServiceClusterUnavailable,
                          api.get, 'api/v1/transport-zones')

    def test_cluster_round_robin_servicing(self):
        conf_managers = ['8.9.10.11', '9.10.11.12', '10.11.12.13']
        api = self.mock_nsx_clustered_api(nsx_api_managers=conf_managers)
        api._validate = mock.Mock()

        eps = list(api._endpoints.values())

        def _get_schedule(num_eps):
            return [api._select_endpoint() for i in range(num_eps)]

        self.assertEqual(_get_schedule(3), eps)

        self.assertEqual(_get_schedule(6), [eps[0], eps[1], eps[2],
                                            eps[0], eps[1], eps[2]])

        # DOWN endpoints are skipped by the scheduler
        eps[0]._state = cluster.EndpointState.DOWN
        self.assertEqual(_get_schedule(4), [eps[1], eps[2], eps[1], eps[2]])

        eps[1]._state = cluster.EndpointState.DOWN
        self.assertEqual(_get_schedule(2), [eps[2], eps[2]])

        # recovered endpoints rejoin the rotation
        eps[0]._state = cluster.EndpointState.UP
        self.assertEqual(_get_schedule(4), [eps[0], eps[2], eps[0], eps[2]])

    def test_reinitialize_cluster(self):
        api = self.mock_nsx_clustered_api()
        # just make sure this api is defined, and does not crash
        api._reinit_cluster()
|
162
vmware_nsxlib/tests/unit/v3/test_constants.py
Normal file
162
vmware_nsxlib/tests/unit/v3/test_constants.py
Normal file
@ -0,0 +1,162 @@
|
||||
# Copyright (c) 2016 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
# Shared fake NSX objects used as fixtures across the v3 unit tests.
FAKE_NAME = "fake_name"
FAKE_SWITCH_UUID = uuidutils.generate_uuid()

FAKE_PORT_UUID = uuidutils.generate_uuid()
# Logical port with a plain VIF attachment and no address bindings.
FAKE_PORT = {
    "id": FAKE_PORT_UUID,
    "display_name": FAKE_NAME,
    "resource_type": "LogicalPort",
    "address_bindings": [],
    "logical_switch_id": FAKE_SWITCH_UUID,
    "admin_state": "UP",
    "attachment": {
        "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2",
        "attachment_type": "VIF"
    },
    "switching_profile_ids": [
        {
            "value": "64814784-7896-3901-9741-badeff705639",
            "key": "IpDiscoverySwitchingProfile"
        },
        {
            "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1",
            "key": "SpoofGuardSwitchingProfile"
        },
        {
            "value": "93b4b7e8-f116-415d-a50c-3364611b5d09",
            "key": "PortMirroringSwitchingProfile"
        },
        {
            "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888",
            "key": "SwitchSecuritySwitchingProfile"
        },
        {
            "value": "f313290b-eba8-4262-bd93-fab5026e9495",
            "key": "QosSwitchingProfile"
        }
    ]
}

# Same port id, but attached as a container interface (CIF) with
# address bindings and a CIF attachment context.
FAKE_CONTAINER_PORT = {
    "id": FAKE_PORT_UUID,
    "display_name": FAKE_NAME,
    "resource_type": "LogicalPort",
    "address_bindings": [
        {
            "ip_address": "192.168.1.110",
            "mac_address": "aa:bb:cc:dd:ee:ff"
        }
    ],
    "logical_switch_id": FAKE_SWITCH_UUID,
    "admin_state": "UP",
    "attachment": {
        "id": "9ca8d413-f7bf-4276-b4c9-62f42516bdb2",
        "attachment_type": "CIF",
        "context": {
            "vlan_tag": 122,
            "container_host_vif_id": "c6f817a0-4e36-421e-98a6-8a2faed880bc",
            "key_values": [],
            "resource_type": "CifAttachmentContext",
        }
    },
    "switching_profile_ids": [
        {
            "value": "64814784-7896-3901-9741-badeff705639",
            "key": "IpDiscoverySwitchingProfile"
        },
        {
            "value": "fad98876-d7ff-11e4-b9d6-1681e6b88ec1",
            "key": "SpoofGuardSwitchingProfile"
        },
        {
            "value": "93b4b7e8-f116-415d-a50c-3364611b5d09",
            "key": "PortMirroringSwitchingProfile"
        },
        {
            "value": "fbc4fb17-83d9-4b53-a286-ccdf04301888",
            "key": "SwitchSecuritySwitchingProfile"
        },
        {
            "value": "f313290b-eba8-4262-bd93-fab5026e9495",
            "key": "QosSwitchingProfile"
        }
    ]
}


FAKE_ROUTER_UUID = uuidutils.generate_uuid()
FAKE_ROUTER = {
    "resource_type": "LogicalRouter",
    "revision": 0,
    "id": FAKE_ROUTER_UUID,
    "display_name": FAKE_NAME
}

FAKE_ROUTER_PORT_UUID = uuidutils.generate_uuid()
FAKE_ROUTER_PORT = {
    "resource_type": "LogicalRouterLinkPort",
    "revision": 0,
    "id": FAKE_ROUTER_PORT_UUID,
    "display_name": FAKE_NAME,
    "logical_router_id": FAKE_ROUTER_UUID
}

# QoS switching profile with DSCP marking and all three rate shapers
# present but disabled.
FAKE_QOS_PROFILE = {
    "resource_type": "QosSwitchingProfile",
    "id": uuidutils.generate_uuid(),
    "display_name": FAKE_NAME,
    "system_defined": False,
    "dscp": {
        "priority": 25,
        "mode": "UNTRUSTED"
    },
    "tags": [],
    "description": FAKE_NAME,
    "class_of_service": 0,
    "shaper_configuration": [
        {
            "resource_type": "IngressRateShaper",
            "enabled": False,
            "peak_bandwidth_mbps": 0,
            "burst_size_bytes": 0,
            "average_bandwidth_mbps": 0
        },
        {
            "resource_type": "IngressBroadcastRateShaper",
            "enabled": False,
            "peak_bandwidth_kbps": 0,
            "average_bandwidth_kbps": 0,
            "burst_size_bytes": 0
        },
        {
            "resource_type": "EgressRateShaper",
            "enabled": False,
            "peak_bandwidth_mbps": 0,
            "burst_size_bytes": 0,
            "average_bandwidth_mbps": 0
        }
    ],
    "_last_modified_user": "admin",
    "_last_modified_time": 1438383180608,
    "_create_time": 1438383180608,
    "_create_user": "admin",
    "_revision": 0
}
|
178
vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py
Normal file
178
vmware_nsxlib/tests/unit/v3/test_qos_switching_profile.py
Normal file
@ -0,0 +1,178 @@
|
||||
# Copyright (c) 2015 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import mock
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.tests.unit.v3 import test_constants
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxLibQosTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for the QoS switching-profile API wrapper."""

    def _body(self, qos_marking=None, dscp=None,
              description=test_constants.FAKE_NAME):
        # Expected request payload for a plain QoS switching profile.
        payload = {
            "resource_type": "QosSwitchingProfile",
            "tags": []
        }
        if qos_marking:
            payload = self.nsxlib.qos_switching_profile._update_dscp_in_args(
                payload, qos_marking, dscp)

        payload["display_name"] = test_constants.FAKE_NAME
        payload["description"] = description

        return payload

    def _body_with_shaping(self, shaping_enabled=False,
                           burst_size=None,
                           peak_bandwidth=None,
                           average_bandwidth=None,
                           description=test_constants.FAKE_NAME,
                           qos_marking=None,
                           dscp=0):
        # NOTE(review): this intentionally aliases (and mutates) the shared
        # test_constants.FAKE_QOS_PROFILE dict instead of copying it; the
        # assertions below end up comparing against this very same object.
        payload = test_constants.FAKE_QOS_PROFILE
        payload["display_name"] = test_constants.FAKE_NAME
        payload["description"] = description

        for shaper in payload["shaper_configuration"]:
            # Only shaping of Egress traffic is currently supported.
            if shaper["resource_type"] != "EgressRateShaper":
                continue
            shaper["enabled"] = shaping_enabled
            if burst_size:
                shaper["burst_size_bytes"] = burst_size
            if peak_bandwidth:
                shaper["peak_bandwidth_mbps"] = peak_bandwidth
            if average_bandwidth:
                shaper["average_bandwidth_mbps"] = average_bandwidth
            break

        if qos_marking:
            payload = self.nsxlib.qos_switching_profile._update_dscp_in_args(
                payload, qos_marking, dscp)

        return payload

    def test_create_qos_switching_profile(self):
        """Creating a qos-switching profile posts the expected body."""
        with mock.patch.object(self.nsxlib.client, 'create') as mock_create:
            self.nsxlib.qos_switching_profile.create(
                tags=[],
                name=test_constants.FAKE_NAME,
                description=test_constants.FAKE_NAME)
        mock_create.assert_called_with(
            'switching-profiles', self._body())

    def test_update_qos_switching_profile(self):
        """Updating the profile description sends the updated body."""
        profile = self._body()
        new_description = "Test"
        with mock.patch.object(self.nsxlib.client, 'get',
                               return_value=profile), \
                mock.patch.object(self.nsxlib.client,
                                  'update') as mock_update:
            # change only the description of the profile
            self.nsxlib.qos_switching_profile.update(
                test_constants.FAKE_QOS_PROFILE['id'],
                tags=[],
                description=new_description)
        mock_update.assert_called_with(
            'switching-profiles/%s'
            % test_constants.FAKE_QOS_PROFILE['id'],
            self._body(description=new_description))

    def test_enable_qos_switching_profile_shaping(self):
        """Enabling bandwidth shaping sends the shaping parameters."""
        original_profile = self._body_with_shaping()
        shaping_kwargs = {
            'shaping_enabled': True,
            'burst_size': 100,
            'peak_bandwidth': 200,
            'average_bandwidth': 300,
            'qos_marking': "untrusted",
            'dscp': 10,
        }

        with mock.patch.object(self.nsxlib.client, 'get',
                               return_value=original_profile), \
                mock.patch.object(self.nsxlib.client,
                                  'update') as mock_update:
            # reconfigure the bandwidth shaping of the profile
            self.nsxlib.qos_switching_profile.update_shaping(
                test_constants.FAKE_QOS_PROFILE['id'], **shaping_kwargs)

        mock_update.assert_called_with(
            'switching-profiles/%s'
            % test_constants.FAKE_QOS_PROFILE['id'],
            self._body_with_shaping(**shaping_kwargs))

    def test_disable_qos_switching_profile_shaping(self):
        """Disabling bandwidth shaping sends shaping_enabled=False."""
        original_profile = self._body_with_shaping(
            shaping_enabled=True,
            burst_size=100,
            peak_bandwidth=200,
            average_bandwidth=300,
            qos_marking="untrusted",
            dscp=10)

        with mock.patch.object(self.nsxlib.client, 'get',
                               return_value=original_profile), \
                mock.patch.object(self.nsxlib.client,
                                  'update') as mock_update:
            # turn the bandwidth shaping of the profile off again
            self.nsxlib.qos_switching_profile.update_shaping(
                test_constants.FAKE_QOS_PROFILE['id'],
                shaping_enabled=False, qos_marking="trusted")

        mock_update.assert_called_with(
            'switching-profiles/%s'
            % test_constants.FAKE_QOS_PROFILE['id'],
            self._body_with_shaping(qos_marking="trusted"))

    def test_delete_qos_switching_profile(self):
        """Deleting a qos-switching-profile issues the expected DELETE."""
        with mock.patch.object(self.nsxlib.client, 'delete') as mock_delete:
            self.nsxlib.qos_switching_profile.delete(
                test_constants.FAKE_QOS_PROFILE['id'])
        mock_delete.assert_called_with(
            'switching-profiles/%s'
            % test_constants.FAKE_QOS_PROFILE['id'])
|
514
vmware_nsxlib/tests/unit/v3/test_resources.py
Normal file
514
vmware_nsxlib/tests/unit/v3/test_resources.py
Normal file
@ -0,0 +1,514 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import copy
|
||||
|
||||
import mock
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import mocks
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.tests.unit.v3 import test_client
|
||||
from vmware_nsxlib.tests.unit.v3 import test_constants
|
||||
from vmware_nsxlib.v3 import resources
|
||||
|
||||
|
||||
CLIENT_PKG = test_client.CLIENT_PKG
|
||||
profile_types = resources.SwitchingProfileTypes
|
||||
|
||||
|
||||
class TestSwitchingProfileTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for resources.SwitchingProfile request payloads."""

    # Tag set shared by the payload-construction tests below.
    _TAGS = [
        {
            'scope': 'os-project-id',
            'tag': 'tenant-1'
        },
        {
            'scope': 'os-api-version',
            'tag': '2.1.1.0'
        }
    ]

    def _mocked_switching_profile(self, session_response=None):
        # Wrap the SwitchingProfile resource around a mocked REST session.
        return self.mocked_resource(
            resources.SwitchingProfile, session_response=session_response)

    def test_switching_profile_create(self):
        """create() posts resource type, display name and description."""
        profile = self._mocked_switching_profile()

        profile.create(profile_types.PORT_MIRRORING,
                       'pm-profile', 'port mirror prof')

        expected = jsonutils.dumps({
            'resource_type': profile_types.PORT_MIRRORING,
            'display_name': 'pm-profile',
            'description': 'port mirror prof'
        }, sort_keys=True)
        test_client.assert_json_call(
            'post', profile,
            'https://1.2.3.4/api/v1/switching-profiles',
            data=expected)

    def test_switching_profile_update(self):
        """update() puts the resource type and the new tags."""
        profile = self._mocked_switching_profile()

        profile.update(
            'a12bc1', profile_types.PORT_MIRRORING, tags=self._TAGS)

        expected = jsonutils.dumps({
            'resource_type': profile_types.PORT_MIRRORING,
            'tags': self._TAGS
        }, sort_keys=True)
        test_client.assert_json_call(
            'put', profile,
            'https://1.2.3.4/api/v1/switching-profiles/a12bc1',
            data=expected)

    def test_spoofgaurd_profile_create(self):
        """create_spoofguard_profile() posts the white-list provider body."""
        # (the method name keeps its historical misspelling of "spoofguard")
        profile = self._mocked_switching_profile()

        profile.create_spoofguard_profile(
            'plugin-spoof', 'spoofguard-for-plugin',
            whitelist_ports=True, tags=self._TAGS)

        test_client.assert_json_call(
            'post', profile,
            'https://1.2.3.4/api/v1/switching-profiles',
            data=jsonutils.dumps({
                'resource_type': profile_types.SPOOF_GUARD,
                'display_name': 'plugin-spoof',
                'description': 'spoofguard-for-plugin',
                'white_list_providers': ['LPORT_BINDINGS'],
                'tags': self._TAGS
            }, sort_keys=True))

    def test_create_dhcp_profile(self):
        """create_dhcp_profile() posts the switch-security filter body."""
        profile = self._mocked_switching_profile()

        profile.create_dhcp_profile(
            'plugin-dhcp', 'dhcp-for-plugin',
            tags=self._TAGS)

        test_client.assert_json_call(
            'post', profile,
            'https://1.2.3.4/api/v1/switching-profiles',
            data=jsonutils.dumps({
                'bpdu_filter': {
                    'enabled': True,
                    'white_list': []
                },
                'resource_type': profile_types.SWITCH_SECURITY,
                'display_name': 'plugin-dhcp',
                'description': 'dhcp-for-plugin',
                'tags': self._TAGS,
                'dhcp_filter': {
                    'client_block_enabled': True,
                    'server_block_enabled': False
                },
                'rate_limits': {
                    'enabled': False,
                    'rx_broadcast': 0,
                    'tx_broadcast': 0,
                    'rx_multicast': 0,
                    'tx_multicast': 0
                },
                'block_non_ip_traffic': True
            }, sort_keys=True))

    def test_create_mac_learning_profile(self):
        """create_mac_learning_profile() posts the mac-learning body."""
        profile = self._mocked_switching_profile()

        profile.create_mac_learning_profile(
            'plugin-mac-learning', 'mac-learning-for-plugin',
            tags=self._TAGS)

        test_client.assert_json_call(
            'post', profile,
            'https://1.2.3.4/api/v1/switching-profiles',
            data=jsonutils.dumps({
                'mac_learning': {
                    'enabled': True,
                },
                'resource_type': profile_types.MAC_LEARNING,
                'display_name': 'plugin-mac-learning',
                'description': 'mac-learning-for-plugin',
                'tags': self._TAGS,
            }, sort_keys=True))

    def test_find_by_display_name(self):
        """find_by_display_name() filters the listing by exact name."""
        listing = {
            'results': [
                {'display_name': 'resource-1'},
                {'display_name': 'resource-2'},
                {'display_name': 'resource-3'}
            ]
        }
        session_response = mocks.MockRequestsResponse(
            200, jsonutils.dumps(listing))
        profile = self._mocked_switching_profile(
            session_response=session_response)

        for name in ('resource-1', 'resource-2', 'resource-3'):
            self.assertEqual([{'display_name': name}],
                             profile.find_by_display_name(name))

        # duplicated display names are all returned
        listing = {
            'results': [
                {'display_name': 'resource-1'},
                {'display_name': 'resource-1'},
                {'display_name': 'resource-1'}
            ]
        }
        session_response = mocks.MockRequestsResponse(
            200, jsonutils.dumps(listing))
        profile = self._mocked_switching_profile(
            session_response=session_response)
        self.assertEqual(listing['results'],
                         profile.find_by_display_name('resource-1'))

    def test_list_all_profiles(self):
        """list() includes system-owned profiles in the GET query."""
        profile = self._mocked_switching_profile()
        profile.list()
        test_client.assert_json_call(
            'get', profile,
            'https://1.2.3.4/api/v1/switching-profiles/'
            '?include_system_owned=True',
            data=None)
|
||||
|
||||
|
||||
class LogicalPortTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for the resources.LogicalPort REST wrapper."""

    def _mocked_lport(self, session_response=None):
        # Build a LogicalPort resource backed by a mocked REST session.
        return self.mocked_resource(
            resources.LogicalPort, session_response=session_response)

    def _get_profile_dicts(self, fake_port):
        # Convert the port's switching_profile_ids key/value pairs into the
        # {'resource_type', 'id'} dicts build_switch_profile_ids() expects.
        fake_profile_dicts = []
        for profile_id in fake_port['switching_profile_ids']:
            fake_profile_dicts.append({'resource_type': profile_id['key'],
                                       'id': profile_id['value']})
        return fake_profile_dicts

    def _get_pktcls_bindings(self):
        # Build three fake packet classifiers together with the
        # address-binding dicts the API payload should contain for them.
        fake_pkt_classifiers = []
        fake_binding_repr = []
        for i in range(0, 3):
            ip = "9.10.11.%s" % i
            mac = "00:0c:29:35:4a:%sc" % i
            fake_pkt_classifiers.append(resources.PacketAddressClassifier(
                ip, mac, None))
            fake_binding_repr.append({
                'ip_address': ip,
                'mac_address': mac
            })
        return fake_pkt_classifiers, fake_binding_repr

    def test_create_logical_port(self):
        """Test creating a port

        returns the correct response and 200 status
        """
        fake_port = test_constants.FAKE_PORT.copy()

        profile_dicts = self._get_profile_dicts(fake_port)

        pkt_classifiers, binding_repr = self._get_pktcls_bindings()

        fake_port['address_bindings'] = binding_repr

        mocked_resource = self._mocked_lport()

        switch_profile = resources.SwitchingProfile
        mocked_resource.create(
            fake_port['logical_switch_id'],
            fake_port['attachment']['id'],
            address_bindings=pkt_classifiers,
            switch_profile_ids=switch_profile.build_switch_profile_ids(
                mock.Mock(), *profile_dicts))

        # VIF attachment and admin_state UP are the defaults.
        resp_body = {
            'logical_switch_id': fake_port['logical_switch_id'],
            'switching_profile_ids': fake_port['switching_profile_ids'],
            'attachment': {
                'attachment_type': 'VIF',
                'id': fake_port['attachment']['id']
            },
            'admin_state': 'UP',
            'address_bindings': fake_port['address_bindings']
        }

        test_client.assert_json_call(
            'post', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports',
            data=jsonutils.dumps(resp_body, sort_keys=True))

    def test_create_logical_port_with_attachtype_cif(self):
        """Test creating a port returns the correct response and 200 status

        """
        fake_port = test_constants.FAKE_CONTAINER_PORT.copy()

        profile_dicts = self._get_profile_dicts(fake_port)

        pkt_classifiers, binding_repr = self._get_pktcls_bindings()

        fake_port['address_bindings'] = binding_repr

        mocked_resource = self._mocked_lport()
        switch_profile = resources.SwitchingProfile
        fake_port_ctx = fake_port['attachment']['context']

        fake_container_host_vif_id = fake_port_ctx['container_host_vif_id']

        mocked_resource.create(
            fake_port['logical_switch_id'],
            fake_port['attachment']['id'],
            parent_vif_id=fake_container_host_vif_id,
            parent_tag=fake_port_ctx['vlan_tag'],
            address_bindings=pkt_classifiers,
            switch_profile_ids=switch_profile.build_switch_profile_ids(
                mock.Mock(), *profile_dicts))

        # Passing a parent VIF id switches the attachment to CIF and adds
        # a CifAttachmentContext carrying the VLAN tag.
        resp_body = {
            'logical_switch_id': fake_port['logical_switch_id'],
            'switching_profile_ids': fake_port['switching_profile_ids'],
            'attachment': {
                'attachment_type': 'CIF',
                'id': fake_port['attachment']['id'],
                'context': {
                    'vlan_tag': fake_port_ctx['vlan_tag'],
                    'container_host_vif_id': fake_container_host_vif_id,
                    'resource_type': 'CifAttachmentContext'
                }
            },
            'admin_state': 'UP',
            'address_bindings': fake_port['address_bindings']
        }

        test_client.assert_json_call(
            'post', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports',
            data=jsonutils.dumps(resp_body, sort_keys=True))

    def test_create_logical_port_admin_down(self):
        """Test creating port with admin_state down"""
        # Copy the shared constant before mutating it: previously this
        # assigned test_constants.FAKE_PORT directly, so setting
        # admin_state below leaked 'DOWN' into every other test that
        # reads FAKE_PORT.
        fake_port = test_constants.FAKE_PORT.copy()
        fake_port['admin_state'] = "DOWN"

        mocked_resource = self._mocked_lport(
            session_response=mocks.MockRequestsResponse(
                200, jsonutils.dumps(fake_port)))

        result = mocked_resource.create(
            test_constants.FAKE_PORT['logical_switch_id'],
            test_constants.FAKE_PORT['attachment']['id'],
            tags={}, admin_state=False)

        self.assertEqual(fake_port, result)

    def test_delete_logical_port(self):
        """Test deleting port"""
        mocked_resource = self._mocked_lport()

        uuid = test_constants.FAKE_PORT['id']
        mocked_resource.delete(uuid)
        test_client.assert_json_call(
            'delete', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports/%s?detach=true' % uuid)

    def test_clear_port_bindings(self):
        """Updating with an empty list clears the address bindings."""
        fake_port = copy.copy(test_constants.FAKE_PORT)
        fake_port['address_bindings'] = ['a', 'b']
        mocked_resource = self._mocked_lport()

        def get_fake_port(*args):
            return fake_port

        mocked_resource.get = get_fake_port
        mocked_resource.update(
            fake_port['id'], fake_port['id'], address_bindings=[])

        fake_port['address_bindings'] = []
        test_client.assert_json_call(
            'put', mocked_resource,
            'https://1.2.3.4/api/v1/logical-ports/%s' % fake_port['id'],
            data=jsonutils.dumps(fake_port, sort_keys=True))
|
||||
|
||||
|
||||
class LogicalRouterTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for the resources.LogicalRouter REST wrapper."""

    def _mocked_lrouter(self, session_response=None):
        # Wrap the LogicalRouter resource around a mocked REST session.
        return self.mocked_resource(
            resources.LogicalRouter, session_response=session_response)

    def test_create_logical_router(self):
        """Test creating a router returns the correct response and 201 status

        """
        fake_router = test_constants.FAKE_ROUTER.copy()
        router = self._mocked_lrouter()
        tier0_router = True

        router.create(fake_router['display_name'], None, None, tier0_router)

        expected = {
            'display_name': fake_router['display_name'],
            'router_type': 'TIER0' if tier0_router else 'TIER1',
            'tags': None
        }
        test_client.assert_json_call(
            'post', router,
            'https://1.2.3.4/api/v1/logical-routers',
            data=jsonutils.dumps(expected, sort_keys=True))

    def test_delete_logical_router(self):
        """Test deleting router"""
        router = self._mocked_lrouter()
        uuid = test_constants.FAKE_ROUTER['id']

        router.delete(uuid)

        test_client.assert_json_call(
            'delete', router,
            'https://1.2.3.4/api/v1/logical-routers/%s' % uuid)
|
||||
|
||||
|
||||
class LogicalRouterPortTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for the resources.LogicalRouterPort REST wrapper."""

    def _mocked_lrport(self, session_response=None):
        # Wrap the LogicalRouterPort resource around a mocked REST session.
        return self.mocked_resource(
            resources.LogicalRouterPort, session_response=session_response)

    def test_create_logical_router_port(self):
        """Test creating a router port

        returns the correct response and 201 status
        """
        fake_router_port = test_constants.FAKE_ROUTER_PORT.copy()
        lrport = self._mocked_lrport()

        lrport.create(fake_router_port['logical_router_id'],
                      fake_router_port['display_name'],
                      None,
                      fake_router_port['resource_type'],
                      None, None, None)

        expected = {
            'display_name': fake_router_port['display_name'],
            'logical_router_id': fake_router_port['logical_router_id'],
            'resource_type': fake_router_port['resource_type'],
            'tags': []
        }
        test_client.assert_json_call(
            'post', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports',
            data=jsonutils.dumps(expected, sort_keys=True))

    def test_logical_router_port_max_attempts(self):
        """The underlying client inherits the configured retry count."""
        lrport = self._mocked_lrport()

        self.assertEqual(nsxlib_testcase.NSX_MAX_ATTEMPTS,
                         lrport._client.max_attempts)

    def test_delete_logical_router_port(self):
        """Test deleting router port"""
        lrport = self._mocked_lrport()
        uuid = test_constants.FAKE_ROUTER_PORT['id']

        lrport.delete(uuid)

        test_client.assert_json_call(
            'delete', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports/%s' % uuid)

    def test_get_logical_router_port_by_router_id(self):
        """Test getting a router port by router id"""
        fake_router_port = test_constants.FAKE_ROUTER_PORT.copy()
        listing = {'results': [fake_router_port]}

        lrport = self._mocked_lrport(
            session_response=mocks.MockRequestsResponse(
                200, jsonutils.dumps(listing)))

        router_id = fake_router_port['logical_router_id']
        result = lrport.get_by_router_id(router_id)

        self.assertEqual(fake_router_port, result[0])
        test_client.assert_json_call(
            'get', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports/?'
            'logical_router_id=%s' % router_id)

    def test_get_logical_router_port_by_switch_id(self):
        """Test getting a router port by switch id"""
        fake_router_port = test_constants.FAKE_ROUTER_PORT.copy()
        listing = {
            'result_count': 1,
            'results': [fake_router_port]
        }

        lrport = self._mocked_lrport(
            session_response=mocks.MockRequestsResponse(
                200, jsonutils.dumps(listing)))

        switch_id = test_constants.FAKE_SWITCH_UUID
        lrport.get_by_lswitch_id(switch_id)

        test_client.assert_json_call(
            'get', lrport,
            'https://1.2.3.4/api/v1/logical-router-ports/?'
            'logical_switch_id=%s' % switch_id)
|
83
vmware_nsxlib/tests/unit/v3/test_switch.py
Normal file
83
vmware_nsxlib/tests/unit/v3/test_switch.py
Normal file
@ -0,0 +1,83 @@
|
||||
# Copyright (c) 2015 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import mock
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import mocks as nsx_v3_mocks
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.v3 import nsx_constants
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxLibSwitchTestCase(nsxlib_testcase.NsxClientTestCase):
    """Tests for the logical-switch API wrapper."""

    _tz_id = "8f602f97-ee3e-46b0-9d9f-358955f03608"

    def _create_body(self, admin_state=nsx_constants.ADMIN_STATE_UP,
                     vlan_id=None):
        # Expected request payload for a logical-switch create call.
        payload = {
            "transport_zone_id": NsxLibSwitchTestCase._tz_id,
            "replication_mode": "MTEP",
            "display_name": "fake_name",
            "tags": [],
            "admin_state": admin_state
        }
        if vlan_id:
            payload['vlan'] = vlan_id
        return payload

    def test_create_logical_switch(self):
        """Test creating a switch returns the correct response and 200 status

        """
        with mock.patch.object(self.nsxlib.client, 'create') as mock_create:
            self.nsxlib.logical_switch.create(
                nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id, [])
        mock_create.assert_called_with(
            'logical-switches', self._create_body())

    def test_create_logical_switch_admin_down(self):
        """Test creating switch with admin_state down"""
        with mock.patch.object(self.nsxlib.client, 'create') as mock_create:
            self.nsxlib.logical_switch.create(
                nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
                [], admin_state=False)

        mock_create.assert_called_with(
            'logical-switches',
            self._create_body(
                admin_state=nsx_constants.ADMIN_STATE_DOWN))

    def test_create_logical_switch_vlan(self):
        """Test creating switch with provider:network_type VLAN"""
        with mock.patch.object(self.nsxlib.client, 'create') as mock_create:
            self.nsxlib.logical_switch.create(
                nsx_v3_mocks.FAKE_NAME, NsxLibSwitchTestCase._tz_id,
                [], vlan_id='123')

        mock_create.assert_called_with(
            'logical-switches',
            self._create_body(vlan_id='123'))

    def test_delete_logical_switch(self):
        """Test deleting switch"""
        with mock.patch.object(self.nsxlib.client, 'delete') as mock_delete:
            fake_switch = nsx_v3_mocks.make_fake_switch()
            self.nsxlib.logical_switch.delete(fake_switch['id'])
        mock_delete.assert_called_with(
            'logical-switches/%s'
            '?detach=true&cascade=true' % fake_switch['id'])
|
204
vmware_nsxlib/tests/unit/v3/test_utils.py
Normal file
204
vmware_nsxlib/tests/unit/v3/test_utils.py
Normal file
@ -0,0 +1,204 @@
|
||||
# Copyright (c) 2015 OpenStack Foundation.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
# implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from neutron_lib import exceptions as n_exc
|
||||
|
||||
from vmware_nsxlib.tests.unit.v3 import nsxlib_testcase
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
|
||||
class TestNsxV3Utils(nsxlib_testcase.NsxClientTestCase):
    """Tests for the v3 tag-building and tag-update helpers."""

    @staticmethod
    def _tag_key(item):
        # Sort key used for order-insensitive tag list comparison.
        return item.get('tag')

    def _assert_tags_equal(self, expected, actual):
        # Tag ordering is not significant; compare sorted by tag value.
        self.assertEqual(sorted(expected, key=self._tag_key),
                         sorted(actual, key=self._tag_key))

    def test_build_v3_tags_payload(self):
        """Resource id, project id/name and API version become tags."""
        result = self.nsxlib.build_v3_tags_payload(
            {'id': 'fake_id',
             'tenant_id': 'fake_tenant_id'},
            resource_type='os-net-id',
            project_name='fake_tenant_name')
        expected = [{'scope': 'os-net-id', 'tag': 'fake_id'},
                    {'scope': 'os-project-id', 'tag': 'fake_tenant_id'},
                    {'scope': 'os-project-name', 'tag': 'fake_tenant_name'},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self.assertEqual(expected, result)

    def test_build_v3_tags_payload_internal(self):
        """A missing project name falls back to the plugin tag."""
        result = self.nsxlib.build_v3_tags_payload(
            {'id': 'fake_id',
             'tenant_id': 'fake_tenant_id'},
            resource_type='os-net-id',
            project_name=None)
        expected = [{'scope': 'os-net-id', 'tag': 'fake_id'},
                    {'scope': 'os-project-id', 'tag': 'fake_tenant_id'},
                    {'scope': 'os-project-name',
                     'tag': nsxlib_testcase.PLUGIN_TAG},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self.assertEqual(expected, result)

    def test_build_v3_tags_payload_invalid_length(self):
        """An over-long resource_type scope is rejected."""
        self.assertRaises(n_exc.InvalidInput,
                          self.nsxlib.build_v3_tags_payload,
                          {'id': 'fake_id',
                           'tenant_id': 'fake_tenant_id'},
                          resource_type='os-longer-maldini-rocks-id',
                          project_name='fake')

    def test_build_v3_api_version_tag(self):
        """The version tag carries the plugin scope/tag and API version."""
        result = self.nsxlib.build_v3_api_version_tag()
        expected = [{'scope': nsxlib_testcase.PLUGIN_SCOPE,
                     'tag': nsxlib_testcase.PLUGIN_TAG},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self.assertEqual(expected, result)

    def test_is_internal_resource(self):
        """Only resources tagged with the version tag are internal."""
        project_tag = self.nsxlib.build_v3_tags_payload(
            {'id': 'fake_id',
             'tenant_id': 'fake_tenant_id'},
            resource_type='os-net-id',
            project_name=None)
        internal_tag = self.nsxlib.build_v3_api_version_tag()

        self.assertFalse(
            self.nsxlib.is_internal_resource({'tags': project_tag}))
        self.assertTrue(
            self.nsxlib.is_internal_resource({'tags': internal_tag}))

    def test_get_name_and_uuid(self):
        """Long names are shortened to 80 chars with a '_<uuid>' suffix."""
        uuid = 'afc40f8a-4967-477e-a17a-9d560d1786c7'
        suffix = '_afc40...786c7'

        # A short name is kept intact, with the suffix appended.
        self.assertEqual('maldini%s' % suffix,
                         utils.get_name_and_uuid('maldini', uuid))

        # A 255-char name gets truncated so name + suffix is 80 chars.
        long_name = 'X' * 255
        expected = '%s%s' % ('X' * (80 - len(suffix)), suffix)
        self.assertEqual(expected,
                         utils.get_name_and_uuid(long_name, uuid))

    def test_build_v3_tags_max_length_payload(self):
        """Over-long tag values are truncated to 40 characters."""
        result = self.nsxlib.build_v3_tags_payload(
            {'id': 'X' * 255,
             'tenant_id': 'X' * 255},
            resource_type='os-net-id',
            project_name='X' * 255)
        expected = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-name', 'tag': 'X' * 40},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self.assertEqual(expected, result)

    def test_add_v3_tag(self):
        """add_v3_tag appends a scope/tag pair."""
        result = utils.add_v3_tag([], 'fake-scope', 'fake-tag')
        self.assertEqual([{'scope': 'fake-scope', 'tag': 'fake-tag'}],
                         result)

    def test_add_v3_tag_max_length_payload(self):
        """add_v3_tag truncates over-long tag values to 40 characters."""
        result = utils.add_v3_tag([], 'fake-scope', 'X' * 255)
        self.assertEqual([{'scope': 'fake-scope', 'tag': 'X' * 40}],
                         result)

    def test_add_v3_tag_invalid_scope_length(self):
        """add_v3_tag rejects an over-long scope."""
        self.assertRaises(n_exc.InvalidInput,
                          utils.add_v3_tag,
                          [],
                          'fake-scope-name-is-far-too-long',
                          'fake-tag')

    def test_update_v3_tags_addition(self):
        """A tag with a new scope is appended."""
        tags = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                {'scope': 'os-project-id', 'tag': 'Y' * 40},
                {'scope': 'os-project-name', 'tag': 'Z' * 40},
                {'scope': 'os-api-version',
                 'tag': nsxlib_testcase.PLUGIN_VER}]
        updates = [{'scope': 'os-instance-uuid',
                    'tag': 'A' * 40}]
        tags = utils.update_v3_tags(tags, updates)
        expected = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-id', 'tag': 'Y' * 40},
                    {'scope': 'os-project-name', 'tag': 'Z' * 40},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER},
                    {'scope': 'os-instance-uuid',
                     'tag': 'A' * 40}]
        self._assert_tags_equal(expected, tags)

    def test_update_v3_tags_removal(self):
        """An empty tag value removes the tag for that scope."""
        tags = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                {'scope': 'os-project-id', 'tag': 'Y' * 40},
                {'scope': 'os-project-name', 'tag': 'Z' * 40},
                {'scope': 'os-api-version',
                 'tag': nsxlib_testcase.PLUGIN_VER}]
        updates = [{'scope': 'os-net-id',
                    'tag': ''}]
        tags = utils.update_v3_tags(tags, updates)
        expected = [{'scope': 'os-project-id', 'tag': 'Y' * 40},
                    {'scope': 'os-project-name', 'tag': 'Z' * 40},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self._assert_tags_equal(expected, tags)

    def test_update_v3_tags_update(self):
        """An existing scope gets its tag value replaced."""
        tags = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                {'scope': 'os-project-id', 'tag': 'Y' * 40},
                {'scope': 'os-project-name', 'tag': 'Z' * 40},
                {'scope': 'os-api-version',
                 'tag': nsxlib_testcase.PLUGIN_VER}]
        updates = [{'scope': 'os-project-id',
                    'tag': 'A' * 40}]
        tags = utils.update_v3_tags(tags, updates)
        expected = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-id', 'tag': 'A' * 40},
                    {'scope': 'os-project-name', 'tag': 'Z' * 40},
                    {'scope': 'os-api-version',
                     'tag': nsxlib_testcase.PLUGIN_VER}]
        self._assert_tags_equal(expected, tags)

    def test_update_v3_tags_repetitive_scopes(self):
        """All tags of a repeated scope are replaced by the new set."""
        tags = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                {'scope': 'os-project-id', 'tag': 'Y' * 40},
                {'scope': 'os-project-name', 'tag': 'Z' * 40},
                {'scope': 'os-security-group', 'tag': 'SG1'},
                {'scope': 'os-security-group', 'tag': 'SG2'}]
        tags_update = [{'scope': 'os-security-group', 'tag': 'SG3'},
                       {'scope': 'os-security-group', 'tag': 'SG4'}]
        tags = utils.update_v3_tags(tags, tags_update)
        expected = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-id', 'tag': 'Y' * 40},
                    {'scope': 'os-project-name', 'tag': 'Z' * 40},
                    {'scope': 'os-security-group', 'tag': 'SG3'},
                    {'scope': 'os-security-group', 'tag': 'SG4'}]
        self._assert_tags_equal(expected, tags)

    def test_update_v3_tags_repetitive_scopes_remove(self):
        """A None tag removes every tag of a repeated scope."""
        tags = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                {'scope': 'os-project-id', 'tag': 'Y' * 40},
                {'scope': 'os-project-name', 'tag': 'Z' * 40},
                {'scope': 'os-security-group', 'tag': 'SG1'},
                {'scope': 'os-security-group', 'tag': 'SG2'}]
        tags_update = [{'scope': 'os-security-group', 'tag': None}]
        tags = utils.update_v3_tags(tags, tags_update)
        expected = [{'scope': 'os-net-id', 'tag': 'X' * 40},
                    {'scope': 'os-project-id', 'tag': 'Y' * 40},
                    {'scope': 'os-project-name', 'tag': 'Z' * 40}]
        self._assert_tags_equal(expected, tags)
|
437
vmware_nsxlib/v3/__init__.py
Normal file
437
vmware_nsxlib/v3/__init__.py
Normal file
@ -0,0 +1,437 @@
|
||||
# Copyright 2016 OpenStack Foundation
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib._i18n import _, _LW
|
||||
from vmware_nsxlib.v3 import client
|
||||
from vmware_nsxlib.v3 import cluster
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import native_dhcp
|
||||
from vmware_nsxlib.v3 import nsx_constants
|
||||
from vmware_nsxlib.v3 import security
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NsxLib(object):
    """Facade over all NSX v3 backend APIs.

    Builds the clustered API endpoint manager and the REST client from
    the supplied ``nsxlib_config``, then wires up one helper object per
    NSX resource family (switching, routing, QoS, security, DHCP, ...).
    """

    def __init__(self, nsxlib_config):

        self.nsxlib_config = nsxlib_config

        # create the Cluster
        self.cluster = cluster.NSXClusteredAPI(nsxlib_config)

        # create the Client
        self.client = client.NSX3Client(
            self.cluster,
            max_attempts=nsxlib_config.max_attempts)

        # init the api object
        # Each helper shares the single client/config pair created above.
        self.general_apis = utils.NsxLibApiBase(
            self.client, nsxlib_config)
        self.port_mirror = NsxLibPortMirror(
            self.client, nsxlib_config)
        self.bridge_endpoint = NsxLibBridgeEndpoint(
            self.client, nsxlib_config)
        self.logical_switch = NsxLibLogicalSwitch(
            self.client, nsxlib_config)
        self.logical_router = NsxLibLogicalRouter(
            self.client, nsxlib_config)
        self.qos_switching_profile = NsxLibQosSwitchingProfile(
            self.client, nsxlib_config)
        self.edge_cluster = NsxLibEdgeCluster(
            self.client, nsxlib_config)
        self.bridge_cluster = NsxLibBridgeCluster(
            self.client, nsxlib_config)
        self.transport_zone = NsxLibTransportZone(
            self.client, nsxlib_config)
        self.firewall_section = security.NsxLibFirewallSection(
            self.client, nsxlib_config)
        # ns_group needs the firewall_section helper to clean up rules.
        self.ns_group = security.NsxLibNsGroup(
            self.client, nsxlib_config, self.firewall_section)
        self.native_dhcp = native_dhcp.NsxLibNativeDhcp(
            self.client, nsxlib_config)

        super(NsxLib, self).__init__()

    def get_version(self):
        """Return the NSX manager's node version string (may be None)."""
        node = self.client.get("node")
        version = node.get('node_version')
        return version

    def build_v3_api_version_tag(self):
        """Delegate to the general API helper; see NsxLibApiBase."""
        return self.general_apis.build_v3_api_version_tag()

    def is_internal_resource(self, nsx_resource):
        """Delegate to the general API helper; see NsxLibApiBase."""
        return self.general_apis.is_internal_resource(nsx_resource)

    def build_v3_tags_payload(self, resource, resource_type, project_name):
        """Delegate to the general API helper; see NsxLibApiBase."""
        return self.general_apis.build_v3_tags_payload(
            resource, resource_type, project_name)

    def reinitialize_cluster(self, resource, event, trigger, **kwargs):
        # Signature matches an event-callback convention (resource/event/
        # trigger unused here); simply rebuilds the cluster endpoints.
        self.cluster._reinit_cluster()
|
||||
|
||||
|
||||
class NsxLibPortMirror(utils.NsxLibApiBase):
    """API helper for NSX port-mirroring sessions."""

    def create_session(self, source_ports, dest_ports, direction,
                       description, name, tags):
        """Create a PortMirror Session on the backend.

        :param source_ports: List of UUIDs of the ports whose traffic is to be
                             mirrored.
        :param dest_ports: List of UUIDs of the ports where the mirrored
                           traffic is to be sent.
        :param direction: String representing the direction of traffic to be
                          mirrored. [INGRESS, EGRESS, BIDIRECTIONAL]
        :param description: String representing the description of the session.
        :param name: String representing the name of the session.
        :param tags: nsx backend specific tags.
        """
        session_spec = {
            'direction': direction,
            'tags': tags,
            'display_name': name,
            'description': description,
            'mirror_sources': source_ports,
            'mirror_destination': dest_ports,
        }
        return self.client.create('mirror-sessions', session_spec)

    def delete_session(self, mirror_session_id):
        """Delete a PortMirror session on the backend.

        :param mirror_session_id: string representing the UUID of the port
                                  mirror session to be deleted.
        """
        self.client.delete('mirror-sessions/%s' % mirror_session_id)
|
||||
|
||||
|
||||
class NsxLibBridgeEndpoint(utils.NsxLibApiBase):
    """API helper for NSX bridge endpoints (L2 gateway connections)."""

    def create(self, device_name, seg_id, tags):
        """Create a bridge endpoint on the backend.

        Create a bridge endpoint resource on a bridge cluster for the L2
        gateway network connection.
        :param device_name: device_name actually refers to the bridge cluster's
                            UUID.
        :param seg_id: integer representing the VLAN segmentation ID.
        :param tags: nsx backend specific tags.
        """
        endpoint_spec = {
            'bridge_cluster_id': device_name,
            'tags': tags,
            'vlan': seg_id,
        }
        return self.client.create('bridge-endpoints', endpoint_spec)

    def delete(self, bridge_endpoint_id):
        """Delete a bridge endpoint on the backend.

        :param bridge_endpoint_id: string representing the UUID of the bridge
                                   endpoint to be deleted.
        """
        self.client.delete('bridge-endpoints/%s' % bridge_endpoint_id)
|
||||
|
||||
|
||||
class NsxLibLogicalSwitch(utils.NsxLibApiBase):
    """API helper for NSX logical switches (CRUD with retry on conflict)."""

    def create(self, display_name, transport_zone_id, tags,
               replication_mode=nsx_constants.MTEP,
               admin_state=True, vlan_id=None):
        """Create a logical switch in the given transport zone.

        ``admin_state`` (bool) is translated to the backend UP/DOWN
        constants; a truthy ``vlan_id`` is attached as the 'vlan' field.
        """
        # TODO(salv-orlando): Validate Replication mode and admin_state
        # NOTE: These checks might be moved to the API client library if one
        # that performs such checks in the client is available

        resource = 'logical-switches'
        body = {'transport_zone_id': transport_zone_id,
                'replication_mode': replication_mode,
                'display_name': display_name,
                'tags': tags}

        if admin_state:
            body['admin_state'] = nsx_constants.ADMIN_STATE_UP
        else:
            body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN

        # NOTE(review): a falsy vlan_id (including 0) is silently
        # dropped here — confirm VLAN 0 is never a valid input.
        if vlan_id:
            body['vlan'] = vlan_id

        return self.client.create(resource, body)

    def delete(self, lswitch_id):
        """Delete the switch, detaching ports and cascading child objects."""
        #Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self.nsxlib_config.max_attempts)
        def _do_delete():
            # detach=true & cascade=true: force-remove attachments too.
            resource = ('logical-switches/%s?detach=true&cascade=true' %
                        lswitch_id)
            self.client.delete(resource)

        _do_delete()

    def get(self, logical_switch_id):
        """Return the backend representation of the given switch."""
        resource = "logical-switches/%s" % logical_switch_id
        return self.client.get(resource)

    def update(self, lswitch_id, name=None, admin_state=None, tags=None):
        """Update name/admin_state/tags; None means 'leave unchanged'."""
        #Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self.nsxlib_config.max_attempts)
        def _do_update():
            resource = "logical-switches/%s" % lswitch_id
            # Re-read inside the retry so each attempt gets a fresh
            # revision of the object (avoids StaleRevision loops).
            lswitch = self.get(lswitch_id)
            if name is not None:
                lswitch['display_name'] = name
            if admin_state is not None:
                if admin_state:
                    lswitch['admin_state'] = nsx_constants.ADMIN_STATE_UP
                else:
                    lswitch['admin_state'] = nsx_constants.ADMIN_STATE_DOWN
            if tags is not None:
                lswitch['tags'] = tags
            return self.client.update(resource, lswitch)

        return _do_update()
|
||||
|
||||
|
||||
class NsxLibQosSwitchingProfile(utils.NsxLibApiBase):
    """API helper for NSX QoS switching profiles.

    Covers profile CRUD plus private helpers that translate shaping and
    DSCP settings into the backend payload format.
    """

    def _build_args(self, tags, name=None, description=None):
        # Skeleton payload for a brand-new profile.
        body = {"resource_type": "QosSwitchingProfile",
                "tags": tags}
        return self._update_args(
            body, name=name, description=description)

    def _update_args(self, body, name=None, description=None):
        # Only set the fields the caller supplied (truthy values only).
        if name:
            body["display_name"] = name
        if description:
            body["description"] = description
        return body

    def _enable_shaping_in_args(self, body, burst_size=None,
                                peak_bandwidth=None, average_bandwidth=None):
        """Enable egress rate shaping in the payload; only truthy limits
        are written, others keep their current backend values."""
        for shaper in body["shaper_configuration"]:
            # We currently supports only shaping of Egress traffic
            if shaper["resource_type"] == "EgressRateShaper":
                shaper["enabled"] = True
                if burst_size:
                    shaper["burst_size_bytes"] = burst_size
                if peak_bandwidth:
                    shaper["peak_bandwidth_mbps"] = peak_bandwidth
                if average_bandwidth:
                    shaper["average_bandwidth_mbps"] = average_bandwidth
                break

        return body

    def _disable_shaping_in_args(self, body):
        """Disable egress rate shaping and zero out all of its limits."""
        for shaper in body["shaper_configuration"]:
            # We currently supports only shaping of Egress traffic
            if shaper["resource_type"] == "EgressRateShaper":
                shaper["enabled"] = False
                shaper["burst_size_bytes"] = 0
                shaper["peak_bandwidth_mbps"] = 0
                shaper["average_bandwidth_mbps"] = 0
                break

        return body

    def _update_dscp_in_args(self, body, qos_marking, dscp):
        """Write DSCP marking mode (upper-cased) and optional priority."""
        body["dscp"] = {}
        body["dscp"]["mode"] = qos_marking.upper()
        if dscp:
            body["dscp"]["priority"] = dscp

        return body

    def create(self, tags, name=None, description=None):
        """Create a QoS switching profile on the backend."""
        resource = 'switching-profiles'
        body = self._build_args(tags, name, description)
        return self.client.create(resource, body)

    def update(self, profile_id, tags, name=None, description=None):
        """Update name/description of an existing profile.

        NOTE(review): the ``tags`` parameter is accepted but never
        written into the payload — confirm whether tags updates were
        intended here.
        """
        resource = 'switching-profiles/%s' % profile_id
        # get the current configuration
        body = self.get(profile_id)
        # update the relevant fields
        body = self._update_args(body, name, description)
        return self._update_resource_with_retry(resource, body)

    def update_shaping(self, profile_id,
                       shaping_enabled=False,
                       burst_size=None,
                       peak_bandwidth=None,
                       average_bandwidth=None,
                       qos_marking=None, dscp=None):
        """Update shaping and (optionally) DSCP settings of a profile."""
        resource = 'switching-profiles/%s' % profile_id
        # get the current configuration
        body = self.get(profile_id)
        # update the relevant fields
        if shaping_enabled:
            body = self._enable_shaping_in_args(
                body, burst_size=burst_size,
                peak_bandwidth=peak_bandwidth,
                average_bandwidth=average_bandwidth)
        else:
            body = self._disable_shaping_in_args(body)
        # Bug fix: the original called _update_dscp_in_args unconditionally,
        # which raised AttributeError (None.upper()) whenever qos_marking was
        # left at its default of None. Skip the DSCP update in that case.
        if qos_marking is not None:
            body = self._update_dscp_in_args(body, qos_marking, dscp)
        return self._update_resource_with_retry(resource, body)

    def get(self, profile_id):
        """Return the profile identified by ``profile_id``."""
        resource = 'switching-profiles/%s' % profile_id
        return self.client.get(resource)

    def delete(self, profile_id):
        """Delete the profile identified by ``profile_id``."""
        resource = 'switching-profiles/%s' % profile_id
        self.client.delete(resource)
|
||||
|
||||
|
||||
class NsxLibLogicalRouter(utils.NsxLibApiBase):
    """API helper for NSX logical routers: NAT rules, static routes,
    route advertisement, and name/UUID resolution."""

    def _delete_resource_by_values(self, resource,
                                   skip_not_found=True, **kwargs):
        """Delete every child of ``resource`` whose fields match kwargs.

        Matching uses utils.dict_match. With skip_not_found=True (default),
        zero matches only logs a warning; otherwise ResourceNotFound is
        raised. More than one match is allowed but logged.
        """
        resources_get = self.client.get(resource)
        matched_num = 0
        for res in resources_get['results']:
            if utils.dict_match(kwargs, res):
                LOG.debug("Deleting %s from resource %s", res, resource)
                delete_resource = resource + "/" + str(res['id'])
                self.client.delete(delete_resource)
                matched_num = matched_num + 1
        if matched_num == 0:
            if skip_not_found:
                LOG.warning(_LW("No resource in %(res)s matched for values: "
                                "%(values)s"), {'res': resource,
                                                'values': kwargs})
            else:
                err_msg = (_("No resource in %(res)s matched for values: "
                             "%(values)s") % {'res': resource,
                                              'values': kwargs})
                # NOTE(review): self.cluster is not set anywhere in this
                # class — confirm NsxLibApiBase provides it, otherwise this
                # error path raises AttributeError instead.
                raise exceptions.ResourceNotFound(
                    manager=self.cluster.nsx_api_managers,
                    operation=err_msg)
        elif matched_num > 1:
            LOG.warning(_LW("%(num)s resources in %(res)s matched for values: "
                            "%(values)s"), {'num': matched_num,
                                            'res': resource,
                                            'values': kwargs})

    def add_nat_rule(self, logical_router_id, action, translated_network,
                     source_net=None, dest_net=None,
                     enabled=True, rule_priority=None):
        """Create a NAT rule on the router; optional match fields and
        priority are only sent when truthy."""
        resource = 'logical-routers/%s/nat/rules' % logical_router_id
        body = {'action': action,
                'enabled': enabled,
                'translated_network': translated_network}
        if source_net:
            body['match_source_network'] = source_net
        if dest_net:
            body['match_destination_network'] = dest_net
        if rule_priority:
            body['rule_priority'] = rule_priority
        return self.client.create(resource, body)

    def add_static_route(self, logical_router_id, dest_cidr, nexthop):
        """Create a static route; dest_cidr/nexthop are optional fields."""
        resource = ('logical-routers/%s/routing/static-routes' %
                    logical_router_id)
        body = {}
        if dest_cidr:
            body['network'] = dest_cidr
        if nexthop:
            body['next_hops'] = [{"ip_address": nexthop}]
        return self.client.create(resource, body)

    def delete_static_route(self, logical_router_id, static_route_id):
        """Delete one static route by its UUID."""
        resource = 'logical-routers/%s/routing/static-routes/%s' % (
            logical_router_id, static_route_id)
        self.client.delete(resource)

    def delete_static_route_by_values(self, logical_router_id,
                                      dest_cidr=None, nexthop=None):
        """Delete static routes matching the given network/nexthop."""
        resource = ('logical-routers/%s/routing/static-routes' %
                    logical_router_id)
        kwargs = {}
        if dest_cidr:
            kwargs['network'] = dest_cidr
        if nexthop:
            kwargs['next_hops'] = [{"ip_address": nexthop}]
        return self._delete_resource_by_values(resource, **kwargs)

    def delete_nat_rule(self, logical_router_id, nat_rule_id):
        """Delete one NAT rule by its UUID."""
        resource = 'logical-routers/%s/nat/rules/%s' % (logical_router_id,
                                                        nat_rule_id)
        self.client.delete(resource)

    def delete_nat_rule_by_values(self, logical_router_id, **kwargs):
        """Delete NAT rules whose fields match kwargs."""
        resource = 'logical-routers/%s/nat/rules' % logical_router_id
        return self._delete_resource_by_values(resource, **kwargs)

    def update_advertisement(self, logical_router_id, **kwargs):
        """Update the router's route-advertisement config with kwargs."""
        resource = ('logical-routers/%s/routing/advertisement' %
                    logical_router_id)
        return self._update_resource_with_retry(resource, kwargs)

    def get_id_by_name_or_id(self, name_or_id):
        """Get a logical router by it's display name or uuid

        Return the logical router data, or raise an exception if not found or
        not unique
        """

        return self._get_resource_by_name_or_id(name_or_id,
                                                'logical-routers')
|
||||
|
||||
|
||||
class NsxLibEdgeCluster(utils.NsxLibApiBase):
    """API helper for NSX edge clusters."""

    def get(self, edge_cluster_uuid):
        """Fetch the edge cluster identified by the given UUID."""
        return self.client.get("edge-clusters/%s" % edge_cluster_uuid)
|
||||
|
||||
|
||||
class NsxLibTransportZone(utils.NsxLibApiBase):
    """API helper for NSX transport zones."""

    def get_id_by_name_or_id(self, name_or_id):
        """Resolve a transport zone by display name or UUID.

        Returns the transport zone data, or raises if it is not found
        or the name is not unique.
        """
        return self._get_resource_by_name_or_id(
            name_or_id, 'transport-zones')
|
||||
|
||||
|
||||
class NsxLibBridgeCluster(utils.NsxLibApiBase):
    """API helper for NSX bridge clusters."""

    def get_id_by_name_or_id(self, name_or_id):
        """Resolve a bridge cluster by display name or UUID.

        Returns the bridge cluster data, or raises if it is not found
        or the name is not unique.
        """
        return self._get_resource_by_name_or_id(
            name_or_id, 'bridge-clusters')
|
208
vmware_nsxlib/v3/client.py
Normal file
208
vmware_nsxlib/v3/client.py
Normal file
@ -0,0 +1,208 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import requests
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from oslo_log import log
|
||||
from oslo_serialization import jsonutils
|
||||
from vmware_nsxlib._i18n import _, _LW
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Map well-known HTTP error codes to the library exception raised for
# them; any other error status falls back to the generic ManagerError.
ERRORS = {requests.codes.NOT_FOUND: exceptions.ResourceNotFound,
          requests.codes.PRECONDITION_FAILED: exceptions.StaleRevision}
DEFAULT_ERROR = exceptions.ManagerError
|
||||
|
||||
|
||||
class RESTClient(object):
    """Minimal REST client over a duck-typed HTTP connection.

    ``connection`` must expose requests.Session-style methods
    (get/put/post/delete). All URLs are built relative to ``url_prefix``
    and every response is validated against the per-verb expected
    status codes.
    """

    # Accepted response codes per HTTP verb; anything else raises.
    _VERB_RESP_CODES = {
        'get': [requests.codes.ok],
        'post': [requests.codes.created, requests.codes.ok],
        'put': [requests.codes.ok],
        'delete': [requests.codes.ok]
    }

    def __init__(self, connection, url_prefix=None,
                 default_headers=None,
                 max_attempts=utils.DEFAULT_MAX_ATTEMPTS):
        self._conn = connection
        self._url_prefix = url_prefix or ""
        self._default_headers = default_headers or {}
        self.max_attempts = max_attempts

    def new_client_for(self, *uri_segments):
        """Return a new client of the same class rooted at
        this client's prefix joined with ``uri_segments``."""
        uri = self._build_url('/'.join(uri_segments))

        return self.__class__(
            self._conn,
            url_prefix=uri,
            default_headers=self._default_headers,
            max_attempts=self.max_attempts)

    def list(self, headers=None):
        # Bug fix: 'headers' was previously dropped instead of being
        # forwarded to url_list().
        return self.url_list('', headers=headers)

    def get(self, uuid, headers=None):
        return self.url_get(uuid, headers=headers)

    def delete(self, uuid, headers=None):
        return self.url_delete(uuid, headers=headers)

    def update(self, uuid, body=None, headers=None):
        return self.url_put(uuid, body, headers=headers)

    def create(self, resource='', body=None, headers=None):
        return self.url_post(resource, body, headers=headers)

    def url_list(self, url, headers=None):
        return self.url_get(url, headers=headers)

    def url_get(self, url, headers=None):
        return self._rest_call(url, method='GET', headers=headers)

    def url_delete(self, url, headers=None):
        return self._rest_call(url, method='DELETE', headers=headers)

    def url_put(self, url, body, headers=None):
        return self._rest_call(url, method='PUT', body=body, headers=headers)

    def url_post(self, url, body, headers=None):
        return self._rest_call(url, method='POST', body=body, headers=headers)

    def _raise_error(self, status_code, operation, result_msg):
        """Raise the library exception mapped to ``status_code``."""
        error = ERRORS.get(status_code, DEFAULT_ERROR)
        raise error(manager='', operation=operation, details=result_msg)

    def _validate_result(self, result, expected, operation):
        """Log and raise when the response status is not in ``expected``."""
        if result.status_code not in expected:
            result_msg = result.json() if result.content else ''
            LOG.warning(_LW("The HTTP request returned error code "
                            "%(result)d, whereas %(expected)s response "
                            "codes were expected. Response body %(body)s"),
                        {'result': result.status_code,
                         'expected': '/'.join([str(code)
                                               for code in expected]),
                         'body': result_msg})

            # Flatten the backend error payload (plus any related errors)
            # into a single message string for the raised exception.
            if isinstance(result_msg, dict) and 'error_message' in result_msg:
                related_errors = [error['error_message'] for error in
                                  result_msg.get('related_errors', [])]
                result_msg = result_msg['error_message']
                if related_errors:
                    result_msg += " relatedErrors: %s" % ' '.join(
                        related_errors)
            self._raise_error(result.status_code, operation, result_msg)

    @classmethod
    def merge_headers(cls, *headers):
        """Merge header dicts left-to-right; later dicts win, None skipped."""
        merged = {}
        for header in headers:
            if header:
                merged.update(header)
        return merged

    def _build_url(self, uri):
        """Join the configured prefix with ``uri``, preserving any
        scheme/netloc present in the prefix and collapsing slashes."""
        prefix = urlparse.urlparse(self._url_prefix)
        uri = ("/%s/%s" % (prefix.path, uri)).replace('//', '/').strip('/')
        if prefix.netloc:
            uri = "%s/%s" % (prefix.netloc, uri)
        if prefix.scheme:
            uri = "%s://%s" % (prefix.scheme, uri)
        return uri

    def _rest_call(self, url, method='GET', body=None, headers=None):
        """Issue one HTTP call and validate the response.

        Note: default headers are applied AFTER caller headers, so the
        client's defaults always win on key collisions (this is what
        keeps JSONRESTClient's content-type enforced).
        """
        request_headers = headers.copy() if headers else {}
        request_headers.update(self._default_headers)
        request_url = self._build_url(url)

        do_request = getattr(self._conn, method.lower())

        LOG.debug("REST call: %s %s\nHeaders: %s\nBody: %s",
                  method, request_url, request_headers, body)

        result = do_request(
            request_url,
            data=body,
            headers=request_headers)

        # NOTE(review): this logs via result.json(), which parses the
        # body a second time and raises on non-JSON responses — confirm
        # all backend responses are JSON.
        LOG.debug("REST call: %s %s\nResponse: %s",
                  method, request_url, result.json() if result.content else '')

        self._validate_result(
            result, RESTClient._VERB_RESP_CODES[method.lower()],
            _("%(verb)s %(url)s") % {'verb': method, 'url': request_url})
        return result
|
||||
|
||||
|
||||
class JSONRESTClient(RESTClient):
    """RESTClient specialization that speaks JSON in both directions."""

    _DEFAULT_HEADERS = {
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }

    def __init__(self, connection, url_prefix=None,
                 default_headers=None,
                 max_attempts=utils.DEFAULT_MAX_ATTEMPTS):
        # JSON headers form the base; caller-supplied defaults may extend
        # or override them.
        merged_headers = RESTClient.merge_headers(
            JSONRESTClient._DEFAULT_HEADERS, default_headers)
        super(JSONRESTClient, self).__init__(
            connection,
            url_prefix=url_prefix,
            default_headers=merged_headers,
            max_attempts=max_attempts)

    def _rest_call(self, *args, **kwargs):
        # Serialize outgoing bodies deterministically (sorted keys) and
        # decode JSON responses; empty responses pass through untouched.
        if kwargs.get('body') is not None:
            kwargs['body'] = jsonutils.dumps(kwargs['body'], sort_keys=True)
        response = super(JSONRESTClient, self)._rest_call(*args, **kwargs)
        return response.json() if response.content else response
|
||||
|
||||
|
||||
class NSX3Client(JSONRESTClient):
    """JSON client pinned to the NSX v1 API prefix, with errors enriched
    by the configured manager IPs."""

    _NSX_V1_API_PREFIX = 'api/v1/'

    def __init__(self, connection, url_prefix=None,
                 default_headers=None,
                 nsx_api_managers=None,
                 max_attempts=utils.DEFAULT_MAX_ATTEMPTS):

        self.nsx_api_managers = nsx_api_managers or []

        # Ensure the API prefix appears exactly once in the final prefix:
        # append it to absolute URLs, otherwise prepend it to the path.
        url_prefix = url_prefix or NSX3Client._NSX_V1_API_PREFIX
        if url_prefix and NSX3Client._NSX_V1_API_PREFIX not in url_prefix:
            if url_prefix.startswith('http'):
                url_prefix += '/' + NSX3Client._NSX_V1_API_PREFIX
            else:
                url_prefix = "%s/%s" % (NSX3Client._NSX_V1_API_PREFIX,
                                        url_prefix or '')
        # NOTE(review): parent __init__ also sets max_attempts; this
        # earlier assignment looks redundant — confirm before removing.
        self.max_attempts = max_attempts

        super(NSX3Client, self).__init__(
            connection, url_prefix=url_prefix,
            default_headers=default_headers,
            max_attempts=max_attempts)

    def _raise_error(self, status_code, operation, result_msg):
        """Override the Rest client errors to add the manager IPs"""
        error = ERRORS.get(status_code, DEFAULT_ERROR)
        raise error(manager=self.nsx_api_managers,
                    operation=operation,
                    details=result_msg)
|
493
vmware_nsxlib/v3/cluster.py
Normal file
493
vmware_nsxlib/v3/cluster.py
Normal file
@ -0,0 +1,493 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import abc
|
||||
import contextlib
|
||||
import copy
|
||||
import datetime
|
||||
import eventlet
|
||||
import itertools
|
||||
import logging
|
||||
import requests
|
||||
import six
|
||||
import six.moves.urllib.parse as urlparse
|
||||
|
||||
from eventlet import greenpool
|
||||
from eventlet import pools
|
||||
from oslo_log import log
|
||||
from oslo_service import loopingcall
|
||||
from requests import adapters
|
||||
from requests import exceptions as requests_exceptions
|
||||
from vmware_nsxlib._i18n import _, _LI, _LW
|
||||
from vmware_nsxlib.v3 import client as nsx_client
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# disable warning message for each HTTP retry
|
||||
logging.getLogger(
|
||||
"requests.packages.urllib3.connectionpool").setLevel(logging.ERROR)
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class AbstractHTTPProvider(object):
    """Interface for providers of HTTP connections which
    are responsible for creating and validating connections
    for their underlying HTTP support.
    """

    @property
    def default_scheme(self):
        # URL scheme assumed when an endpoint provider omits one.
        return 'https'

    @abc.abstractproperty
    def provider_id(self):
        """A unique string name for this provider."""
        pass

    @abc.abstractmethod
    def validate_connection(self, cluster_api, endpoint, conn):
        """Validate the said connection for the given endpoint and cluster.

        Implementations should raise on an unusable connection.
        """
        pass

    @abc.abstractmethod
    def new_connection(self, cluster_api, provider):
        """Create a new http connection for the said cluster and
        cluster provider. The actual connection should duck type
        requests.Session http methods (get(), put(), etc.).
        """
        pass

    @abc.abstractmethod
    def is_connection_exception(self, exception):
        """Determine if the given exception is related to connection
        failure. Return True if it's a connection exception and
        False otherwise.
        """
|
||||
|
||||
|
||||
class TimeoutSession(requests.Session):
    """requests.Session that applies a default (connect, read) timeout
    pair to every request made through it."""

    def __init__(self, timeout, read_timeout):
        self.timeout = timeout
        self.read_timeout = read_timeout
        super(TimeoutSession, self).__init__()

    # wrapper timeouts at the session level
    # see: https://goo.gl/xNk7aM
    def request(self, *args, **kwargs):
        # An explicit per-call timeout wins; otherwise fall back to the
        # session-wide (connect, read) pair.
        kwargs.setdefault('timeout', (self.timeout, self.read_timeout))
        return super(TimeoutSession, self).request(*args, **kwargs)
|
||||
|
||||
|
||||
class NSXRequestsHTTPProvider(AbstractHTTPProvider):
    """Concrete implementation of AbstractHTTPProvider
    using requests.Session() as the underlying connection.
    """

    @property
    def provider_id(self):
        # e.g. "requests-2.x.y" — identifies the HTTP stack in use.
        return "%s-%s" % (requests.__title__, requests.__version__)

    def validate_connection(self, cluster_api, endpoint, conn):
        """Probe the endpoint by listing transport zones; raise
        ResourceNotFound when none are visible (endpoint unusable)."""
        client = nsx_client.NSX3Client(conn, url_prefix=endpoint.provider.url)
        zones = client.get('transport-zones')
        if not zones or zones['result_count'] <= 0:
            msg = _("No transport zones found "
                    "for '%s'") % endpoint.provider.url
            LOG.warning(msg)
            raise exceptions.ResourceNotFound(
                manager=endpoint.provider.url, operation=msg)

    def new_connection(self, cluster_api, provider):
        """Build a requests session with auth, TLS verification and
        retry settings taken from the cluster's nsxlib_config."""
        config = cluster_api.nsxlib_config
        session = TimeoutSession(config.http_timeout,
                                 config.http_read_timeout)
        session.auth = (provider.username, provider.password)
        # NSX v3 doesn't use redirects
        session.max_redirects = 0

        session.verify = not config.insecure
        if session.verify and provider.ca_file:
            # verify using the said ca bundle path
            session.verify = provider.ca_file

        # we are pooling with eventlet in the cluster class
        # (hence a 1-connection pool per session, non-blocking)
        adapter = adapters.HTTPAdapter(
            pool_connections=1, pool_maxsize=1,
            max_retries=config.retries,
            pool_block=False)
        session.mount('http://', adapter)
        session.mount('https://', adapter)

        return session

    def is_connection_exception(self, exception):
        # Only transport-level failures count; HTTP error statuses do not.
        return isinstance(exception, requests_exceptions.ConnectionError)
|
||||
|
||||
|
||||
class ClusterHealth(object):
    """Overall cluster health derived from endpoint connectivity."""

    GREEN = 'GREEN'    # every managed endpoint is UP
    ORANGE = 'ORANGE'  # mixed: at least one endpoint UP, one or more DOWN
    RED = 'RED'        # every managed endpoint is DOWN
|
||||
|
||||
|
||||
class EndpointState(object):
    """Connectivity state for a single NSX manager endpoint."""

    INITIALIZED = 'INITIALIZED'  # no UP or DOWN state recorded yet
    UP = 'UP'                    # endpoint has been validated and is good
    DOWN = 'DOWN'                # endpoint can't be reached or validated
|
||||
|
||||
|
||||
class Provider(object):
    """Record describing one NSX manager endpoint: a unique id, its
    connection URL, and the credentials/CA bundle used to reach it."""

    def __init__(self, provider_id, provider_url, username, password, ca_file):
        # All attributes are part of the provider's public read interface.
        self.id = provider_id
        self.url = provider_url
        self.username = username
        self.password = password
        self.ca_file = ca_file

    def __str__(self):
        # Render as the connection URL only (credentials never logged).
        return str(self.url)
|
||||
|
||||
|
||||
class Endpoint(object):
    """One NSX manager host plus its connection pool and liveness state.

    Connections are checked out of ``pool`` when HTTP verbs are proxied
    to this endpoint; ``state`` holds the last known connectivity and
    ``last_updated`` the time of the last state write.
    """

    def __init__(self, provider, pool):
        self.provider = provider
        self.pool = pool
        self._state = EndpointState.INITIALIZED
        self._last_updated = datetime.datetime.now()

    @property
    def last_updated(self):
        """Timestamp of the most recent call to set_state()."""
        return self._last_updated

    @property
    def state(self):
        """Current EndpointState value for this endpoint."""
        return self._state

    def set_state(self, state):
        """Record a connectivity state; return the previous state.

        The timestamp is refreshed even when the state is unchanged, so
        keepalive scheduling sees every validation attempt.
        """
        if self.state != state:
            LOG.info(_LI("Endpoint '%(ep)s' changing from state"
                         " '%(old)s' to '%(new)s'"),
                     {'ep': self.provider,
                      'old': self.state,
                      'new': state})
        previous = self._state
        self._state = state

        self._last_updated = datetime.datetime.now()

        return previous

    def __str__(self):
        return "[%s] %s" % (self.state, self.provider)
|
||||
|
||||
|
||||
class EndpointConnection(object):
    """Pairs an Endpoint with one connection checked out of its pool."""

    def __init__(self, endpoint, connection):
        self.endpoint = endpoint
        self.connection = connection
|
||||
|
||||
|
||||
class ClusteredAPI(object):
    """Duck types the major HTTP based methods of a
    requests.Session such as get(), put(), post(), etc.
    and transparently proxies those calls to one of
    its managed NSX manager endpoints.
    """
    # HTTP-verb method names installed on each instance by _proxy_stub().
    _HTTP_VERBS = ['get', 'delete', 'head', 'put', 'post', 'patch', 'create']

    def __init__(self, providers,
                 http_provider,
                 min_conns_per_pool=1,
                 max_conns_per_pool=500,
                 keepalive_interval=33):

        self._http_provider = http_provider
        self._keepalive_interval = keepalive_interval

        def _init_cluster(*args, **kwargs):
            self._init_endpoints(providers,
                                 min_conns_per_pool, max_conns_per_pool)

        _init_cluster()

        # keep this internal method for reinitialize upon fork
        # for api workers to ensure each process has its own keepalive
        # loops + state
        self._reinit_cluster = _init_cluster

    def _init_endpoints(self, providers,
                        min_conns_per_pool, max_conns_per_pool):
        """Build per-provider pools/endpoints, start keepalive loops,
        and wait until at least one endpoint validates as UP.
        """
        LOG.debug("Initializing API endpoints")

        def _create_conn(p):
            def _conn():
                # called when a pool needs to create a new connection
                return self._http_provider.new_connection(self, p)
            return _conn

        self._endpoints = {}
        for provider in providers:
            pool = pools.Pool(
                min_size=min_conns_per_pool,
                max_size=max_conns_per_pool,
                order_as_stack=True,
                create=_create_conn(provider))

            endpoint = Endpoint(provider, pool)
            self._endpoints[provider.id] = endpoint

        # service requests using round robin
        self._endpoint_schedule = itertools.cycle(self._endpoints.values())

        # duck type to proxy http invocations
        for method in ClusteredAPI._HTTP_VERBS:
            setattr(self, method, self._proxy_stub(method))

        # Validate all endpoints concurrently via greenthreads.
        conns = greenpool.GreenPool()
        for endpoint in self._endpoints.values():
            conns.spawn(self._validate, endpoint)
        eventlet.sleep(0)
        while conns.running():
            if (self.health == ClusterHealth.GREEN
                    or self.health == ClusterHealth.ORANGE):
                # only wait for 1 or more endpoints to reduce init time
                break
            eventlet.sleep(0.5)

        for endpoint in self._endpoints.values():
            # dynamic loop for each endpoint to ensure connectivity
            loop = loopingcall.DynamicLoopingCall(
                self._endpoint_keepalive, endpoint)
            loop.start(initial_delay=self._keepalive_interval,
                       periodic_interval_max=self._keepalive_interval,
                       stop_on_exception=False)

        LOG.debug("Done initializing API endpoint(s). "
                  "API cluster health: %s", self.health)

    def _endpoint_keepalive(self, endpoint):
        """Periodic callback: revalidate an endpoint that has been idle
        for a full keepalive interval; return the delay until next run.
        """
        delta = datetime.datetime.now() - endpoint.last_updated
        if delta.seconds >= self._keepalive_interval:
            # TODO(boden): backoff on validation failure
            self._validate(endpoint)
            return self._keepalive_interval
        return self._keepalive_interval - delta.seconds

    @property
    def providers(self):
        """List of Provider objects backing this cluster."""
        return [ep.provider for ep in self._endpoints.values()]

    @property
    def endpoints(self):
        # Shallow copy so callers cannot mutate the internal map.
        return copy.copy(self._endpoints)

    @property
    def http_provider(self):
        return self._http_provider

    @property
    def health(self):
        """ClusterHealth value derived from current endpoint states.

        Endpoints that are INITIALIZED or DOWN both count as not-UP.
        """
        down = 0
        up = 0
        for endpoint in self._endpoints.values():
            if endpoint.state != EndpointState.UP:
                down += 1
            else:
                up += 1

        if down == len(self._endpoints):
            return ClusterHealth.RED
        return (ClusterHealth.GREEN
                if up == len(self._endpoints)
                else ClusterHealth.ORANGE)

    def _validate(self, endpoint):
        """Probe an endpoint via the HTTP provider and record UP/DOWN."""
        try:
            with endpoint.pool.item() as conn:
                self._http_provider.validate_connection(self, endpoint, conn)
                endpoint.set_state(EndpointState.UP)
        except Exception as e:
            endpoint.set_state(EndpointState.DOWN)
            LOG.warning(_LW("Failed to validate API cluster endpoint "
                            "'%(ep)s' due to: %(err)s"),
                        {'ep': endpoint, 'err': e})

    def _select_endpoint(self):
        """Return the next UP endpoint (round robin), or None if all
        endpoints are down.
        """
        # check for UP state until exhausting all endpoints
        seen, total = 0, len(self._endpoints.values())
        while seen < total:
            endpoint = next(self._endpoint_schedule)
            if endpoint.state == EndpointState.UP:
                return endpoint
            seen += 1

    def endpoint_for_connection(self, conn):
        """Find which endpoint's pool owns ``conn``; None if not found."""
        # check all endpoint pools
        for endpoint in self._endpoints.values():
            if (conn in endpoint.pool.channel.queue or
                    conn in endpoint.pool.free_items):
                return endpoint

    @property
    def cluster_id(self):
        """Comma-joined URLs of all endpoints; used in error reporting."""
        return ','.join([str(ep.provider.url)
                         for ep in self._endpoints.values()])

    @contextlib.contextmanager
    def connection(self):
        """Context manager yielding a raw connection from any UP endpoint."""
        with self.endpoint_connection() as conn_data:
            yield conn_data.connection

    @contextlib.contextmanager
    def endpoint_connection(self):
        """Context manager yielding an EndpointConnection from an UP
        endpoint; raises ServiceClusterUnavailable when none are UP.
        """
        endpoint = self._select_endpoint()
        if not endpoint:
            LOG.debug("All endpoints down for: %s" %
                      [str(ep) for ep in self._endpoints.values()])
            # all endpoints are DOWN and will have their next
            # state updated as per _endpoint_keepalive()
            raise exceptions.ServiceClusterUnavailable(
                cluster_id=self.cluster_id)

        if endpoint.pool.free() == 0:
            LOG.info(_LI("API endpoint %(ep)s at connection "
                         "capacity %(max)s and has %(waiting)s waiting"),
                     {'ep': endpoint,
                      'max': endpoint.pool.max_size,
                      'waiting': endpoint.pool.waiting()})
        # pool.item() will wait if pool has 0 free
        with endpoint.pool.item() as conn:
            yield EndpointConnection(endpoint, conn)

    def _proxy_stub(self, proxy_for):
        """Return a bound callable that proxies ``proxy_for`` (an HTTP
        verb name) through _proxy(); installed per-verb in __init__.
        """
        def _call_proxy(url, *args, **kwargs):
            return self._proxy(proxy_for, url, *args, **kwargs)
        return _call_proxy

    def _proxy(self, proxy_for, uri, *args, **kwargs):
        """Execute one HTTP verb against an UP endpoint, marking the
        endpoint DOWN and recursing onto the next endpoint on
        connection-level failures.
        """
        # proxy http request call to an avail endpoint
        with self.endpoint_connection() as conn_data:
            conn = conn_data.connection
            endpoint = conn_data.endpoint

            # http conn must support requests style interface
            do_request = getattr(conn, proxy_for)

            if not uri.startswith('/'):
                uri = "/%s" % uri
            url = "%s%s" % (endpoint.provider.url, uri)
            try:
                LOG.debug("API cluster proxy %s %s to %s",
                          proxy_for.upper(), uri, url)
                # call the actual connection method to do the
                # http request/response over the wire
                response = do_request(url, *args, **kwargs)
                endpoint.set_state(EndpointState.UP)

                return response
            except Exception as e:
                LOG.warning(_LW("Request failed due to: %s"), e)
                if not self._http_provider.is_connection_exception(e):
                    # only trap and retry connection errors
                    raise e
                endpoint.set_state(EndpointState.DOWN)
                LOG.debug("Connection to %s failed, checking additional "
                          "endpoints" % url)
                # retry until exhausting endpoints
                # (recursion ends via ServiceClusterUnavailable once
                # every endpoint has been marked DOWN)
                return self._proxy(proxy_for, uri, *args, **kwargs)
|
||||
|
||||
|
||||
class NSXClusteredAPI(ClusteredAPI):
    """Extends ClusteredAPI to get conf values and setup the
    NSX v3 cluster.
    """

    def __init__(self, nsxlib_config):
        """Build the cluster from a NsxLibConfig instance."""
        self.nsxlib_config = nsxlib_config

        # Fall back to the requests-based provider unless the config
        # supplies a custom HTTPProvider implementation.
        self._http_provider = (nsxlib_config.http_provider or
                               NSXRequestsHTTPProvider())

        super(NSXClusteredAPI, self).__init__(
            self._build_conf_providers(),
            self._http_provider,
            max_conns_per_pool=self.nsxlib_config.concurrent_connections,
            keepalive_interval=self.nsxlib_config.conn_idle_timeout)

        LOG.debug("Created NSX clustered API with '%s' "
                  "provider", self._http_provider.provider_id)

    def _build_conf_providers(self):
        """Translate configured manager URLs into Provider objects.

        Duplicate URLs are skipped with a warning; credentials and CA
        file are looked up per manager index via the config accessors.
        """

        def _schemed_url(uri):
            # Prepend the provider's default scheme when the configured
            # value carries no explicit http/https prefix.
            uri = uri.strip('/')
            return urlparse.urlparse(
                uri if uri.startswith('http') else
                "%s://%s" % (self._http_provider.default_scheme, uri))

        conf_urls = self.nsxlib_config.nsx_api_managers[:]
        urls = []
        providers = []
        provider_index = -1
        for conf_url in conf_urls:
            provider_index += 1
            conf_url = _schemed_url(conf_url)
            if conf_url in urls:
                LOG.warning(_LW("'%s' already defined in configuration file. "
                                "Skipping."), urlparse.urlunparse(conf_url))
                continue
            urls.append(conf_url)
            providers.append(
                Provider(
                    conf_url.netloc,
                    urlparse.urlunparse(conf_url),
                    self.nsxlib_config.username(provider_index),
                    self.nsxlib_config.password(provider_index),
                    self.nsxlib_config.ca_file(provider_index)))
        return providers
|
123
vmware_nsxlib/v3/config.py
Normal file
123
vmware_nsxlib/v3/config.py
Normal file
@ -0,0 +1,123 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class NsxLibConfig(object):
    """Holder for every configuration parameter used by the nsxlib code.

    :param nsx_api_managers: List of IP addresses of the NSX managers.
        Each IP address should be of the form:
        [<scheme>://]<ip_address>[:<port>]
        If scheme is not provided https is used. If port is not
        provided port 80 is used for http and port 443 for https.
    :param username: User name for the NSX manager.
    :param password: Password for the NSX manager.
    :param insecure: If true, the NSX Manager server certificate is not
        verified. If false, the CA bundle specified via "ca_file" will
        be used or, if unset, the default system root CAs will be used.
    :param ca_file: CA bundle file to use in verifying the NSX Manager
        server certificate. Ignored if "insecure" is True. If "insecure"
        is False and ca_file is unset, the system root CAs are used.
    :param concurrent_connections: Maximum concurrent connections to
        each NSX manager.
    :param retries: Maximum number of times to retry a HTTP connection.
    :param http_timeout: Seconds before aborting a HTTP connection to a
        NSX manager.
    :param http_read_timeout: Seconds before aborting a HTTP read
        response from a NSX manager.
    :param conn_idle_timeout: Seconds to wait before ensuring
        connectivity to the NSX manager if no manager connection has
        been used.
    :param http_provider: HTTPProvider object, or None.
    :param max_attempts: Maximum number of times to retry API requests
        upon stale revision errors.
    :param plugin_scope: The default scope for the v3 api-version tag.
    :param plugin_tag: The value for the v3 api-version tag.
    :param plugin_ver: The version of the plugin used as the
        'os-api-version' tag value in the v3 api-version tag.
    :param dns_nameservers: List of nameservers to configure for the
        DHCP binding entries. Used when a subnet defines no nameservers.
    :param dns_domain: Domain to use for building the hostnames.
    :param dhcp_profile_uuid: The UUID of the NSX DHCP Profile that will
        be used to enable native DHCP service.
    """

    def __init__(self,
                 nsx_api_managers=None,
                 username=None,
                 password=None,
                 insecure=True,
                 ca_file=None,
                 concurrent_connections=10,
                 retries=3,
                 http_timeout=10,
                 http_read_timeout=180,
                 conn_idle_timeout=10,
                 http_provider=None,
                 max_attempts=10,
                 plugin_scope=None,
                 plugin_tag=None,
                 plugin_ver=None,
                 dns_nameservers=None,
                 dns_domain='openstacklocal',
                 dhcp_profile_uuid=None):

        self.nsx_api_managers = nsx_api_managers
        # Credentials/CA may be a scalar (shared) or a per-manager list;
        # they are resolved through the index-based accessors below.
        self._username = username
        self._password = password
        self._ca_file = ca_file
        self.insecure = insecure
        self.concurrent_connections = concurrent_connections
        self.retries = retries
        self.http_timeout = http_timeout
        self.http_read_timeout = http_read_timeout
        self.conn_idle_timeout = conn_idle_timeout
        self.http_provider = http_provider
        self.max_attempts = max_attempts
        self.plugin_scope = plugin_scope
        self.plugin_tag = plugin_tag
        self.plugin_ver = plugin_ver
        self.dns_nameservers = dns_nameservers or []
        self.dns_domain = dns_domain
        self.dhcp_profile_uuid = dhcp_profile_uuid

    def _attribute_by_index(self, scalar_or_list, index):
        """Resolve a possibly per-manager attribute for manager ``index``."""
        if not isinstance(scalar_or_list, list):
            # A scalar value is shared by every manager.
            return scalar_or_list
        if not scalar_or_list:
            return None
        if index < len(scalar_or_list):
            return scalar_or_list[index]
        # List shorter than requested index: first entry is the default.
        return scalar_or_list[0]

    def username(self, index):
        """User name for the manager at position ``index``."""
        return self._attribute_by_index(self._username, index)

    def password(self, index):
        """Password for the manager at position ``index``."""
        return self._attribute_by_index(self._password, index)

    def ca_file(self, index):
        """CA bundle path for the manager at position ``index``."""
        return self._attribute_by_index(self._ca_file, index)
|
97
vmware_nsxlib/v3/exceptions.py
Normal file
97
vmware_nsxlib/v3/exceptions.py
Normal file
@ -0,0 +1,97 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from oslo_utils import excutils
|
||||
import six
|
||||
|
||||
from vmware_nsxlib._i18n import _
|
||||
|
||||
|
||||
class NsxLibException(Exception):
    """Base NsxLib Exception.

    To correctly use this class, inherit from it and define
    a 'message' property. That message will get printf'd
    with the keyword arguments provided to the constructor.
    """
    message = _("An unknown exception occurred.")

    def __init__(self, **kwargs):
        try:
            # Interpolate the subclass message with the given kwargs.
            super(NsxLibException, self).__init__(self.message % kwargs)
            self.msg = self.message % kwargs
        except Exception:
            # Interpolation failed (e.g. missing/extra kwargs).
            with excutils.save_and_reraise_exception() as ctxt:
                if not self.use_fatal_exceptions():
                    ctxt.reraise = False
                    # at least get the core message out if something happened
                    super(NsxLibException, self).__init__(self.message)

    if six.PY2:
        def __unicode__(self):
            return unicode(self.msg)

    def __str__(self):
        return self.msg

    def use_fatal_exceptions(self):
        # Subclasses may override to force re-raising interpolation errors.
        return False
|
||||
|
||||
|
||||
class ManagerError(NsxLibException):
    """Generic unexpected error reported by a backend NSX manager."""

    message = _("Unexpected error from backend manager (%(manager)s) "
                "for %(operation)s %(details)s")

    def __init__(self, **kwargs):
        # Normalize the optional 'details' kwarg so the message template
        # always has something to interpolate.
        if 'details' in kwargs:
            kwargs['details'] = ': %s' % kwargs['details']
        else:
            kwargs['details'] = ''
        super(ManagerError, self).__init__(**kwargs)
        self.msg = self.message % kwargs
|
||||
|
||||
|
||||
class ResourceNotFound(ManagerError):
    """The requested resource does not exist on the backend manager."""
    message = _("Resource could not be found on backend (%(manager)s) for "
                "%(operation)s")
|
||||
|
||||
|
||||
class StaleRevision(ManagerError):
    # NOTE(review): name plus NsxLibConfig.max_attempts ("retry API
    # requests upon stale revision errors") suggest this signals an
    # update against an outdated object revision — confirm with callers.
    pass
|
||||
|
||||
|
||||
class ServiceClusterUnavailable(ManagerError):
    """Raised when no endpoint of the manager cluster is reachable."""
    message = _("Service cluster: '%(cluster_id)s' is unavailable. Please, "
                "check NSX setup and/or configuration")
|
||||
|
||||
|
||||
class NSGroupMemberNotFound(ManagerError):
    """A member slated for removal was not present in the NSGroup."""
    message = _("Could not find NSGroup %(nsgroup_id)s member %(member_id)s "
                "for removal.")
|
||||
|
||||
|
||||
class NSGroupIsFull(ManagerError):
    """Raised when an NSGroup cannot accept any additional members.

    Caught by NSGroupManager.add_nsgroup() to try the next nested group.
    """
    # Fixed the message text: it previously read "contains has reached"
    # (stray word "contains").
    message = _("NSGroup %(nsgroup_id)s has reached its maximum "
                "capacity, unable to add additional members.")
|
||||
|
||||
|
||||
class NumberOfNsgroupCriteriaTagsReached(ManagerError):
    """A port hit the upper bound of associated security-groups."""
    message = _("Port can be associated with at most %(max_num)s "
                "security-groups.")
|
||||
|
||||
|
||||
class SecurityGroupMaximumCapacityReached(ManagerError):
    """A security-group hit the upper bound of associated ports."""
    message = _("Security Group %(sg_id)s has reached its maximum capacity, "
                "no more ports can be associated with this security-group.")
|
64
vmware_nsxlib/v3/native_dhcp.py
Normal file
64
vmware_nsxlib/v3/native_dhcp.py
Normal file
@ -0,0 +1,64 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import netaddr
|
||||
from neutron_lib.api import validators
|
||||
from neutron_lib import constants
|
||||
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
|
||||
class NsxLibNativeDhcp(utils.NsxLibApiBase):
    """Helper for building NSX native logical DHCP server payloads."""

    def build_server_config(self, network, subnet, port, tags):
        """Prepare the configuration dict for a new logical DHCP server.

        ``network``, ``subnet`` and ``port`` are Neutron-style dicts
        (keys used: network 'name'/'id'; subnet 'cidr', 'dns_nameservers',
        'gateway_ip', 'host_routes'; port 'fixed_ips'). ``tags`` is
        passed through to the backend payload unchanged.
        """
        # Prepare the configuration for a new logical DHCP server.
        server_ip = "%s/%u" % (port['fixed_ips'][0]['ip_address'],
                               netaddr.IPNetwork(subnet['cidr']).prefixlen)
        # Fall back to the library-wide nameservers when the subnet
        # defines none.
        dns_nameservers = subnet['dns_nameservers']
        if not dns_nameservers or not validators.is_attr_set(dns_nameservers):
            dns_nameservers = self.nsxlib_config.dns_nameservers
        gateway_ip = subnet['gateway_ip']
        if not validators.is_attr_set(gateway_ip):
            gateway_ip = None

        # The following code is based on _generate_opts_per_subnet() in
        # neutron/agent/linux/dhcp.py. It prepares DHCP options for a subnet.

        # Add route for directly connected network.
        host_routes = [{'network': subnet['cidr'], 'next_hop': '0.0.0.0'}]
        # Copy routes from subnet host_routes attribute; a 0.0.0.0/0
        # route becomes the gateway when none was set on the subnet.
        for hr in subnet['host_routes']:
            if hr['destination'] == constants.IPv4_ANY:
                if not gateway_ip:
                    gateway_ip = hr['nexthop']
            else:
                host_routes.append({'network': hr['destination'],
                                    'next_hop': hr['nexthop']})
        # If gateway_ip is defined, add default route via this gateway.
        if gateway_ip:
            host_routes.append({'network': constants.IPv4_ANY,
                                'next_hop': gateway_ip})

        # DHCP option 121: classless static routes.
        options = {'option121': {'static_routes': host_routes}}
        name = utils.get_name_and_uuid(network['name'] or 'dhcpserver',
                                       network['id'])
        return {'name': name,
                'dhcp_profile_id': self.nsxlib_config.dhcp_profile_uuid,
                'server_ip': server_ip,
                'dns_nameservers': dns_nameservers,
                'domain_name': self.nsxlib_config.dns_domain,
                'gateway_ip': gateway_ip,
                'options': options,
                'tags': tags}
|
150
vmware_nsxlib/v3/ns_group_manager.py
Normal file
150
vmware_nsxlib/v3/ns_group_manager.py
Normal file
@ -0,0 +1,150 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import uuid
|
||||
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib._i18n import _, _LW
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import nsx_constants as consts
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class NSGroupManager(object):
    """
    This class assists with NSX integration for Neutron security-groups,
    Each Neutron security-group is associated with NSX NSGroup object.
    Some specific security policies are the same across all security-groups,
    i.e - Default drop rule, DHCP. In order to bind these rules to all
    NSGroups (security-groups), we create a nested NSGroup (which its members
    are also of type NSGroups) to group the other NSGroups and associate it
    with these rules.
    In practice, one NSGroup (nested) can't contain all the other NSGroups, as
    it has strict size limit. To overcome the limited space challenge, we
    create several nested groups instead of just one, and we evenly distribute
    NSGroups (security-groups) between them.
    By using an hashing function on the NSGroup uuid we determine in which
    group it should be added, and when deleting an NSGroup (security-group) we
    use the same procedure to find which nested group it was added.
    """

    NESTED_GROUP_NAME = 'OS Nested Group'
    NESTED_GROUP_DESCRIPTION = ('OpenStack NSGroup. Do not delete.')

    def __init__(self, nsxlib, size):
        self.nsxlib_nsgroup = nsxlib.ns_group
        self._nested_groups = self._init_nested_groups(size)
        # Effective size may exceed the requested size when more nested
        # groups already exist on the backend.
        self._size = len(self._nested_groups)

    @property
    def size(self):
        """Number of nested groups in use."""
        return self._size

    @property
    def nested_groups(self):
        """Mapping of nested-group index -> backend NSGroup id."""
        return self._nested_groups

    def _init_nested_groups(self, requested_size):
        """Discover existing nested groups and create any missing ones."""
        # Construct the groups dict -
        # {0: <groups-1>,.., n-1: <groups-n>}
        size = requested_size
        nested_groups = {
            self._get_nested_group_index_from_name(nsgroup): nsgroup['id']
            for nsgroup in self.nsxlib_nsgroup.list()
            if self.nsxlib_nsgroup.is_internal_resource(nsgroup)}

        if nested_groups:
            # Never shrink below what already exists on the backend.
            size = max(requested_size, max(nested_groups) + 1)
            if size > requested_size:
                LOG.warning(_LW("Lowering the value of "
                                "nsx_v3:number_of_nested_groups isn't "
                                "supported, '%s' nested-groups will be used."),
                            size)

        absent_groups = set(range(size)) - set(nested_groups.keys())
        if absent_groups:
            LOG.warning(
                _LW("Found %(num_present)s Nested Groups, "
                    "creating %(num_absent)s more."),
                {'num_present': len(nested_groups),
                 'num_absent': len(absent_groups)})
            for i in absent_groups:
                cont = self._create_nested_group(i)
                nested_groups[i] = cont['id']

        return nested_groups

    def _get_nested_group_index_from_name(self, nested_group):
        # The name format is "Nested Group <index+1>"
        return int(nested_group['display_name'].split()[-1]) - 1

    def _create_nested_group(self, index):
        """Create the backend NSGroup for nested-group ``index``."""
        name_prefix = NSGroupManager.NESTED_GROUP_NAME
        name = '%s %s' % (name_prefix, index + 1)
        description = NSGroupManager.NESTED_GROUP_DESCRIPTION
        tags = self.nsxlib_nsgroup.build_v3_api_version_tag()
        return self.nsxlib_nsgroup.create(name, description, tags)

    def _hash_uuid(self, internal_id):
        # Deterministic placement: hash of the UUID's integer value.
        return hash(uuid.UUID(internal_id))

    def _suggest_nested_group(self, internal_id):
        # Suggests a nested group to use, can be iterated to find alternative
        # group in case that previous suggestions did not help.

        index = self._hash_uuid(internal_id) % self.size
        yield self.nested_groups[index]

        # Walk the remaining groups in ring order after the primary pick.
        for i in range(1, self.size):
            index = (index + 1) % self.size
            yield self.nested_groups[index]

    def add_nsgroup(self, nsgroup_id):
        """Place ``nsgroup_id`` into a nested group with free capacity.

        Raises ManagerError when every nested group is full.
        """
        for group in self._suggest_nested_group(nsgroup_id):
            try:
                LOG.debug("Adding NSGroup %s to nested group %s",
                          nsgroup_id, group)
                self.nsxlib_nsgroup.add_members(
                    group, consts.NSGROUP, [nsgroup_id])
                break
            except exceptions.NSGroupIsFull:
                LOG.debug("Nested group %(group_id)s is full, trying the "
                          "next group..", {'group_id': group})
        else:
            # for/else: no break means every candidate group was full.
            raise exceptions.ManagerError(
                details=_("Reached the maximum supported amount of "
                          "security groups."))

    def remove_nsgroup(self, nsgroup_id):
        """Remove ``nsgroup_id`` from whichever nested group holds it.

        Only logs a warning when the member cannot be found anywhere.
        """
        for group in self._suggest_nested_group(nsgroup_id):
            try:
                self.nsxlib_nsgroup.remove_member(
                    group, consts.NSGROUP,
                    nsgroup_id, verify=True)
                break
            except exceptions.NSGroupMemberNotFound:
                LOG.warning(_LW("NSGroup %(nsgroup)s was expected to be found "
                                "in group %(group_id)s, but wasn't. "
                                "Looking in the next group.."),
                            {'nsgroup': nsgroup_id, 'group_id': group})
                continue
        else:
            # for/else: the member was not present in any nested group.
            LOG.warning(_LW("NSGroup %s was marked for removal, but its "
                            "reference is missing."), nsgroup_id)
|
96
vmware_nsxlib/v3/nsx_constants.py
Normal file
96
vmware_nsxlib/v3/nsx_constants.py
Normal file
@ -0,0 +1,96 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
# Admin statuses
ADMIN_STATE_UP = "UP"
ADMIN_STATE_DOWN = "DOWN"

# Replication modes
MTEP = "MTEP"

# Port attachment types
ATTACHMENT_VIF = "VIF"
ATTACHMENT_CIF = "CIF"
ATTACHMENT_LR = "LOGICALROUTER"
ATTACHMENT_DHCP = "DHCP_SERVICE"
ATTACHMENT_MDPROXY = "METADATA_PROXY"

CIF_RESOURCE_TYPE = "CifAttachmentContext"

# NSXv3 L2 Gateway constants
BRIDGE_ENDPOINT = "BRIDGEENDPOINT"

# Router type
ROUTER_TYPE_TIER0 = "TIER0"
ROUTER_TYPE_TIER1 = "TIER1"

# Logical router port resource types
LROUTERPORT_UPLINK = "LogicalRouterUplinkPort"
LROUTERPORT_DOWNLINK = "LogicalRouterDownLinkPort"
LROUTERPORT_LINKONTIER0 = "LogicalRouterLinkPortOnTIER0"
LROUTERPORT_LINKONTIER1 = "LogicalRouterLinkPortOnTIER1"

# NSX service type
SERVICE_DHCP = "dhcp"

# NSX-V3 Distributed Firewall constants
NSGROUP = 'NSGroup'
NSGROUP_SIMPLE_EXP = 'NSGroupSimpleExpression'
NSGROUP_TAG_EXP = 'NSGroupTagExpression'

# Firewall rule position
FW_INSERT_BEFORE = 'insert_before'
FW_INSERT_BOTTOM = 'insert_bottom'
FW_INSERT_TOP = 'insert_top'

# firewall rule actions
FW_ACTION_ALLOW = 'ALLOW'
FW_ACTION_DROP = 'DROP'
FW_ACTION_REJECT = 'REJECT'

# nsgroup members update actions
NSGROUP_ADD_MEMBERS = 'ADD_MEMBERS'
NSGROUP_REMOVE_MEMBERS = 'REMOVE_MEMBERS'

# NSServices resource types
L4_PORT_SET_NSSERVICE = 'L4PortSetNSService'
ICMP_TYPE_NSSERVICE = 'ICMPTypeNSService'
IP_PROTOCOL_NSSERVICE = 'IPProtocolNSService'

# firewall section types
FW_SECTION_LAYER3 = 'LAYER3'

# firewall rule target types
TARGET_TYPE_LOGICAL_SWITCH = 'LogicalSwitch'
TARGET_TYPE_LOGICAL_PORT = 'LogicalPort'
TARGET_TYPE_IPV4ADDRESS = 'IPv4Address'
TARGET_TYPE_IPV6ADDRESS = 'IPv6Address'

# filtering operators and expressions
EQUALS = 'EQUALS'

# traffic directions
IN = 'IN'
OUT = 'OUT'
IN_OUT = 'IN_OUT'

# IP protocols / ethertypes
TCP = 'TCP'
UDP = 'UDP'
ICMPV4 = 'ICMPv4'
ICMPV6 = 'ICMPv6'
IPV4 = 'IPV4'
IPV6 = 'IPV6'
IPV4_IPV6 = 'IPV4_IPV6'

LOCAL_IP_PREFIX = 'local_ip_prefix'

LOGGING = 'logging'
|
576
vmware_nsxlib/v3/resources.py
Normal file
576
vmware_nsxlib/v3/resources.py
Normal file
@ -0,0 +1,576 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import abc
|
||||
import collections
|
||||
import six
|
||||
|
||||
from vmware_nsxlib._i18n import _
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import nsx_constants
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
|
||||
# (profile_type, profile_id) pair identifying a switching profile; the
# NSX API represents this as a {'key': type, 'value': id} dict.
SwitchingProfileTypeId = collections.namedtuple(
    'SwitchingProfileTypeId', 'profile_type, profile_id')


# Address binding for a logical port; ``vlan`` may be None when the
# binding carries no VLAN tag.
PacketAddressClassifier = collections.namedtuple(
    'PacketAddressClassifier', 'ip_address, mac_address, vlan')
|
||||
|
||||
|
||||
@six.add_metaclass(abc.ABCMeta)
class AbstractRESTResource(object):
    """Base class for NSX REST resource endpoints.

    Holds a REST client scoped to the resource's URI segment and
    provides the generic list/get/delete operations.  Concrete
    subclasses supply the URI segment plus create/update.
    """

    def __init__(self, rest_client, *args, **kwargs):
        # Derive a client bound to this resource's URI segment.
        self._client = rest_client.new_client_for(self.uri_segment)

    @abc.abstractproperty
    def uri_segment(self):
        """URI path segment identifying this resource collection."""
        pass

    def list(self):
        """Return the raw listing of all resources of this type."""
        return self._client.list()

    def get(self, uuid):
        """Return the resource identified by ``uuid``."""
        return self._client.get(uuid)

    def delete(self, uuid):
        """Delete the resource identified by ``uuid``."""
        return self._client.delete(uuid)

    @abc.abstractmethod
    def create(self, *args, **kwargs):
        pass

    @abc.abstractmethod
    def update(self, uuid, *args, **kwargs):
        pass

    def find_by_display_name(self, display_name):
        """Return every resource whose display_name matches exactly."""
        return [resource for resource in self.list()['results']
                if resource['display_name'] == display_name]
|
||||
|
||||
|
||||
class SwitchingProfileTypes(object):
    """NSX ``resource_type`` values for the supported switching profiles."""

    IP_DISCOVERY = 'IpDiscoverySwitchingProfile'
    MAC_LEARNING = 'MacManagementSwitchingProfile'
    PORT_MIRRORING = 'PortMirroringSwitchingProfile'
    QOS = 'QosSwitchingProfile'
    SPOOF_GUARD = 'SpoofGuardSwitchingProfile'
    SWITCH_SECURITY = 'SwitchSecuritySwitchingProfile'
|
||||
|
||||
|
||||
class WhiteListAddressTypes(object):
    """Whitelist provider scopes accepted by the spoofguard profile."""

    PORT = 'LPORT_BINDINGS'
    SWITCH = 'LSWITCH_BINDINGS'
|
||||
|
||||
|
||||
class SwitchingProfile(AbstractRESTResource):
    """API helper for NSX switching profiles.

    Provides typed helpers for creating the spoofguard, switch-security
    (DHCP), MAC-learning and port-mirroring profile flavors.
    """

    @property
    def uri_segment(self):
        return 'switching-profiles'

    def list(self):
        # The plain listing omits system-owned profiles; request them too.
        return self._client.url_get('?include_system_owned=True')

    def create(self, profile_type, display_name=None,
               description=None, **api_args):
        """Create a profile of ``profile_type``.

        Extra keyword arguments are merged verbatim into the request body.
        """
        body = {
            'resource_type': profile_type,
            'display_name': display_name or '',
            'description': description or ''
        }
        body.update(api_args)

        return self._client.create(body=body)

    def update(self, uuid, profile_type, **api_args):
        """Update profile ``uuid``; ``resource_type`` must be resent."""
        body = {
            'resource_type': profile_type
        }
        body.update(api_args)

        return self._client.update(uuid, body=body)

    def create_spoofguard_profile(self, display_name,
                                  description,
                                  whitelist_ports=False,
                                  whitelist_switches=False,
                                  tags=None):
        """Create a spoofguard profile.

        ``whitelist_ports``/``whitelist_switches`` select which address
        binding scopes act as whitelist providers.
        """
        whitelist_providers = []
        if whitelist_ports:
            whitelist_providers.append(WhiteListAddressTypes.PORT)
        if whitelist_switches:
            whitelist_providers.append(WhiteListAddressTypes.SWITCH)

        return self.create(SwitchingProfileTypes.SPOOF_GUARD,
                           display_name=display_name,
                           description=description,
                           white_list_providers=whitelist_providers,
                           tags=tags or [])

    def create_dhcp_profile(self, display_name,
                            description, tags=None):
        """Create a switch-security profile for DHCP/BPDU filtering.

        NOTE(review): client blocking is enabled while server blocking is
        not, rate limits are disabled, and BPDU filtering uses an empty
        whitelist — confirm these defaults against the deployment's
        security requirements.
        """
        dhcp_filter = {
            'client_block_enabled': True,
            'server_block_enabled': False
        }
        rate_limits = {
            'enabled': False,
            'rx_broadcast': 0,
            'tx_broadcast': 0,
            'rx_multicast': 0,
            'tx_multicast': 0
        }
        bpdu_filter = {
            'enabled': True,
            'white_list': []
        }
        return self.create(SwitchingProfileTypes.SWITCH_SECURITY,
                           display_name=display_name,
                           description=description,
                           tags=tags or [],
                           dhcp_filter=dhcp_filter,
                           rate_limits=rate_limits,
                           bpdu_filter=bpdu_filter,
                           block_non_ip_traffic=True)

    def create_mac_learning_profile(self, display_name,
                                    description, tags=None):
        """Create a MAC-learning profile with learning enabled."""
        mac_learning = {
            'enabled': True,
        }
        return self.create(SwitchingProfileTypes.MAC_LEARNING,
                           display_name=display_name,
                           description=description,
                           tags=tags or [],
                           mac_learning=mac_learning)

    def create_port_mirror_profile(self, display_name, description,
                                   direction, destinations, tags=None):
        """Create a port-mirroring profile for the given direction and
        destination list."""
        return self.create(SwitchingProfileTypes.PORT_MIRRORING,
                           display_name=display_name,
                           description=description,
                           tags=tags or [],
                           direction=direction,
                           destinations=destinations)

    @classmethod
    def build_switch_profile_ids(cls, client, *profiles):
        """Normalize ``profiles`` into SwitchingProfileTypeId tuples.

        Each item may be a profile UUID string (fetched via ``client``),
        a raw profile dict, or an already-built SwitchingProfileTypeId.
        """
        ids = []
        for profile in profiles:
            # Fix: accept unicode IDs on Python 2 as well — a bare
            # ``isinstance(profile, str)`` check would miss them and the
            # code would then treat the string as a profile dict.
            if isinstance(profile, six.string_types):
                profile = client.get(profile)
            if not isinstance(profile, SwitchingProfileTypeId):
                profile = SwitchingProfileTypeId(
                    profile.get('key', profile.get('resource_type')),
                    profile.get('value', profile.get('id')))
            ids.append(profile)
        return ids
|
||||
|
||||
|
||||
class LogicalPort(AbstractRESTResource):
    """API helper for NSX logical switch ports."""

    @property
    def uri_segment(self):
        return 'logical-ports'

    def _build_body_attrs(
            self, display_name=None,
            admin_state=True, tags=None,
            address_bindings=None,
            switch_profile_ids=None,
            attachment=None):
        # Assemble the request-body fields shared by create() and
        # update().  ``attachment`` is tri-state: False means "leave the
        # attachment untouched" (key omitted), None means "reset it",
        # and any other value is the new attachment dict.
        tags = tags or []
        address_bindings = address_bindings or []
        switch_profile_ids = switch_profile_ids or []
        body = {}
        if tags:
            body['tags'] = tags
        if display_name is not None:
            body['display_name'] = display_name

        if admin_state is not None:
            if admin_state:
                body['admin_state'] = nsx_constants.ADMIN_STATE_UP
            else:
                body['admin_state'] = nsx_constants.ADMIN_STATE_DOWN

        if address_bindings:
            bindings = []
            for binding in address_bindings:
                # ``binding`` is a PacketAddressClassifier-style object.
                address_classifier = {
                    'ip_address': binding.ip_address,
                    'mac_address': binding.mac_address
                }
                if binding.vlan is not None:
                    address_classifier['vlan'] = int(binding.vlan)
                bindings.append(address_classifier)
            body['address_bindings'] = bindings
        elif address_bindings == []:
            # explicitly clear out address bindings
            body['address_bindings'] = []

        if switch_profile_ids:
            profiles = []
            for profile in switch_profile_ids:
                # SwitchingProfileTypeId -> API {'key': type, 'value': id}
                profiles.append({
                    'value': profile.profile_id,
                    'key': profile.profile_type
                })
            body['switching_profile_ids'] = profiles

        # Note that attachment could be None, meaning reset it.
        if attachment is not False:
            body['attachment'] = attachment

        return body

    def _prepare_attachment(self, vif_uuid, parent_vif_id, parent_tag,
                            address_bindings, attachment_type):
        # Returns an attachment dict, None ("reset the attachment"), or
        # False ("no attachment change") — see _build_body_attrs().
        if attachment_type and vif_uuid:
            attachment = {'attachment_type': attachment_type,
                          'id': vif_uuid}
            if parent_vif_id:
                # Child-interface (CIF) attachment needs the parent VIF
                # context with the VLAN tag.
                context = {'vlan_tag': parent_tag,
                           'container_host_vif_id': parent_vif_id,
                           'resource_type': nsx_constants.CIF_RESOURCE_TYPE}
                attachment['context'] = context
            return attachment
        elif attachment_type is None or vif_uuid is None:
            return None  # reset attachment
        else:
            return False  # no attachment change

    def create(self, lswitch_id, vif_uuid, tags=None,
               attachment_type=nsx_constants.ATTACHMENT_VIF,
               admin_state=True, name=None, address_bindings=None,
               parent_vif_id=None, parent_tag=None,
               switch_profile_ids=None):
        """Create a logical port on logical switch ``lswitch_id``."""
        tags = tags or []

        body = {'logical_switch_id': lswitch_id}
        # NOTE(arosen): If parent_vif_id is specified we need to use
        # CIF attachment type.
        if parent_vif_id:
            attachment_type = nsx_constants.ATTACHMENT_CIF
        attachment = self._prepare_attachment(vif_uuid, parent_vif_id,
                                              parent_tag, address_bindings,
                                              attachment_type)
        body.update(self._build_body_attrs(
            display_name=name,
            admin_state=admin_state, tags=tags,
            address_bindings=address_bindings,
            switch_profile_ids=switch_profile_ids,
            attachment=attachment))
        return self._client.create(body=body)

    def delete(self, lport_id):
        """Detach and delete the logical port, retrying on stale revision."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_delete():
            return self._client.url_delete('%s?detach=true' % lport_id)

        return _do_delete()

    def update(self, lport_id, vif_uuid,
               name=None, admin_state=None,
               address_bindings=None, switch_profile_ids=None,
               tags_update=None,
               attachment_type=nsx_constants.ATTACHMENT_VIF,
               parent_vif_id=None, parent_tag=None):
        """Update a logical port, retrying on stale revision (HTTP 412)."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def do_update():
            lport = self.get(lport_id)
            tags = lport.get('tags', [])
            if tags_update:
                tags = utils.update_v3_tags(tags, tags_update)
            attachment = self._prepare_attachment(vif_uuid, parent_vif_id,
                                                  parent_tag, address_bindings,
                                                  attachment_type)
            lport.update(self._build_body_attrs(
                display_name=name,
                admin_state=admin_state, tags=tags,
                address_bindings=address_bindings,
                switch_profile_ids=switch_profile_ids,
                attachment=attachment))

            # If revision_id of the payload that we send is older than what
            # NSX has, we will get a 412: Precondition Failed.
            # In that case we need to re-fetch, patch the response and send
            # it again with the new revision_id
            return self._client.update(lport_id, body=lport)
        return do_update()
|
||||
|
||||
|
||||
class LogicalRouter(AbstractRESTResource):
    """API helper for NSX logical routers."""

    @property
    def uri_segment(self):
        return 'logical-routers'

    def create(self, display_name, tags, edge_cluster_uuid=None, tier_0=False):
        """Create a TIER0 or TIER1 logical router."""
        # TODO(salv-orlando): If possible do not manage edge clusters
        # in the main plugin logic.
        if tier_0:
            router_type = nsx_constants.ROUTER_TYPE_TIER0
        else:
            router_type = nsx_constants.ROUTER_TYPE_TIER1
        body = {'display_name': display_name,
                'router_type': router_type,
                'tags': tags}
        if edge_cluster_uuid:
            body['edge_cluster_id'] = edge_cluster_uuid
        return self._client.create(body=body)

    def delete(self, lrouter_id):
        """Delete the logical router identified by ``lrouter_id``."""
        return self._client.url_delete(lrouter_id)

    def update(self, lrouter_id, *args, **kwargs):
        """Patch fields of an existing router, retrying on stale revision."""
        # Inner function so the retry decorator can read max_attempts.
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_update():
            # Re-fetch so the payload carries the latest revision_id; a
            # stale revision makes NSX answer 412: Precondition Failed,
            # in which case the decorator retries the whole fetch+patch.
            lrouter = self.get(lrouter_id)
            lrouter.update(kwargs)
            return self._client.update(lrouter_id, body=lrouter)

        return _do_update()
|
||||
|
||||
|
||||
class LogicalRouterPort(AbstractRESTResource):
    """API helper for NSX logical router ports."""

    @property
    def uri_segment(self):
        return 'logical-router-ports'

    def create(self, logical_router_id,
               display_name,
               tags,
               resource_type,
               logical_port_id,
               address_groups,
               edge_cluster_member_index=None):
        """Create a router port of ``resource_type``.

        How ``logical_port_id`` is encoded depends on the port type:
        uplink/downlink ports link to a logical switch port, TIER1 link
        ports link to a peer router port by reference, and any other
        type passes the raw id through.
        """
        body = {'display_name': display_name,
                'resource_type': resource_type,
                'logical_router_id': logical_router_id,
                'tags': tags or []}
        if address_groups:
            body['subnets'] = address_groups
        if resource_type in [nsx_constants.LROUTERPORT_UPLINK,
                             nsx_constants.LROUTERPORT_DOWNLINK]:
            body['linked_logical_switch_port_id'] = {
                'target_id': logical_port_id}
        elif resource_type == nsx_constants.LROUTERPORT_LINKONTIER1:
            body['linked_logical_router_port_id'] = {
                'target_id': logical_port_id}
        elif logical_port_id:
            body['linked_logical_router_port_id'] = logical_port_id
        if edge_cluster_member_index:
            body['edge_cluster_member_index'] = edge_cluster_member_index

        return self._client.create(body=body)

    def update(self, logical_port_id, **kwargs):
        """Patch fields of a router port, retrying on stale revision."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_update():
            logical_router_port = self.get(logical_port_id)
            for k in kwargs:
                logical_router_port[k] = kwargs[k]
            # If revision_id of the payload that we send is older than what
            # NSX has, we will get a 412: Precondition Failed.
            # In that case we need to re-fetch, patch the response and send
            # it again with the new revision_id
            return self._client.update(logical_port_id,
                                       body=logical_router_port)
        return _do_update()

    def delete(self, logical_port_id):
        """Delete a router port, retrying on stale revision."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_delete():
            return self._client.url_delete(logical_port_id)

        return _do_delete()

    def get_by_lswitch_id(self, logical_switch_id):
        """Return the single router port attached to the logical switch.

        Raises ManagerError when more than one port is attached and
        ResourceNotFound when none is.
        """
        resource = '?logical_switch_id=%s' % logical_switch_id
        router_ports = self._client.url_get(resource)
        result_count = int(router_ports.get('result_count', "0"))
        if result_count >= 2:
            raise exceptions.ManagerError(
                details=_("Can't support more than one logical router ports "
                          "on same logical switch %s ") % logical_switch_id)
        elif result_count == 1:
            return router_ports['results'][0]
        else:
            err_msg = (_("Logical router link port not found on logical "
                         "switch %s") % logical_switch_id)
            raise exceptions.ResourceNotFound(
                manager=self._client.nsx_api_managers,
                operation=err_msg)

    def update_by_lswitch_id(self, logical_router_id, ls_id, **payload):
        """Update the router port attached to logical switch ``ls_id``."""
        port = self.get_by_lswitch_id(ls_id)
        return self.update(port['id'], **payload)

    def delete_by_lswitch_id(self, ls_id):
        """Delete the router port attached to logical switch ``ls_id``."""
        port = self.get_by_lswitch_id(ls_id)
        self.delete(port['id'])

    def get_by_router_id(self, logical_router_id):
        """Return all ports belonging to router ``logical_router_id``."""
        resource = '?logical_router_id=%s' % logical_router_id
        logical_router_ports = self._client.url_get(resource)
        return logical_router_ports['results']

    def get_tier1_link_port(self, logical_router_id):
        """Return the TIER1 link port of the router, or raise
        ResourceNotFound if the router has none."""
        logical_router_ports = self.get_by_router_id(logical_router_id)
        for port in logical_router_ports:
            if port['resource_type'] == nsx_constants.LROUTERPORT_LINKONTIER1:
                return port
        raise exceptions.ResourceNotFound(
            manager=self._client.nsx_api_managers,
            operation="get router link port")
|
||||
|
||||
|
||||
class MetaDataProxy(AbstractRESTResource):
    """Accessor for NSX metadata proxies.

    Only the inherited read/list/delete operations are usable; create
    and update are intentionally no-ops.
    """

    @property
    def uri_segment(self):
        return 'md-proxies'

    def create(self, *args, **kwargs):
        # Creation is not supported through this client.
        pass

    def update(self, uuid, *args, **kwargs):
        # Update is not supported through this client.
        pass
|
||||
|
||||
|
||||
class DhcpProfile(AbstractRESTResource):
    """Accessor for NSX DHCP server profiles.

    Only the inherited read/list/delete operations are usable; create
    and update are intentionally no-ops.
    """

    @property
    def uri_segment(self):
        return 'dhcp/server-profiles'

    def create(self, *args, **kwargs):
        # Creation is not supported through this client.
        pass

    def update(self, uuid, *args, **kwargs):
        # Update is not supported through this client.
        pass
|
||||
|
||||
|
||||
class LogicalDhcpServer(AbstractRESTResource):
    """API helper for NSX logical DHCP servers and their static bindings."""

    @property
    def uri_segment(self):
        return 'dhcp/servers'

    def _construct_server(self, body, dhcp_profile_id=None, server_ip=None,
                          name=None, dns_nameservers=None, domain_name=None,
                          gateway_ip=False, options=None, tags=None):
        # Mutates ``body`` in place; body['ipv4_dhcp_server'] must already
        # exist.  ``gateway_ip`` uses False as the "no change" sentinel so
        # that None remains available to mean "delete the gateway".
        if name:
            body['display_name'] = name
        if dhcp_profile_id:
            body['dhcp_profile_id'] = dhcp_profile_id
        if server_ip:
            body['ipv4_dhcp_server']['dhcp_server_ip'] = server_ip
        if dns_nameservers is not None:
            # Note that [] is valid for dns_nameservers, means deleting it.
            body['ipv4_dhcp_server']['dns_nameservers'] = dns_nameservers
        if domain_name:
            body['ipv4_dhcp_server']['domain_name'] = domain_name
        if gateway_ip is not False:
            # Note that None is valid for gateway_ip, means deleting it.
            body['ipv4_dhcp_server']['gateway_ip'] = gateway_ip
        if options:
            body['ipv4_dhcp_server']['options'] = options
        if tags:
            body['tags'] = tags

    def create(self, dhcp_profile_id, server_ip, name=None,
               dns_nameservers=None, domain_name=None, gateway_ip=False,
               options=None, tags=None):
        """Create a logical DHCP server backed by ``dhcp_profile_id``."""
        body = {'ipv4_dhcp_server': {}}
        self._construct_server(body, dhcp_profile_id, server_ip, name,
                               dns_nameservers, domain_name, gateway_ip,
                               options, tags)
        return self._client.create(body=body)

    def update(self, uuid, dhcp_profile_id=None, server_ip=None, name=None,
               dns_nameservers=None, domain_name=None, gateway_ip=False,
               options=None, tags=None):
        """Update a DHCP server, retrying on stale revision (HTTP 412)."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_update():
            body = self._client.get(uuid)
            self._construct_server(body, dhcp_profile_id, server_ip, name,
                                   dns_nameservers, domain_name, gateway_ip,
                                   options, tags)
            return self._client.update(uuid, body=body)

        return _do_update()

    def create_binding(self, server_uuid, mac, ip, hostname=None,
                       lease_time=None, options=None):
        """Create a static MAC-to-IP binding on server ``server_uuid``."""
        body = {'mac_address': mac, 'ip_address': ip}
        if hostname:
            body['host_name'] = hostname
        if lease_time:
            body['lease_time'] = lease_time
        if options:
            body['options'] = options
        url = "%s/static-bindings" % server_uuid
        return self._client.url_post(url, body)

    def get_binding(self, server_uuid, binding_uuid):
        """Return a single static binding of server ``server_uuid``."""
        url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
        return self._client.url_get(url)

    def update_binding(self, server_uuid, binding_uuid, **kwargs):
        """Patch a static binding, retrying on stale revision."""
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self._client.max_attempts)
        def _do_update():
            body = self.get_binding(server_uuid, binding_uuid)
            body.update(kwargs)
            url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
            return self._client.url_put(url, body)

        return _do_update()

    def delete_binding(self, server_uuid, binding_uuid):
        """Delete a static binding of server ``server_uuid``."""
        url = "%s/static-bindings/%s" % (server_uuid, binding_uuid)
        return self._client.url_delete(url)
|
194
vmware_nsxlib/v3/router.py
Normal file
194
vmware_nsxlib/v3/router.py
Normal file
@ -0,0 +1,194 @@
|
||||
# Copyright 2015 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
NSX-V3 Plugin router module
|
||||
"""
|
||||
import copy
|
||||
|
||||
from neutron_lib import exceptions as n_exc
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib._i18n import _, _LW
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import nsx_constants
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
# Minimum number of edge cluster members required to back a tier0 router.
MIN_EDGE_NODE_NUM = 1

# Display names given to the router ports this module creates.
TIER0_ROUTER_LINK_PORT_NAME = "TIER0-RouterLinkPort"
TIER1_ROUTER_LINK_PORT_NAME = "TIER1-RouterLinkPort"
ROUTER_INTF_PORT_NAME = "Tier1-RouterDownLinkPort"

# NAT rule priorities; floating-IP rules get a smaller value than the
# gateway SNAT rule.  NOTE(review): presumably smaller values match
# first on NSX — confirm against the NAT rule ordering semantics.
FIP_NAT_PRI = 900
GW_NAT_PRI = 1000
|
||||
|
||||
|
||||
class RouterLib(object):
    """Higher-level router operations composed from the low-level
    router / router-port clients and the nsxlib facade."""

    def __init__(self, router_client, router_port_client, nsxlib):
        # router_client: LogicalRouter accessor
        # router_port_client: LogicalRouterPort accessor
        # nsxlib: facade exposing logical_router / edge_cluster helpers
        self._router_client = router_client
        self._router_port_client = router_port_client
        self.nsxlib = nsxlib

    def validate_tier0(self, tier0_groups_dict, tier0_uuid):
        """Validate a tier0 router and cache its edge cluster info.

        On success, stores the edge cluster uuid and member index list
        in ``tier0_groups_dict[tier0_uuid]``; raises InvalidInput when
        the router is missing, has no edge cluster, or the cluster has
        fewer than MIN_EDGE_NODE_NUM members.
        """
        err_msg = None
        try:
            lrouter = self._router_client.get(tier0_uuid)
        except exceptions.ResourceNotFound:
            err_msg = (_("Tier0 router %s not found at the backend. Either a "
                         "valid UUID must be specified or a default tier0 "
                         "router UUID must be configured in nsx.ini") %
                       tier0_uuid)
        else:
            edge_cluster_uuid = lrouter.get('edge_cluster_id')
            if not edge_cluster_uuid:
                err_msg = _("Failed to get edge cluster uuid from tier0 "
                            "router %s at the backend") % lrouter
            else:
                edge_cluster = self.nsxlib.edge_cluster.get(edge_cluster_uuid)
                member_index_list = [member['member_index']
                                     for member in edge_cluster['members']]
                if len(member_index_list) < MIN_EDGE_NODE_NUM:
                    err_msg = _("%(act_num)s edge members found in "
                                "edge_cluster %(cluster_id)s, however we "
                                "require at least %(exp_num)s edge nodes "
                                "in edge cluster for use.") % {
                        'act_num': len(member_index_list),
                        'exp_num': MIN_EDGE_NODE_NUM,
                        'cluster_id': edge_cluster_uuid}
        if err_msg:
            raise n_exc.InvalidInput(error_message=err_msg)
        else:
            tier0_groups_dict[tier0_uuid] = {
                'edge_cluster_uuid': edge_cluster_uuid,
                'member_index_list': member_index_list}

    def add_router_link_port(self, tier1_uuid, tier0_uuid, tags):
        """Link a tier1 router to a tier0 router via a port pair."""
        # Create Tier0 logical router link port
        t0_tags = copy.copy(tags)
        t0_tags = utils.add_v3_tag(t0_tags, 'os-tier0-uuid', tier0_uuid)
        tier0_link_port = self._router_port_client.create(
            tier0_uuid, display_name=TIER0_ROUTER_LINK_PORT_NAME, tags=t0_tags,
            resource_type=nsx_constants.LROUTERPORT_LINKONTIER0,
            logical_port_id=None,
            address_groups=None)
        linked_logical_port_id = tier0_link_port['id']
        # Create Tier1 logical router link port
        t1_tags = copy.copy(tags)
        t1_tags = utils.add_v3_tag(t1_tags, 'os-tier1-uuid', tier1_uuid)
        self._router_port_client.create(
            tier1_uuid, display_name=TIER1_ROUTER_LINK_PORT_NAME, tags=t1_tags,
            resource_type=nsx_constants.LROUTERPORT_LINKONTIER1,
            logical_port_id=linked_logical_port_id,
            address_groups=None)

    def remove_router_link_port(self, tier1_uuid, tier0_uuid):
        """Delete the tier1<->tier0 link port pair; a missing tier1 link
        port is only logged, not raised."""
        try:
            tier1_link_port = (
                self._router_port_client.get_tier1_link_port(tier1_uuid))
        except exceptions.ResourceNotFound:
            LOG.warning(_LW("Logical router link port for tier1 router: %s "
                            "not found at the backend"), tier1_uuid)
            return
        tier1_link_port_id = tier1_link_port['id']
        tier0_link_port_id = (
            tier1_link_port['linked_logical_router_port_id'].get('target_id'))
        self._router_port_client.delete(tier1_link_port_id)
        self._router_port_client.delete(tier0_link_port_id)

    def update_advertisement(self, logical_router_id,
                             advertise_route_nat,
                             advertise_route_connected,
                             advertise_route_static=False,
                             enabled=True):
        """Update the router's route advertisement configuration."""
        return self.nsxlib.logical_router.update_advertisement(
            logical_router_id,
            advertise_nat_routes=advertise_route_nat,
            advertise_nsx_connected_routes=advertise_route_connected,
            advertise_static_routes=advertise_route_static,
            enabled=enabled)

    def delete_gw_snat_rule(self, logical_router_id, gw_ip):
        """Delete the gateway SNAT rule translating to ``gw_ip``."""
        return self.nsxlib.logical_router.delete_nat_rule_by_values(
            logical_router_id,
            translated_network=gw_ip)

    def add_gw_snat_rule(self, logical_router_id, gw_ip):
        """Add the gateway SNAT rule translating to ``gw_ip``."""
        return self.nsxlib.logical_router.add_nat_rule(
            logical_router_id, action="SNAT",
            translated_network=gw_ip,
            rule_priority=GW_NAT_PRI)

    def update_router_edge_cluster(self, nsx_router_id, edge_cluster_uuid):
        """Re-home the router onto edge cluster ``edge_cluster_uuid``."""
        return self._router_client.update(nsx_router_id,
                                          edge_cluster_id=edge_cluster_uuid)

    def create_logical_router_intf_port_by_ls_id(self, logical_router_id,
                                                 display_name,
                                                 tags,
                                                 ls_id,
                                                 logical_switch_port_id,
                                                 address_groups):
        """Create (or update the subnets of) the router downlink port
        attached to logical switch ``ls_id``."""
        try:
            port = self._router_port_client.get_by_lswitch_id(ls_id)
        except exceptions.ResourceNotFound:
            return self._router_port_client.create(
                logical_router_id,
                display_name,
                tags,
                nsx_constants.LROUTERPORT_DOWNLINK,
                logical_switch_port_id,
                address_groups)
        else:
            # Port already exists: only refresh its subnets.
            return self._router_port_client.update(
                port['id'], subnets=address_groups)

    def add_fip_nat_rules(self, logical_router_id, ext_ip, int_ip):
        """Add the SNAT/DNAT rule pair for a floating IP."""
        self.nsxlib.logical_router.add_nat_rule(
            logical_router_id, action="SNAT",
            translated_network=ext_ip,
            source_net=int_ip,
            rule_priority=FIP_NAT_PRI)
        self.nsxlib.logical_router.add_nat_rule(
            logical_router_id, action="DNAT",
            translated_network=int_ip,
            dest_net=ext_ip,
            rule_priority=FIP_NAT_PRI)

    def delete_fip_nat_rules(self, logical_router_id, ext_ip, int_ip):
        """Delete the SNAT/DNAT rule pair for a floating IP."""
        self.nsxlib.logical_router.delete_nat_rule_by_values(
            logical_router_id,
            action="SNAT",
            translated_network=ext_ip,
            match_source_network=int_ip)
        self.nsxlib.logical_router.delete_nat_rule_by_values(
            logical_router_id,
            action="DNAT",
            translated_network=int_ip,
            match_destination_network=ext_ip)

    def add_static_routes(self, nsx_router_id, route):
        """Add one static route; ``route`` has 'destination'/'nexthop'."""
        return self.nsxlib.logical_router.add_static_route(
            nsx_router_id,
            route['destination'],
            route['nexthop'])

    def delete_static_routes(self, nsx_router_id, route):
        """Delete the static route matching ``route``."""
        return self.nsxlib.logical_router.delete_static_route_by_values(
            nsx_router_id, dest_cidr=route['destination'],
            nexthop=route['nexthop'])
|
529
vmware_nsxlib/v3/security.py
Normal file
529
vmware_nsxlib/v3/security.py
Normal file
@ -0,0 +1,529 @@
|
||||
# Copyright 2015 OpenStack Foundation
|
||||
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
NSX-V3 Plugin security & Distributed Firewall integration module
|
||||
"""
|
||||
|
||||
from neutron_lib import constants
|
||||
from oslo_log import log
|
||||
from oslo_utils import excutils
|
||||
|
||||
from vmware_nsxlib._i18n import _LE, _LW
|
||||
from vmware_nsxlib.v3 import exceptions
|
||||
from vmware_nsxlib.v3 import nsx_constants as consts
|
||||
from vmware_nsxlib.v3 import utils
|
||||
|
||||
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
PORT_SG_SCOPE = 'os-security-group'
|
||||
MAX_NSGROUPS_CRITERIA_TAGS = 10
|
||||
|
||||
|
||||
class NsxLibNsGroup(utils.NsxLibApiBase):
|
||||
|
||||
def __init__(self, client, max_attempts, firewall_section_handler):
|
||||
self.firewall_section = firewall_section_handler
|
||||
super(NsxLibNsGroup, self).__init__(client, max_attempts)
|
||||
|
||||
def update_on_backend(self, context, security_group,
|
||||
nsgroup_id, section_id,
|
||||
log_sg_allowed_traffic):
|
||||
name = self.get_name(security_group)
|
||||
description = security_group['description']
|
||||
logging = (log_sg_allowed_traffic or
|
||||
security_group[consts.LOGGING])
|
||||
rules = self.firewall_section._process_rules_logging_for_update(
|
||||
section_id, logging)
|
||||
self.update(nsgroup_id, name, description)
|
||||
self.firewall_section.update(section_id, name, description,
|
||||
rules=rules)
|
||||
|
||||
def get_name(self, security_group):
|
||||
# NOTE(roeyc): We add the security-group id to the NSGroup name,
|
||||
# for usability purposes.
|
||||
return '%(name)s - %(id)s' % security_group
|
||||
|
||||
def get_lport_tags(self, secgroups):
|
||||
if len(secgroups) > MAX_NSGROUPS_CRITERIA_TAGS:
|
||||
raise exceptions.NumberOfNsgroupCriteriaTagsReached(
|
||||
max_num=MAX_NSGROUPS_CRITERIA_TAGS)
|
||||
tags = []
|
||||
for sg in secgroups:
|
||||
tags = utils.add_v3_tag(tags, PORT_SG_SCOPE, sg)
|
||||
if not tags:
|
||||
# This port shouldn't be associated with any security-group
|
||||
tags = [{'scope': PORT_SG_SCOPE, 'tag': None}]
|
||||
return tags
|
||||
|
||||
def update_lport(self, context, lport_id, original, updated):
|
||||
added = set(updated) - set(original)
|
||||
removed = set(original) - set(updated)
|
||||
for nsgroup_id in added:
|
||||
try:
|
||||
self.add_members(
|
||||
nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT,
|
||||
[lport_id])
|
||||
except exceptions.NSGroupIsFull:
|
||||
for nsgroup_id in added:
|
||||
# NOTE(roeyc): If the port was not added to the nsgroup
|
||||
# yet, then this request will silently fail.
|
||||
self.remove_member(
|
||||
nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT,
|
||||
lport_id)
|
||||
raise exceptions.SecurityGroupMaximumCapacityReached(
|
||||
sg_id=nsgroup_id)
|
||||
except exceptions.ResourceNotFound:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.error(_LE("NSGroup %s doesn't exists"), nsgroup_id)
|
||||
for nsgroup_id in removed:
|
||||
self.remove_member(
|
||||
nsgroup_id, consts.TARGET_TYPE_LOGICAL_PORT, lport_id)
|
||||
|
||||
def init_default_section(self, name, description, nested_groups,
                         log_sg_blocked_traffic):
    """Ensure the default firewall section exists and return its id.

    Reuses an existing section with the given display name; otherwise
    creates an empty one and seeds it with DHCP-allow rules plus a
    final block-all rule (logged per ``log_sg_blocked_traffic``).

    NOTE(review): the helpers used here (utils.build_v3_api_version_tag,
    self.list_sections, self.create_empty_section,
    self.get_firewall_rule_dict, self.update_section) are not defined in
    the visible part of this module -- confirm they exist;
    NsxLibFirewallSection.init_default appears to be the maintained
    equivalent of this method.
    """
    fw_sections = self.list_sections()
    for section in fw_sections:
        if section['display_name'] == name:
            break
    else:
        tags = utils.build_v3_api_version_tag()
        section = self.create_empty_section(
            name, description, nested_groups, tags)

        block_rule = self.get_firewall_rule_dict(
            'Block All', action=consts.FW_ACTION_DROP,
            logged=log_sg_blocked_traffic)
        # TODO(roeyc): Add additional rules to allow IPV6 NDP.
        dhcp_client = self.get_nsservice(
            consts.L4_PORT_SET_NSSERVICE,
            l4_protocol=consts.UDP,
            source_ports=[67],
            destination_ports=[68])
        dhcp_client_rule_in = self.get_firewall_rule_dict(
            'DHCP Reply',
            direction=consts.IN,
            service=dhcp_client)

        dhcp_server = (
            self.get_nsservice(consts.L4_PORT_SET_NSSERVICE,
                               l4_protocol=consts.UDP,
                               source_ports=[68],
                               destination_ports=[67]))
        dhcp_client_rule_out = self.get_firewall_rule_dict(
            'DHCP Request',
            direction=consts.OUT,
            service=dhcp_server)

        self.update_section(section['id'],
                            name, section['description'],
                            applied_tos=nested_groups,
                            rules=[dhcp_client_rule_out,
                                   dhcp_client_rule_in,
                                   block_rule])
    return section['id']
def get_nsservice(self, resource_type, **properties):
    """Build an NSX 'service' element of the given resource type."""
    service = dict(properties)
    service['resource_type'] = resource_type
    return {'service': service}
def get_port_tag_expression(self, scope, tag):
    """Build an NSGroup tag-expression matching logical ports by tag."""
    expression = {
        'resource_type': consts.NSGROUP_TAG_EXP,
        'target_type': consts.TARGET_TYPE_LOGICAL_PORT,
    }
    expression['scope'] = scope
    expression['tag'] = tag
    return expression
def create(self, display_name, description, tags,
           membership_criteria=None):
    """Create an NSGroup with no members.

    ``membership_criteria``, when given, is wrapped in a single-element
    list as the NSX API expects.
    """
    body = {
        'display_name': display_name,
        'description': description,
        'tags': tags,
        'members': [],
    }
    if membership_criteria:
        body['membership_criteria'] = [membership_criteria]
    return self.client.create('ns-groups', body)
def list(self):
    """Return all NSGroups (member references omitted for speed)."""
    response = self.client.get('ns-groups?populate_references=false')
    return response.get('results', [])
def update(self, nsgroup_id, display_name=None, description=None,
           membership_criteria=None, members=None):
    """Update selected fields of an NSGroup (read-modify-write).

    Only non-None arguments are applied; the call retries on
    StaleRevision up to ``nsxlib_config.max_attempts`` times.
    """
    # Using internal method so we can access max_attempts in the decorator
    @utils.retry_upon_exception(
        exceptions.StaleRevision,
        max_attempts=self.nsxlib_config.max_attempts)
    def _do_update():
        nsgroup = self.read(nsgroup_id)
        if display_name is not None:
            nsgroup['display_name'] = display_name
        if description is not None:
            nsgroup['description'] = description
        if members is not None:
            nsgroup['members'] = members
        if membership_criteria is not None:
            # API expects a list, even for a single criterion.
            nsgroup['membership_criteria'] = [membership_criteria]
        return self.client.update(
            'ns-groups/%s' % nsgroup_id, nsgroup)

    return _do_update()
def get_member_expression(self, target_type, target_id):
    """Build a simple-expression member entry (id == target_id)."""
    expression = {
        'resource_type': consts.NSGROUP_SIMPLE_EXP,
        'target_property': 'id',
        'op': consts.EQUALS,
    }
    expression['target_type'] = target_type
    expression['value'] = target_id
    return expression
def _update_with_members(self, nsgroup_id, members, action):
    """POST a member add/remove ``action`` on an NSGroup.

    Retries on StaleRevision up to ``nsxlib_config.max_attempts``.
    """
    # Using internal method so we can access max_attempts in the decorator
    @utils.retry_upon_exception(
        exceptions.StaleRevision,
        max_attempts=self.nsxlib_config.max_attempts)
    def _do_update():
        members_update = 'ns-groups/%s?action=%s' % (nsgroup_id, action)
        return self.client.create(members_update, members)

    return _do_update()
def add_members(self, nsgroup_id, target_type, target_ids):
    """Add resources of ``target_type`` to an NSGroup.

    StaleRevision/ResourceNotFound propagate unchanged; any other
    ManagerError is treated as the group being full and re-raised as
    NSGroupIsFull after logging a warning.
    """
    members = []
    for target_id in target_ids:
        member_expr = self.get_member_expression(
            target_type, target_id)
        members.append(member_expr)
    members = {'members': members}
    try:
        return self._update_with_members(
            nsgroup_id, members, consts.NSGROUP_ADD_MEMBERS)
    except (exceptions.StaleRevision, exceptions.ResourceNotFound):
        raise
    except exceptions.ManagerError:
        # REVISIT(roeyc): A ManagerError might have been raised for a
        # different reason, e.g - NSGroup does not exists.
        # Bug fix: the original format string was "(%(target_ids))s",
        # where ')' follows the mapping key as the conversion character,
        # raising ValueError whenever this warning was rendered.
        LOG.warning(_LW("Failed to add %(target_type)s resources "
                        "(%(target_ids)s) to NSGroup %(nsgroup_id)s"),
                    {'target_type': target_type,
                     'target_ids': target_ids,
                     'nsgroup_id': nsgroup_id})

        raise exceptions.NSGroupIsFull(nsgroup_id=nsgroup_id)
def remove_member(self, nsgroup_id, target_type,
                  target_id, verify=False):
    """Remove a single resource from an NSGroup.

    Manager errors are swallowed by default (best-effort removal);
    with ``verify=True`` they surface as NSGroupMemberNotFound.
    """
    member_expr = self.get_member_expression(
        target_type, target_id)
    members = {'members': [member_expr]}
    try:
        return self._update_with_members(
            nsgroup_id, members, consts.NSGROUP_REMOVE_MEMBERS)
    except exceptions.ManagerError:
        if verify:
            raise exceptions.NSGroupMemberNotFound(member_id=target_id,
                                                   nsgroup_id=nsgroup_id)
def read(self, nsgroup_id):
    """Fetch a single NSGroup including its member references."""
    uri = 'ns-groups/%s?populate_references=true' % nsgroup_id
    return self.client.get(uri)
def delete(self, nsgroup_id):
    """Force-delete an NSGroup; failures are logged and ignored."""
    try:
        return self.client.delete(
            'ns-groups/%s?force=true' % nsgroup_id)
    # FIXME(roeyc): Should only except NotFound error.
    except Exception:
        LOG.debug("NSGroup %s does not exists for delete request.",
                  nsgroup_id)
class NsxLibFirewallSection(utils.NsxLibApiBase):
    """Helper for NSX-v3 firewall sections and their rules.

    Translates neutron security-group rules into NSX firewall rules
    and manages section lifecycle through ``self.client``.
    """

    def _get_direction(self, sg_rule):
        # neutron 'ingress' maps to NSX IN; anything else ('egress') to OUT.
        return (
            consts.IN if sg_rule['direction'] == 'ingress'
            else consts.OUT
        )

    def _get_l4_protocol_name(self, protocol_number):
        """Normalize a protocol name/number to an NSX L4 protocol value.

        Returns None for None; TCP/UDP/ICMPV4 for 6/17/1; otherwise the
        plain int protocol number.
        """
        if protocol_number is None:
            return
        # Accept symbolic names (e.g. 'tcp') via neutron's protocol map.
        protocol_number = constants.IP_PROTOCOL_MAP.get(protocol_number,
                                                        protocol_number)
        protocol_number = int(protocol_number)
        if protocol_number == 6:
            return consts.TCP
        elif protocol_number == 17:
            return consts.UDP
        elif protocol_number == 1:
            return consts.ICMPV4
        else:
            return protocol_number

    def get_nsservice(self, resource_type, **properties):
        """Build an NSX 'service' element of the given resource type."""
        service = {'resource_type': resource_type}
        service.update(properties)
        return {'service': service}

    def _decide_service(self, sg_rule):
        """Build the NSX service element matching a neutron SG rule.

        Returns None when the rule has no protocol (match-all rule).
        """
        l4_protocol = self._get_l4_protocol_name(sg_rule['protocol'])
        direction = self._get_direction(sg_rule)

        if l4_protocol in [consts.TCP, consts.UDP]:
            # If port_range_min is not specified then we assume all ports are
            # matched, relying on neutron to perform validation.
            source_ports = []
            if sg_rule['port_range_min'] is None:
                destination_ports = []
            elif sg_rule['port_range_min'] != sg_rule['port_range_max']:
                # NSX API requires a non-empty range (e.g - '22-23')
                destination_ports = ['%(port_range_min)s-%(port_range_max)s'
                                     % sg_rule]
            else:
                destination_ports = ['%(port_range_min)s' % sg_rule]

            if direction == consts.OUT:
                # Egress: the port range applies to the source side.
                source_ports, destination_ports = destination_ports, []

            return self.get_nsservice(
                consts.L4_PORT_SET_NSSERVICE,
                l4_protocol=l4_protocol,
                source_ports=source_ports,
                destination_ports=destination_ports)
        elif l4_protocol == consts.ICMPV4:
            # For ICMP, port_range_min/max carry the ICMP type/code.
            return self.get_nsservice(
                consts.ICMP_TYPE_NSSERVICE,
                protocol=l4_protocol,
                icmp_type=sg_rule['port_range_min'],
                icmp_code=sg_rule['port_range_max'])
        elif l4_protocol is not None:
            return self.get_nsservice(
                consts.IP_PROTOCOL_NSSERVICE,
                protocol_number=l4_protocol)

    def _build(self, display_name, description, applied_tos, tags):
        # Request body for a stateful layer-3 firewall section.
        return {'display_name': display_name,
                'description': description,
                'stateful': True,
                'section_type': consts.FW_SECTION_LAYER3,
                'applied_tos': [self.get_nsgroup_reference(t_id)
                                for t_id in applied_tos],
                'tags': tags}

    def create_empty(self, display_name, description,
                     applied_tos, tags,
                     operation=consts.FW_INSERT_BOTTOM,
                     other_section=None):
        """Create a firewall section with no rules.

        ``operation``/``other_section`` control where the new section
        is inserted relative to existing sections.
        """
        resource = 'firewall/sections?operation=%s' % operation
        body = self._build(display_name, description,
                           applied_tos, tags)
        if other_section:
            resource += '&id=%s' % other_section
        return self.client.create(resource, body)

    def update(self, section_id, display_name=None, description=None,
               applied_tos=None, rules=None):
        """Update a section, optionally replacing all its rules.

        Read-modify-write retried on StaleRevision.  When ``rules`` is
        given the NSX 'update_with_rules' action (a POST) is used;
        otherwise a plain update is issued only if a field changed.
        """
        # Using internal method so we can access max_attempts in the decorator
        @utils.retry_upon_exception(
            exceptions.StaleRevision,
            max_attempts=self.nsxlib_config.max_attempts)
        def _do_update():
            resource = 'firewall/sections/%s' % section_id
            section = self.read(section_id)

            if rules is not None:
                resource += '?action=update_with_rules'
                section.update({'rules': rules})
            if display_name is not None:
                section['display_name'] = display_name
            if description is not None:
                section['description'] = description
            if applied_tos is not None:
                section['applied_tos'] = [self.get_nsgroup_reference(nsg_id)
                                          for nsg_id in applied_tos]
            if rules is not None:
                return self.client.create(resource, section)
            elif any(p is not None for p in (display_name, description,
                                             applied_tos)):
                return self.client.update(resource, section)

        return _do_update()

    def read(self, section_id):
        """Fetch a firewall section by id."""
        resource = 'firewall/sections/%s' % section_id
        return self.client.get(resource)

    def list(self):
        """Return all firewall sections."""
        resource = 'firewall/sections'
        return self.client.get(resource).get('results', [])

    def delete(self, section_id):
        """Delete a section together with all of its rules (cascade)."""
        resource = 'firewall/sections/%s?cascade=true' % section_id
        return self.client.delete(resource)

    def get_nsgroup_reference(self, nsgroup_id):
        # Reference element pointing at an NSGroup.
        return {'target_id': nsgroup_id,
                'target_type': consts.NSGROUP}

    def get_ip_cidr_reference(self, ip_cidr_block, ip_protocol):
        # Reference element pointing at an IPv4/IPv6 CIDR block.
        target_type = (consts.TARGET_TYPE_IPV4ADDRESS
                       if ip_protocol == consts.IPV4
                       else consts.TARGET_TYPE_IPV6ADDRESS)
        return {'target_id': ip_cidr_block,
                'target_type': target_type}

    def get_rule_dict(
        self, display_name, source=None,
        destination=None,
        direction=consts.IN_OUT,
        ip_protocol=consts.IPV4_IPV6,
        service=None, action=consts.FW_ACTION_ALLOW,
        logged=False):
        """Build a firewall rule body.

        None source/destination/service become empty lists (match-any).
        """
        return {'display_name': display_name,
                'sources': [source] if source else [],
                'destinations': [destination] if destination else [],
                'direction': direction,
                'ip_protocol': ip_protocol,
                'services': [service] if service else [],
                'action': action,
                'logged': logged}

    def add_rule(self, rule, section_id):
        """Append a single rule at the bottom of a section."""
        resource = 'firewall/sections/%s/rules' % section_id
        params = '?operation=insert_bottom'
        return self.client.create(resource + params, rule)

    def add_rules(self, rules, section_id):
        """Append multiple rules at the bottom of a section in one call."""
        resource = 'firewall/sections/%s/rules' % section_id
        params = '?action=create_multiple&operation=insert_bottom'
        return self.client.create(resource + params, {'rules': rules})

    def delete_rule(self, section_id, rule_id):
        """Delete one rule from a section."""
        resource = 'firewall/sections/%s/rules/%s' % (section_id, rule_id)
        return self.client.delete(resource)

    def get_rules(self, section_id):
        """Return the raw rule listing of a section."""
        resource = 'firewall/sections/%s/rules' % section_id
        return self.client.get(resource)

    def _get_fw_rule_from_sg_rule(self, sg_rule, nsgroup_id, rmt_nsgroup_id,
                                  logged, action):
        """Translate one neutron SG rule into an NSX firewall rule body.

        The local side is the port NSGroup (or an explicit local IP
        prefix); the remote side is either a CIDR or a remote NSGroup.
        For egress rules, source and destination are swapped so the
        local side becomes the source.
        """
        # IPV4 or IPV6
        ip_protocol = sg_rule['ethertype'].upper()
        direction = self._get_direction(sg_rule)

        if sg_rule.get(consts.LOCAL_IP_PREFIX):
            local_ip_prefix = self.get_ip_cidr_reference(
                sg_rule[consts.LOCAL_IP_PREFIX],
                ip_protocol)
        else:
            local_ip_prefix = None

        source = None
        local_group = self.get_nsgroup_reference(nsgroup_id)
        if sg_rule['remote_ip_prefix'] is not None:
            source = self.get_ip_cidr_reference(
                sg_rule['remote_ip_prefix'], ip_protocol)
            destination = local_ip_prefix or local_group
        else:
            if rmt_nsgroup_id:
                source = self.get_nsgroup_reference(rmt_nsgroup_id)
            destination = local_ip_prefix or local_group
        if direction == consts.OUT:
            source, destination = destination, source

        service = self._decide_service(sg_rule)
        name = sg_rule['id']

        return self.get_rule_dict(name, source,
                                  destination, direction,
                                  ip_protocol, service,
                                  action, logged)

    def create_rules(self, context, section_id, nsgroup_id,
                     logging_enabled, action, security_group_rules,
                     ruleid_2_remote_nsgroup_map):
        """Translate SG rules and insert them into a section."""
        # 1. translate rules
        # 2. insert in section
        # 3. return the rules
        firewall_rules = []
        for sg_rule in security_group_rules:
            remote_nsgroup_id = ruleid_2_remote_nsgroup_map[sg_rule['id']]
            fw_rule = self._get_fw_rule_from_sg_rule(
                sg_rule, nsgroup_id, remote_nsgroup_id,
                logging_enabled, action)

            firewall_rules.append(fw_rule)

        return self.add_rules(firewall_rules, section_id)

    def set_rule_logging(self, section_id, logging):
        """Enable or disable logging on every rule of a section."""
        rules = self._process_rules_logging_for_update(
            section_id, logging)
        self.update(section_id, rules=rules)

    def _process_rules_logging_for_update(self, section_id, logging_enabled):
        """Return the section's rules with 'logged' set as requested.

        Returns None when no rule needs changing, so callers can skip
        a no-op update.
        """
        rules = self.get_rules(section_id).get('results', [])
        update_rules = False
        for rule in rules:
            if rule['logged'] != logging_enabled:
                rule['logged'] = logging_enabled
                update_rules = True
        return rules if update_rules else None

    def init_default(self, name, description, nested_groups,
                     log_sg_blocked_traffic):
        """Ensure the default firewall section exists; return its id.

        Reuses an existing section matched by display name; otherwise
        creates one and seeds it with DHCP-allow rules plus a final
        block-all rule.
        """
        fw_sections = self.list()
        for section in fw_sections:
            if section['display_name'] == name:
                break
        else:
            tags = self.build_v3_api_version_tag()
            section = self.create_empty(
                name, description, nested_groups, tags)

            block_rule = self.get_rule_dict(
                'Block All', action=consts.FW_ACTION_DROP,
                logged=log_sg_blocked_traffic)
            # TODO(roeyc): Add additional rules to allow IPV6 NDP.
            dhcp_client = self.get_nsservice(
                consts.L4_PORT_SET_NSSERVICE,
                l4_protocol=consts.UDP,
                source_ports=[67],
                destination_ports=[68])
            dhcp_client_rule_in = self.get_rule_dict(
                'DHCP Reply', direction=consts.IN,
                service=dhcp_client)

            dhcp_server = (
                self.get_nsservice(
                    consts.L4_PORT_SET_NSSERVICE,
                    l4_protocol=consts.UDP,
                    source_ports=[68],
                    destination_ports=[67]))
            dhcp_client_rule_out = self.get_rule_dict(
                'DHCP Request', direction=consts.OUT,
                service=dhcp_server)

            self.update(section['id'],
                        name, section['description'],
                        applied_tos=nested_groups,
                        rules=[dhcp_client_rule_out,
                               dhcp_client_rule_in,
                               block_rule])
        return section['id']
213
vmware_nsxlib/v3/utils.py
Normal file
213
vmware_nsxlib/v3/utils.py
Normal file
@ -0,0 +1,213 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
# All Rights Reserved
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import retrying
|
||||
|
||||
from neutron_lib import exceptions
|
||||
from oslo_log import log
|
||||
|
||||
from vmware_nsxlib._i18n import _
|
||||
from vmware_nsxlib.v3 import exceptions as nsxlib_exceptions
|
||||
|
||||
LOG = log.getLogger(__name__)

# NSX limits on the lengths of tag 'scope' and 'tag' fields
# (enforced by _validate_resource_type_length and add_v3_tag).
MAX_RESOURCE_TYPE_LEN = 20
MAX_TAG_LEN = 40
# Default retry budget for retry_upon_exception.
DEFAULT_MAX_ATTEMPTS = 10
def _validate_resource_type_length(resource_type):
    """Reject resource types longer than MAX_RESOURCE_TYPE_LEN.

    Add in a validation to ensure that we catch this at build time.
    """
    if len(resource_type) > MAX_RESOURCE_TYPE_LEN:
        msg = (_('Resource type cannot exceed %(max_len)s '
                 'characters: %(resource_type)s') %
               {'max_len': MAX_RESOURCE_TYPE_LEN,
                'resource_type': resource_type})
        raise exceptions.InvalidInput(error_message=msg)
def add_v3_tag(tags, resource_type, tag):
    """Append a {'scope': resource_type, 'tag': tag} entry to ``tags``.

    The tag value is truncated to MAX_TAG_LEN; the scope is validated
    against MAX_RESOURCE_TYPE_LEN.  Returns the (mutated) list.
    """
    _validate_resource_type_length(resource_type)
    entry = {'scope': resource_type, 'tag': tag[:MAX_TAG_LEN]}
    tags.append(entry)
    return tags
def update_v3_tags(current_tags, tags_update):
    """Merge a tag update into an existing tag list.

    Scopes present in exactly one of the two lists are kept unchanged;
    scopes present in both take the updated value (truncated to
    MAX_TAG_LEN), and an empty/None updated value removes the tag
    completely.
    """
    current_scopes = {t['scope'] for t in current_tags}
    updated_scopes = {t['scope'] for t in tags_update}

    # Tags whose scope appears in only one of the lists survive as-is.
    exclusive_scopes = current_scopes ^ updated_scopes
    tags = [{'scope': t['scope'], 'tag': t['tag']}
            for t in (current_tags + tags_update)
            if t['scope'] in exclusive_scopes]

    # Scopes present in both lists take the updated value.
    modified_scopes = current_scopes & updated_scopes
    for tag in tags_update:
        if tag['scope'] in modified_scopes:
            # If the tag value is empty or None, then remove the tag completely
            if tag['tag']:
                tag['tag'] = tag['tag'][:MAX_TAG_LEN]
                tags.append(tag)

    return tags
def retry_upon_exception(exc, delay=500, max_delay=2000,
                         max_attempts=DEFAULT_MAX_ATTEMPTS):
    """Return a decorator retrying only on ``exc`` with exponential backoff.

    ``delay``/``max_delay`` are passed to retrying's wait_exponential
    parameters (milliseconds); ``max_attempts`` bounds the total number
    of call attempts.
    """
    return retrying.retry(retry_on_exception=lambda e: isinstance(e, exc),
                          wait_exponential_multiplier=delay,
                          wait_exponential_max=max_delay,
                          stop_max_attempt_number=max_attempts)
def list_match(list1, list2):
    """Order-insensitive comparison of two equal-length lists.

    dict elements are compared with dict_match, i.e. list1's dict
    element may be a subset of list2's corresponding element.
    NOTE(review): sorting a list of dicts raises TypeError on
    Python 3 -- presumably callers only pass sortable elements when
    dicts are nested inside lists; confirm.
    """
    if not isinstance(list1, list) or not isinstance(list2, list):
        return False
    if len(list1) != len(list2):
        return False
    for v1, v2 in zip(sorted(list1), sorted(list2)):
        if isinstance(v1, dict):
            if not dict_match(v1, v2):
                return False
        elif isinstance(v1, list):
            if not list_match(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True


def dict_match(dict1, dict2):
    """Return True when dict1 is a (recursive) subset of dict2."""
    if not isinstance(dict1, dict) or not isinstance(dict2, dict):
        return False
    for key, v1 in dict1.items():
        if key not in dict2:
            return False
        v2 = dict2[key]
        if isinstance(v1, dict):
            if not dict_match(v1, v2):
                return False
        elif isinstance(v1, list):
            if not list_match(v1, v2):
                return False
        elif v1 != v2:
            return False
    return True
def get_name_and_uuid(name, uuid, tag=None, maxlen=80):
    """Compose '<name>[_<tag>]_<uuid-head>...<uuid-tail>', capped at maxlen.

    The uuid is abbreviated to its first and last 5 characters; the
    name (and tag, when given) consume whatever budget remains.
    """
    suffix = '_%s...%s' % (uuid[:5], uuid[-5:])
    budget = maxlen - len(suffix)
    if tag:
        budget -= len(tag) + 1  # account for the '_' separator
        return '%s_%s%s' % (name[:budget], tag, suffix)
    return name[:budget] + suffix
class NsxLibApiBase(object):
    """Base class for nsxlib api """
    def __init__(self, client, nsxlib_config):
        # REST client used for all NSX manager calls.
        self.client = client
        # Library-wide configuration (retry budget, plugin tag/scope/ver).
        self.nsxlib_config = nsxlib_config
        super(NsxLibApiBase, self).__init__()

    def _update_resource_with_retry(self, resource, payload):
        """GET the resource, overlay ``payload`` keys, and PUT it back.

        Retried on StaleRevision (optimistic-locking conflicts) up to
        ``nsxlib_config.max_attempts`` times.
        """
        # Using internal method so we can access max_attempts in the decorator
        @retry_upon_exception(nsxlib_exceptions.StaleRevision,
                              max_attempts=self.nsxlib_config.max_attempts)
        def do_update():
            revised_payload = self.client.get(resource)
            for key_name in payload.keys():
                revised_payload[key_name] = payload[key_name]
            return self.client.update(resource, revised_payload)

        return do_update()

    def _get_resource_by_name_or_id(self, name_or_id, resource):
        """Resolve ``name_or_id`` against a resource listing.

        An id match wins immediately; otherwise the display name must
        match exactly one result.

        :raises ManagerError: when no result, or multiple results, match.
        """
        all_results = self.client.get(resource)['results']
        matched_results = []
        for rs in all_results:
            if rs.get('id') == name_or_id:
                # Matched by id - must be unique
                return name_or_id

            if rs.get('display_name') == name_or_id:
                # Matched by name - add to the list to verify it is unique
                matched_results.append(rs)

        if len(matched_results) == 0:
            err_msg = (_("Could not find %(resource)s %(name)s") %
                       {'name': name_or_id, 'resource': resource})
            # TODO(aaron): improve exception handling...
            raise exceptions.ManagerError(details=err_msg)
        elif len(matched_results) > 1:
            err_msg = (_("Found multiple %(resource)s named %(name)s") %
                       {'name': name_or_id, 'resource': resource})
            # TODO(aaron): improve exception handling...
            raise exceptions.ManagerError(details=err_msg)

        return matched_results[0].get('id')

    def build_v3_api_version_tag(self,):
        """Some resources are created on the manager

        that do not have a corresponding plugin resource.

        """
        return [{'scope': self.nsxlib_config.plugin_scope,
                 'tag': self.nsxlib_config.plugin_tag},
                {'scope': "os-api-version",
                 'tag': self.nsxlib_config.plugin_ver}]

    def is_internal_resource(self, nsx_resource):
        """Indicates whether the passed nsx-resource is internal

        owned by the plugin for internal use.

        """
        for tag in nsx_resource.get('tags', []):
            if tag['scope'] == self.nsxlib_config.plugin_scope:
                return tag['tag'] == self.nsxlib_config.plugin_tag
        return False

    def build_v3_tags_payload(self, resource, resource_type, project_name):
        """Construct the tags payload that will be pushed to NSX-v3

        Add <resource_type>:<resource-id>, os-project-id:<project-id>,
        os-project-name:<project_name> os-api-version:<plugin-api-version>

        """
        _validate_resource_type_length(resource_type)
        # There may be cases when the plugin creates the port, for example DHCP
        if not project_name:
            project_name = self.nsxlib_config.plugin_tag
        tenant_id = resource.get('tenant_id', '')
        # If tenant_id is present in resource and set to None, explicitly set
        # the tenant_id in tags as ''.
        if tenant_id is None:
            tenant_id = ''
        return [{'scope': resource_type,
                 'tag': resource.get('id', '')[:MAX_TAG_LEN]},
                {'scope': 'os-project-id',
                 'tag': tenant_id[:MAX_TAG_LEN]},
                {'scope': 'os-project-name',
                 'tag': project_name[:MAX_TAG_LEN]},
                {'scope': 'os-api-version',
                 'tag': self.nsxlib_config.plugin_ver}]
17
vmware_nsxlib/version.py
Normal file
17
vmware_nsxlib/version.py
Normal file
@ -0,0 +1,17 @@
|
||||
# Copyright 2016 VMware, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import pbr.version
|
||||
|
||||
version_info = pbr.version.VersionInfo('vmware-nsxlib')
|
Loading…
Reference in New Issue
Block a user