From 4e7bea0c68122bd76b4c5f9d216f5c6c01af9a79 Mon Sep 17 00:00:00 2001 From: Yair Fried Date: Sun, 17 May 2015 17:26:28 +0300 Subject: [PATCH] [Scenario] Split Scenarios - P6 Move under plugins/openstack: * Designate * Neutron * Nova * VM Implements: blueprint split-plugins Change-Id: Iaaa2e7f0b35931fd11480737524d9e09371f721a --- .../openstack/context/vm/custom_image.py | 4 +- .../openstack/scenarios/cinder/volumes.py | 2 +- .../openstack/scenarios/designate/__init__.py | 0 .../openstack/scenarios/designate/basic.py | 175 ++++ .../openstack/scenarios/designate/utils.py | 122 +++ .../openstack/scenarios/glance/images.py | 2 +- .../scenarios/murano/environments.py | 2 +- .../openstack/scenarios/neutron/__init__.py | 0 .../openstack/scenarios/neutron/network.py | 331 ++++++++ .../openstack/scenarios/neutron/utils.py | 289 +++++++ .../openstack/scenarios/nova/__init__.py | 0 .../openstack/scenarios/nova/hypervisors.py | 40 + .../openstack/scenarios/nova/keypairs.py | 79 ++ .../openstack/scenarios/nova/servers.py | 619 ++++++++++++++ .../plugins/openstack/scenarios/nova/utils.py | 775 ++++++++++++++++++ .../openstack/scenarios/vm/__init__.py | 0 rally/plugins/openstack/scenarios/vm/utils.py | 173 ++++ .../plugins/openstack/scenarios/vm/vmtasks.py | 106 +++ .../openstack/scenarios/designate/__init__.py | 0 .../scenarios/designate/test_basic.py | 145 ++++ .../scenarios/designate/test_utils.py | 169 ++++ .../openstack/scenarios/glance/test_images.py | 2 +- .../openstack/scenarios/neutron/__init__.py | 0 .../scenarios/neutron/test_network.py | 638 ++++++++++++++ .../openstack/scenarios/neutron/test_utils.py | 434 ++++++++++ .../openstack/scenarios/nova/__init__.py | 0 .../scenarios/nova/test_hypervisors.py | 31 + .../openstack/scenarios/nova/test_keypairs.py | 63 ++ .../openstack/scenarios/nova/test_servers.py | 593 ++++++++++++++ .../openstack/scenarios/nova/test_utils.py | 774 +++++++++++++++++ .../openstack/scenarios/vm/__init__.py | 0 
.../openstack/scenarios/vm/test_utils.py | 294 +++++++ .../openstack/scenarios/vm/test_vmtasks.py | 81 ++ 33 files changed, 5937 insertions(+), 6 deletions(-) create mode 100644 rally/plugins/openstack/scenarios/designate/__init__.py create mode 100644 rally/plugins/openstack/scenarios/designate/basic.py create mode 100644 rally/plugins/openstack/scenarios/designate/utils.py create mode 100644 rally/plugins/openstack/scenarios/neutron/__init__.py create mode 100644 rally/plugins/openstack/scenarios/neutron/network.py create mode 100644 rally/plugins/openstack/scenarios/neutron/utils.py create mode 100644 rally/plugins/openstack/scenarios/nova/__init__.py create mode 100644 rally/plugins/openstack/scenarios/nova/hypervisors.py create mode 100644 rally/plugins/openstack/scenarios/nova/keypairs.py create mode 100644 rally/plugins/openstack/scenarios/nova/servers.py create mode 100644 rally/plugins/openstack/scenarios/nova/utils.py create mode 100644 rally/plugins/openstack/scenarios/vm/__init__.py create mode 100644 rally/plugins/openstack/scenarios/vm/utils.py create mode 100644 rally/plugins/openstack/scenarios/vm/vmtasks.py create mode 100644 tests/unit/plugins/openstack/scenarios/designate/__init__.py create mode 100644 tests/unit/plugins/openstack/scenarios/designate/test_basic.py create mode 100644 tests/unit/plugins/openstack/scenarios/designate/test_utils.py create mode 100644 tests/unit/plugins/openstack/scenarios/neutron/__init__.py create mode 100644 tests/unit/plugins/openstack/scenarios/neutron/test_network.py create mode 100644 tests/unit/plugins/openstack/scenarios/neutron/test_utils.py create mode 100644 tests/unit/plugins/openstack/scenarios/nova/__init__.py create mode 100644 tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py create mode 100644 tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py create mode 100644 tests/unit/plugins/openstack/scenarios/nova/test_servers.py create mode 100644 
tests/unit/plugins/openstack/scenarios/nova/test_utils.py create mode 100644 tests/unit/plugins/openstack/scenarios/vm/__init__.py create mode 100644 tests/unit/plugins/openstack/scenarios/vm/test_utils.py create mode 100644 tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py diff --git a/rally/plugins/openstack/context/vm/custom_image.py b/rally/plugins/openstack/context/vm/custom_image.py index fe9f772b..eb941e64 100644 --- a/rally/plugins/openstack/context/vm/custom_image.py +++ b/rally/plugins/openstack/context/vm/custom_image.py @@ -18,8 +18,6 @@ import abc import six from rally.benchmark.context import base -from rally.benchmark.scenarios.nova import utils as nova_utils -from rally.benchmark.scenarios.vm import vmtasks from rally.benchmark import types from rally.common import broker from rally.common.i18n import _ @@ -27,6 +25,8 @@ from rally.common import log as logging from rally.common import utils from rally import consts from rally import osclients +from rally.plugins.openstack.scenarios.nova import utils as nova_utils +from rally.plugins.openstack.scenarios.vm import vmtasks LOG = logging.getLogger(__name__) diff --git a/rally/plugins/openstack/scenarios/cinder/volumes.py b/rally/plugins/openstack/scenarios/cinder/volumes.py index 8e762c64..0d6e904c 100644 --- a/rally/plugins/openstack/scenarios/cinder/volumes.py +++ b/rally/plugins/openstack/scenarios/cinder/volumes.py @@ -16,13 +16,13 @@ import random from rally.benchmark.scenarios import base -from rally.benchmark.scenarios.nova import utils as nova_utils from rally.benchmark import types as types from rally.benchmark import validation from rally.common import log as logging from rally import consts from rally.plugins.openstack.scenarios.cinder import utils from rally.plugins.openstack.scenarios.glance import utils as glance_utils +from rally.plugins.openstack.scenarios.nova import utils as nova_utils LOG = logging.getLogger(__name__) diff --git 
a/rally/plugins/openstack/scenarios/designate/__init__.py b/rally/plugins/openstack/scenarios/designate/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/rally/plugins/openstack/scenarios/designate/basic.py b/rally/plugins/openstack/scenarios/designate/basic.py new file mode 100644 index 00000000..65bad348 --- /dev/null +++ b/rally/plugins/openstack/scenarios/designate/basic.py @@ -0,0 +1,175 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Author: Endre Karlson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark.scenarios import base +from rally.benchmark import validation +from rally import consts +from rally.plugins.openstack.scenarios.designate import utils + + +class DesignateBasic(utils.DesignateScenario): + """Basic benchmark scenarios for Designate.""" + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_list_domains(self): + """Create a domain and list all domains. + + Measure the "designate domain-list" command performance. + + If you have only 1 user in your context, you will + add 1 domain on every iteration. So you will have more + and more domain and will be able to measure the + performance of the "designate domain-list" command depending on + the number of domains owned by users. 
+ """ + self._create_domain() + self._list_domains() + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def list_domains(self): + """List Designate domains. + + This simple scenario tests the designate domain-list command by listing + all the domains. + + Suppose if we have 2 users in context and each has 2 domains + uploaded for them we will be able to test the performance of + designate domain-list command in this case. + """ + + self._list_domains() + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_delete_domain(self): + """Add and then delete a domain. + + Measure the performance of creating and deleting domains + with different level of load. + """ + domain = self._create_domain() + self._delete_domain(domain["id"]) + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_delete_records(self, records_per_domain=5): + """Add and then delete records. + + Measure the performance of creating and deleting records + with different level of load. + + :param records_per_domain: Records to create pr domain. 
+ """ + domain = self._create_domain() + + records = [] + + key = "designate.create_%s_records" % records_per_domain + with base.AtomicAction(self, key): + for i in range(records_per_domain): + record = self._create_record(domain, atomic_action=False) + records.append(record) + + key = "designate.delete_%s_records" % records_per_domain + with base.AtomicAction(self, key): + for record in records: + self._delete_record( + domain["id"], record["id"], atomic_action=False) + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def list_records(self, domain_id): + """List Designate records. + + This simple scenario tests the designate record-list command by listing + all the records in a domain. + + Suppose if we have 2 users in context and each has 2 domains + uploaded for them we will be able to test the performance of + designate record-list command in this case. + + :param domain_id: Domain ID + """ + + self._list_records(domain_id) + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_list_records(self, records_per_domain=5): + """Add and then list records. + + If you have only 1 user in your context, you will + add 1 record on every iteration. So you will have more + and more records and will be able to measure the + performance of the "designate record-list" command depending on + the number of domains/records owned by users. + + :param records_per_domain: Records to create pr domain. 
+ """ + domain = self._create_domain() + + key = "designate.create_%s_records" % records_per_domain + with base.AtomicAction(self, key): + for i in range(records_per_domain): + self._create_record(domain, atomic_action=False) + + self._list_records(domain["id"]) + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(admin=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_list_servers(self): + """Create a Designate server and list all servers. + + If you have only 1 user in your context, you will + add 1 server on every iteration. So you will have more + and more server and will be able to measure the + performance of the "designate server-list" command depending on + the number of servers owned by users. + """ + self._create_server() + self._list_servers() + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(admin=True) + @base.scenario(context={"cleanup": ["designate"]}) + def create_and_delete_server(self): + """Add and then delete a server. + + Measure the performance of creating and deleting servers + with different level of load. + """ + server = self._create_server() + self._delete_server(server["id"]) + + @validation.required_services(consts.Service.DESIGNATE) + @validation.required_openstack(admin=True) + @base.scenario(context={"cleanup": ["designate"]}) + def list_servers(self): + """List Designate servers. + + This simple scenario tests the designate server-list command by listing + all the servers. + """ + self._list_servers() diff --git a/rally/plugins/openstack/scenarios/designate/utils.py b/rally/plugins/openstack/scenarios/designate/utils.py new file mode 100644 index 00000000..42f0b60c --- /dev/null +++ b/rally/plugins/openstack/scenarios/designate/utils.py @@ -0,0 +1,122 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. 
+# +# Author: Endre Karlson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark.scenarios import base + + +class DesignateScenario(base.Scenario): + """Base class for Designate scenarios with basic atomic actions.""" + + RESOURCE_NAME_PREFIX = "rally_" + + @base.atomic_action_timer("designate.create_domain") + def _create_domain(self, domain=None): + """Create domain. + + :param domain: dict, POST /v1/domains request options + :returns: designate domain dict + """ + domain = domain or {} + + domain.setdefault("email", "root@random.name") + domain.setdefault("name", "%s.name." % self._generate_random_name()) + return self.clients("designate").domains.create(domain) + + @base.atomic_action_timer("designate.list_domains") + def _list_domains(self): + """Return user domain list.""" + return self.clients("designate").domains.list() + + @base.atomic_action_timer("designate.delete_domain") + def _delete_domain(self, domain_id): + """Delete designate zone. + + :param domain_id: domain ID + """ + self.clients("designate").domains.delete(domain_id) + + def _create_record(self, domain, record=None, atomic_action=True): + """Create a record in a domain. 
+ + :param domain: domain dict + :param record: record dict + :param atomic_action: True if the record creation should be tracked + as an atomic action + :returns: Designate record dict + """ + record = record or {} + record.setdefault("type", "A") + record.setdefault("name", "%s.%s" % (self._generate_random_name(), + domain["name"])) + record.setdefault("data", "10.0.0.1") + + client = self.clients("designate") + + if atomic_action: + with base.AtomicAction(self, "designate.create_record"): + return client.records.create(domain["id"], record) + + return client.records.create(domain["id"], record) + + @base.atomic_action_timer("designate.list_records") + def _list_records(self, domain_id): + """List domain records. + + :param domain_id: domain ID + :returns: domain records list + """ + return self.clients("designate").records.list(domain_id) + + def _delete_record(self, domain_id, record_id, atomic_action=True): + """Delete a domain record. + + :param domain_id: domain ID + :param record_id: record ID + :param atomic_action: True if the record deletion should be tracked + as an atomic action + """ + client = self.clients("designate") + + if atomic_action: + with base.AtomicAction(self, "designate.delete_record"): + client.records.delete(domain_id, record_id) + else: + client.records.delete(domain_id, record_id) + + @base.atomic_action_timer("designate.create_server") + def _create_server(self, server=None): + """Create server. + + :param server: dict, POST /v1/servers request options + :returns: designate server dict + """ + server = server or {} + + server.setdefault("name", "name.%s." % self._generate_random_name()) + return self.admin_clients("designate").servers.create(server) + + @base.atomic_action_timer("designate.list_servers") + def _list_servers(self): + """Return user server list.""" + return self.admin_clients("designate").servers.list() + + @base.atomic_action_timer("designate.delete_server") + def _delete_server(self, server_id): + """Delete Server. 
+ + :param server_id: unicode server ID + """ + self.admin_clients("designate").servers.delete(server_id) diff --git a/rally/plugins/openstack/scenarios/glance/images.py b/rally/plugins/openstack/scenarios/glance/images.py index 750f64fb..25a26ea1 100644 --- a/rally/plugins/openstack/scenarios/glance/images.py +++ b/rally/plugins/openstack/scenarios/glance/images.py @@ -14,11 +14,11 @@ # under the License. from rally.benchmark.scenarios import base -from rally.benchmark.scenarios.nova import utils as nova_utils from rally.benchmark import types as types from rally.benchmark import validation from rally import consts from rally.plugins.openstack.scenarios.glance import utils +from rally.plugins.openstack.scenarios.nova import utils as nova_utils class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario): diff --git a/rally/plugins/openstack/scenarios/murano/environments.py b/rally/plugins/openstack/scenarios/murano/environments.py index 6ccb6113..7c203108 100644 --- a/rally/plugins/openstack/scenarios/murano/environments.py +++ b/rally/plugins/openstack/scenarios/murano/environments.py @@ -14,11 +14,11 @@ # under the License. from rally.benchmark.scenarios import base -from rally.benchmark.scenarios.vm import utils as vm_utils from rally.benchmark import validation from rally.common import log as logging from rally import consts from rally.plugins.openstack.scenarios.murano import utils +from rally.plugins.openstack.scenarios.vm import utils as vm_utils LOG = logging.getLogger(__name__) diff --git a/rally/plugins/openstack/scenarios/neutron/__init__.py b/rally/plugins/openstack/scenarios/neutron/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/rally/plugins/openstack/scenarios/neutron/network.py b/rally/plugins/openstack/scenarios/neutron/network.py new file mode 100644 index 00000000..71fd0803 --- /dev/null +++ b/rally/plugins/openstack/scenarios/neutron/network.py @@ -0,0 +1,331 @@ +# Copyright 2014: Intel Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark.scenarios import base +from rally.benchmark import validation +from rally import consts +from rally.plugins.openstack.scenarios.neutron import utils + + +class NeutronNetworks(utils.NeutronScenario): + """Benchmark scenarios for Neutron.""" + + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_list_networks(self, network_create_args=None): + """Create a network and then list all networks. + + Measure the "neutron net-list" command performance. + + If you have only 1 user in your context, you will + add 1 network on every iteration. So you will have more + and more networks and will be able to measure the + performance of the "neutron net-list" command depending on + the number of networks owned by users. + + :param network_create_args: dict, POST /v2.0/networks request options + """ + self._create_network(network_create_args or {}) + self._list_networks() + + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_update_networks(self, + network_update_args, + network_create_args=None): + """Create and update a network. + + Measure the "neutron net-create and net-update" command performance. 
+ + :param network_update_args: dict, PUT /v2.0/networks update request + :param network_create_args: dict, POST /v2.0/networks request options + """ + network = self._create_network(network_create_args or {}) + self._update_network(network, network_update_args) + + @validation.required_services(consts.Service.NEUTRON) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_delete_networks(self, network_create_args=None): + """Create and delete a network. + + Measure the "neutron net-create" and "net-delete" command performance. + + :param network_create_args: dict, POST /v2.0/networks request options + """ + network = self._create_network(network_create_args or {}) + self._delete_network(network["network"]) + + @validation.number("subnets_per_network", minval=1, integer_only=True) + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_list_subnets(self, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None): + """Create and a given number of subnets and list all subnets. + + The scenario creates a network, a given number of subnets and then + lists subnets. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + """ + self._create_network_and_subnets(network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + self._list_subnets() + + @validation.number("subnets_per_network", minval=1, integer_only=True) + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_update_subnets(self, + subnet_update_args, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None): + """Create and update a subnet. + + The scenario creates a network, a given number of subnets + and then updates the subnet. This scenario measures the + "neutron subnet-update" command performance. + + :param subnet_update_args: dict, PUT /v2.0/subnets update options + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + """ + network, subnets = self._create_network_and_subnets( + network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + + for subnet in subnets: + self._update_subnet(subnet, subnet_update_args) + + @validation.required_parameters("subnets_per_network") + @validation.required_services(consts.Service.NEUTRON) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_delete_subnets(self, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None): + """Create and delete a given number of subnets. 
+ + The scenario creates a network, a given number of subnets and then + deletes subnets. + + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + """ + network, subnets = self._create_network_and_subnets( + network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + + for subnet in subnets: + self._delete_subnet(subnet) + + @validation.number("subnets_per_network", minval=1, integer_only=True) + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_list_routers(self, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None, + router_create_args=None): + """Create and a given number of routers and list all routers. + + Create a network, a given number of subnets and routers + and then list all routers. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + :param router_create_args: dict, POST /v2.0/routers request options + """ + network, subnets = self._create_network_and_subnets( + network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + + for subnet in subnets: + router = self._create_router(router_create_args or {}) + self.clients("neutron").add_interface_router( + router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + + self._list_routers() + + @validation.number("subnets_per_network", minval=1, integer_only=True) + @validation.required_parameters("subnets_per_network") + @validation.required_services(consts.Service.NEUTRON) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_update_routers(self, + router_update_args, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None, + router_create_args=None): + """Create and update a given number of routers. + + Create a network, a given number of subnets and routers + and then updating all routers. 
+ + :param router_update_args: dict, PUT /v2.0/routers update options + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + :param router_create_args: dict, POST /v2.0/routers request options + """ + network, subnets = self._create_network_and_subnets( + network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + + for subnet in subnets: + router = self._create_router(router_create_args or {}) + self.clients("neutron").add_interface_router( + router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + self._update_router(router, router_update_args) + + @validation.required_parameters("subnets_per_network") + @validation.required_services(consts.Service.NEUTRON) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_delete_routers(self, + network_create_args=None, + subnet_create_args=None, + subnet_cidr_start=None, + subnets_per_network=None, + router_create_args=None): + """Create and delete a given number of routers. + + Create a network, a given number of subnets and routers + and then delete all routers. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :param subnet_create_args: dict, POST /v2.0/subnets request options + :param subnet_cidr_start: str, start value for subnets CIDR + :param subnets_per_network: int, number of subnets for one network + :param router_create_args: dict, POST /v2.0/routers request options + """ + network, subnets = self._create_network_and_subnets( + network_create_args or {}, + subnet_create_args or {}, + subnets_per_network, + subnet_cidr_start) + + routers = [] + for subnet in subnets: + router = self._create_router(router_create_args or {}) + self.clients("neutron").add_interface_router( + router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + routers.append(router) + + for e in range(subnets_per_network): + router = routers[e] + subnet = subnets[e] + self.clients("neutron").remove_interface_router( + router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + self._delete_router(router) + + @validation.number("ports_per_network", minval=1, integer_only=True) + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_list_ports(self, + network_create_args=None, + port_create_args=None, + ports_per_network=None): + """Create and a given number of ports and list all ports. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :param port_create_args: dict, POST /v2.0/ports request options + :param ports_per_network: int, number of ports for one network + """ + network = self._create_network(network_create_args or {}) + for i in range(ports_per_network): + self._create_port(network, port_create_args or {}) + + self._list_ports() + + @validation.number("ports_per_network", minval=1, integer_only=True) + @validation.required_services(consts.Service.NEUTRON) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_update_ports(self, + port_update_args, + network_create_args=None, + port_create_args=None, + ports_per_network=None): + """Create and update a given number of ports. + + Measure the "neutron port-create" and "neutron port-update" commands + performance. + + :param port_update_args: dict, PUT /v2.0/ports update request options + :param network_create_args: dict, POST /v2.0/networks request options + :param port_create_args: dict, POST /v2.0/ports request options + :param ports_per_network: int, number of ports for one network + """ + network = self._create_network(network_create_args or {}) + for i in range(ports_per_network): + port = self._create_port(network, port_create_args or {}) + self._update_port(port, port_update_args) + + @validation.required_parameters("ports_per_network") + @validation.required_services(consts.Service.NEUTRON) + @base.scenario(context={"cleanup": ["neutron"]}) + def create_and_delete_ports(self, + network_create_args=None, + port_create_args=None, + ports_per_network=None): + """Create and delete a port. + + Measure the "neutron port-create" and "neutron port-delete" commands + performance. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :param port_create_args: dict, POST /v2.0/ports request options + :param ports_per_network: int, number of ports for one network + """ + network = self._create_network(network_create_args or {}) + for i in range(ports_per_network): + port = self._create_port(network, port_create_args or {}) + self._delete_port(port) diff --git a/rally/plugins/openstack/scenarios/neutron/utils.py b/rally/plugins/openstack/scenarios/neutron/utils.py new file mode 100644 index 00000000..3869bee5 --- /dev/null +++ b/rally/plugins/openstack/scenarios/neutron/utils.py @@ -0,0 +1,289 @@ +# Copyright 2014: Intel Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from oslo_utils import uuidutils as uid + +from rally.benchmark.scenarios import base +from rally.benchmark.wrappers import network as network_wrapper +from rally.common import log as logging + +LOG = logging.getLogger(__name__) + + +class NeutronScenario(base.Scenario): + """Base class for Neutron scenarios with basic atomic actions.""" + + RESOURCE_NAME_PREFIX = "rally_net_" + RESOURCE_NAME_LENGTH = 16 + SUBNET_IP_VERSION = 4 + + @base.atomic_action_timer("neutron.create_network") + def _create_network(self, network_create_args): + """Create neutron network. 
+ + :param network_create_args: dict, POST /v2.0/networks request options + :returns: neutron network dict + """ + network_create_args.setdefault("name", self._generate_random_name()) + return self.clients("neutron").create_network( + {"network": network_create_args}) + + @base.atomic_action_timer("neutron.list_networks") + def _list_networks(self): + """Return user networks list.""" + return self.clients("neutron").list_networks()["networks"] + + @base.atomic_action_timer("neutron.update_network") + def _update_network(self, network, network_update_args): + """Update the network name and admin state. + + This atomic function updates network name by + appending the existing name and admin state with network_update_args. + + :param network: Network object + :param network_update_args: dict, POST /v2.0/networks update options + :returns: updated neutron network dict + """ + suffix = network_update_args.get( + "name", self._generate_random_name("_")) + admin_state_up = network_update_args.get("admin_state_up", True) + body = { + "network": { + "name": network["network"]["name"] + suffix, + "admin_state_up": admin_state_up + } + } + return self.clients("neutron").update_network( + network["network"]["id"], body) + + @base.atomic_action_timer("neutron.delete_network") + def _delete_network(self, network): + """Delete neutron network. + + :param network: Network object + """ + self.clients("neutron").delete_network(network["id"]) + + @base.atomic_action_timer("neutron.create_subnet") + def _create_subnet(self, network, subnet_create_args, start_cidr=None): + """Create neutron subnet. 
+ + :param network: neutron network dict + :param subnet_create_args: POST /v2.0/subnets request options + :returns: neutron subnet dict + """ + network_id = network["network"]["id"] + + if not subnet_create_args.get("cidr"): + start_cidr = start_cidr or "10.2.0.0/24" + subnet_create_args["cidr"] = ( + network_wrapper.generate_cidr(start_cidr=start_cidr)) + + subnet_create_args["network_id"] = network_id + subnet_create_args.setdefault( + "name", self._generate_random_name("rally_subnet_")) + subnet_create_args.setdefault("ip_version", self.SUBNET_IP_VERSION) + + return self.clients("neutron").create_subnet( + {"subnet": subnet_create_args}) + + @base.atomic_action_timer("neutron.list_subnets") + def _list_subnets(self): + """Returns user subnetworks list.""" + return self.clients("neutron").list_subnets()["subnets"] + + @base.atomic_action_timer("neutron.update_subnet") + def _update_subnet(self, subnet, subnet_update_args): + """Update the neutron subnet name and DHCP status. + + This atomic function updates subnet name by + appending the existing name and DHCP status with subnet_update_args. + + :param subnet: Subnet object + :param subnet_update_args: dict, PUT /v2.0/subnets update options + :returns: updated neutron subnet dict + """ + suffix = subnet_update_args.get( + "name", self._generate_random_name("_")) + enable_dhcp = subnet_update_args.get("enable_dhcp", True) + body = { + "subnet": { + "name": subnet["subnet"]["name"] + suffix, + "enable_dhcp": enable_dhcp + } + } + return self.clients("neutron").update_subnet( + subnet["subnet"]["id"], body) + + @base.atomic_action_timer("neutron.delete_subnet") + def _delete_subnet(self, subnet): + """Delete neutron subnet + + :param subnet: Subnet object + """ + self.clients("neutron").delete_subnet(subnet["subnet"]["id"]) + + @base.atomic_action_timer("neutron.create_router") + def _create_router(self, router_create_args, external_gw=False): + """Create neutron router. 
+ + :param router_create_args: POST /v2.0/routers request options + :returns: neutron router dict + """ + router_create_args.setdefault( + "name", self._generate_random_name("rally_router_")) + + if external_gw: + for network in self._list_networks(): + if network.get("router:external"): + external_network = network + gw_info = {"network_id": external_network["id"], + "enable_snat": True} + router_create_args.setdefault("external_gateway_info", + gw_info) + + return self.clients("neutron").create_router( + {"router": router_create_args}) + + @base.atomic_action_timer("neutron.list_routers") + def _list_routers(self): + """Returns user routers list.""" + return self.clients("neutron").list_routers()["routers"] + + @base.atomic_action_timer("neutron.delete_router") + def _delete_router(self, router): + """Delete neutron router + + :param router: Router object + """ + self.clients("neutron").delete_router(router["router"]["id"]) + + @base.atomic_action_timer("neutron.update_router") + def _update_router(self, router, router_update_args): + """Update the neutron router name and admin state. + + This atomic function updates router name by + appending the existing name and admin state with router_update_args. + + :param router: dict, neutron router + :param router_update_args: dict, PUT /v2.0/routers update options + :returns: updated neutron router dict + """ + suffix = router_update_args.get( + "name", self._generate_random_name("_")) + admin_state = router_update_args.get("admin_state_up", True) + body = { + "router": { + "name": router["router"]["name"] + suffix, + "admin_state_up": admin_state + } + } + return self.clients("neutron").update_router( + router["router"]["id"], body) + + @base.atomic_action_timer("neutron.create_port") + def _create_port(self, network, port_create_args): + """Create neutron port. 
+ + :param network: neutron network dict + :param port_create_args: POST /v2.0/ports request options + :returns: neutron port dict + """ + port_create_args["network_id"] = network["network"]["id"] + port_create_args.setdefault( + "name", self._generate_random_name("rally_port_")) + return self.clients("neutron").create_port({"port": port_create_args}) + + @base.atomic_action_timer("neutron.list_ports") + def _list_ports(self): + """Return user ports list.""" + return self.clients("neutron").list_ports()["ports"] + + @base.atomic_action_timer("neutron.update_port") + def _update_port(self, port, port_update_args): + """Update the neutron port name, admin state, device id and owner. + + This atomic function updates port name by + appending the existing name, admin state, device id and + device owner with port_update_args. + + :param port: dict, neutron port + :param port_update_args: dict, PUT /v2.0/ports update options + :returns: updated neutron port dict + """ + suffix = port_update_args.get( + "name", self._generate_random_name("_")) + admin_state = port_update_args.get("admin_state_up", True) + device_owner = port_update_args.get("device_owner", "compute:nova") + device_id = port_update_args.get("device_id", uid.generate_uuid()) + body = { + "port": { + "name": port["port"]["name"] + suffix, + "admin_state_up": admin_state, + "device_id": device_id, + "device_owner": device_owner + } + } + return self.clients("neutron").update_port(port["port"]["id"], body) + + @base.atomic_action_timer("neutron.delete_port") + def _delete_port(self, port): + """Delete neutron port. + + :param port: Port object + """ + self.clients("neutron").delete_port(port["port"]["id"]) + + def _create_network_and_subnets(self, + network_create_args=None, + subnet_create_args=None, + subnets_per_network=1, + subnet_cidr_start="1.0.0.0/24"): + """Create network and subnets. 
+ + :parm network_create_args: dict, POST /v2.0/networks request options + :parm subnet_create_args: dict, POST /v2.0/subnets request options + :parm subnets_per_network: int, number of subnets for one network + :parm subnet_cidr_start: str, start value for subnets CIDR + :returns: tuple of result network and subnets list + """ + subnets = [] + network = self._create_network(network_create_args or {}) + + for i in range(subnets_per_network): + subnet = self._create_subnet(network, subnet_create_args or {}, + subnet_cidr_start) + subnets.append(subnet) + return network, subnets + + @base.atomic_action_timer("neutron.add_interface_router") + def _add_interface_router(self, subnet, router): + """Connect subnet to router. + + :param subnet: dict, neutron subnet + :param router: dict, neutron router + """ + self.clients("neutron").add_interface_router( + router["id"], {"subnet_id": subnet["id"]}) + + @base.atomic_action_timer("neutron.remove_interface_router") + def _remove_interface_router(self, subnet, router): + """Remove subnet from router + + :param subnet: dict, neutron subnet + :param router: dict, neutron router + """ + self.clients("neutron").remove_interface_router( + router["id"], {"subnet_id": subnet["id"]}) diff --git a/rally/plugins/openstack/scenarios/nova/__init__.py b/rally/plugins/openstack/scenarios/nova/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/rally/plugins/openstack/scenarios/nova/hypervisors.py b/rally/plugins/openstack/scenarios/nova/hypervisors.py new file mode 100644 index 00000000..e5f60276 --- /dev/null +++ b/rally/plugins/openstack/scenarios/nova/hypervisors.py @@ -0,0 +1,40 @@ +# Copyright 2015 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark.scenarios import base +from rally.benchmark import validation +from rally.common import log as logging +from rally import consts +from rally.plugins.openstack.scenarios.nova import utils + + +LOG = logging.getLogger(__name__) + + +class NovaHypervisors(utils.NovaScenario): + """Benchmark scenarios for Nova hypervisors.""" + + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(admin=True) + @base.scenario() + def list_hypervisors(self, detailed=True): + """List hypervisors. + + Measure the "nova hypervisor-list" command performance. + + :param detailed: True if the hypervisor listing should contain + detailed information about all of them + """ + self._list_hypervisors(detailed) diff --git a/rally/plugins/openstack/scenarios/nova/keypairs.py b/rally/plugins/openstack/scenarios/nova/keypairs.py new file mode 100644 index 00000000..47b27712 --- /dev/null +++ b/rally/plugins/openstack/scenarios/nova/keypairs.py @@ -0,0 +1,79 @@ +# Copyright 2015: Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from rally.benchmark.scenarios import base +from rally.benchmark import types +from rally.benchmark import validation +from rally import consts +from rally.plugins.openstack.scenarios.nova import utils + + +class NovaKeypair(utils.NovaScenario): + """Benchmark scenarios for Nova keypairs.""" + + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def create_and_list_keypairs(self, **kwargs): + """Create a keypair with random name and list keypairs. + + This scenario creates a keypair and then lists all keypairs. + + :param kwargs: Optional additional arguments for keypair creation + """ + + self._create_keypair(**kwargs) + self._list_keypairs() + + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def create_and_delete_keypair(self, **kwargs): + """Create a keypair with random name and delete keypair. + + This scenario creates a keypair and then delete that keypair. + + :param kwargs: Optional additional arguments for keypair creation + """ + + keypair = self._create_keypair(**kwargs) + self._delete_keypair(keypair) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_delete_server_with_keypair(self, image, flavor, + **kwargs): + """Boot and delete server with keypair. 
+ + Plan of this scenario: + - create a keypair + - boot a VM with created keypair + - delete server + - delete keypair + + :param image: ID of the image to be used for server creation + :param flavor: ID of the flavor to be used for server creation + :param kwargs: Optional additional arguments for keypair creation + """ + + keypair = self._create_keypair(**kwargs) + server = self._boot_server(image, flavor, + key_name=keypair) + self._delete_server(server) + self._delete_keypair(keypair) diff --git a/rally/plugins/openstack/scenarios/nova/servers.py b/rally/plugins/openstack/scenarios/nova/servers.py new file mode 100644 index 00000000..372e43a4 --- /dev/null +++ b/rally/plugins/openstack/scenarios/nova/servers.py @@ -0,0 +1,619 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import jsonschema + +from rally.benchmark.scenarios import base +from rally.benchmark.scenarios import utils as scenario_utils +from rally.benchmark import types as types +from rally.benchmark import validation +from rally.benchmark.wrappers import network as network_wrapper +from rally.common import log as logging +from rally import consts +from rally import exceptions as rally_exceptions +from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils +from rally.plugins.openstack.scenarios.nova import utils + +LOG = logging.getLogger(__name__) + + +class NovaServers(utils.NovaScenario, + cinder_utils.CinderScenario): + """Benchmark scenarios for Nova servers.""" + + RESOURCE_NAME_PREFIX = "rally_novaserver_" + RESOURCE_NAME_LENGTH = 16 + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_list_server(self, image, flavor, + detailed=True, **kwargs): + """Boot a server from an image and then list all servers. + + Measure the "nova list" command performance. + + If you have only 1 user in your context, you will + add 1 server on every iteration. So you will have more + and more servers and will be able to measure the + performance of the "nova list" command depending on + the number of servers owned by users. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param detailed: True if the server listing should contain + detailed information about all of them + :param kwargs: Optional additional arguments for server creation + """ + self._boot_server(image, flavor, **kwargs) + self._list_servers(detailed) + + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def list_servers(self, detailed=True): + """List all servers. + + This simple scenario test the nova list command by listing + all the servers. + + :param detailed: True if detailed information about servers + should be listed + """ + self._list_servers(detailed) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_delete_server(self, image, flavor, + min_sleep=0, max_sleep=0, + force_delete=False, **kwargs): + """Boot and delete a server. + + Optional 'min_sleep' and 'max_sleep' parameters allow the scenario + to simulate a pause between volume creation and deletion + (of random duration from [min_sleep, max_sleep]). 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param min_sleep: Minimum sleep time in seconds (non-negative) + :param max_sleep: Maximum sleep time in seconds (non-negative) + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self.sleep_between(min_sleep, max_sleep) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova", "cinder"]}) + def boot_server_from_volume_and_delete(self, image, flavor, + volume_size, + min_sleep=0, max_sleep=0, + force_delete=False, **kwargs): + """Boot a server from volume and then delete it. + + The scenario first creates a volume and then a server. + Optional 'min_sleep' and 'max_sleep' parameters allow the scenario + to simulate a pause between volume creation and deletion + (of random duration from [min_sleep, max_sleep]). 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param volume_size: volume size (in GB) + :param min_sleep: Minimum sleep time in seconds (non-negative) + :param max_sleep: Maximum sleep time in seconds (non-negative) + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + volume = self._create_volume(volume_size, imageRef=image) + block_device_mapping = {"vda": "%s:::1" % volume.id} + server = self._boot_server(image, flavor, + block_device_mapping=block_device_mapping, + **kwargs) + self.sleep_between(min_sleep, max_sleep) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_bounce_server(self, image, flavor, + force_delete=False, actions=None, **kwargs): + """Boot a server and run specified actions against it. + + Actions should be passed into the actions parameter. Available actions + are 'hard_reboot', 'soft_reboot', 'stop_start' and 'rescue_unrescue'. + Delete server after all actions were completed. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param force_delete: True if force_delete should be used + :param actions: list of action dictionaries, where each action + dictionary speicifes an action to be performed + in the following format: + {"action_name": } + :param kwargs: Optional additional arguments for server creation + """ + action_builder = self._bind_actions() + actions = actions or [] + try: + action_builder.validate(actions) + except jsonschema.exceptions.ValidationError as error: + raise rally_exceptions.InvalidConfigException( + "Invalid server actions configuration \'%(actions)s\' due to: " + "%(error)s" % {"actions": str(actions), "error": str(error)}) + server = self._boot_server(image, flavor, **kwargs) + for action in action_builder.build_actions(actions, server): + action() + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_lock_unlock_and_delete(self, image, flavor, + min_sleep=0, max_sleep=0, + force_delete=False, + **kwargs): + """Boot a server, lock it, then unlock and delete it. + + Optional 'min_sleep' and 'max_sleep' parameters allow the + scenario to simulate a pause between locking and unlocking the + server (of random duration from min_sleep to max_sleep). 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param min_sleep: Minimum sleep time between locking and unlocking + in seconds + :param max_sleep: Maximum sleep time between locking and unlocking + in seconds + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._lock_server(server) + self.sleep_between(min_sleep, max_sleep) + self._unlock_server(server) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA, consts.Service.GLANCE) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova", "glance"]}) + def snapshot_server(self, image, flavor, + force_delete=False, **kwargs): + """Boot a server, make its snapshot and delete both. + + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + + server = self._boot_server(image, flavor, **kwargs) + image = self._create_image(server) + self._delete_server(server, force=force_delete) + + server = self._boot_server(image.id, flavor, **kwargs) + self._delete_server(server, force=force_delete) + self._delete_image(image) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_server(self, image, flavor, auto_assign_nic=False, **kwargs): + """Boot a server. + + Assumes that cleanup is done elsewhere. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param auto_assign_nic: True if NICs should be assigned + :param kwargs: Optional additional arguments for server creation + """ + self._boot_server(image, flavor, + auto_assign_nic=auto_assign_nic, **kwargs) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova", "cinder"]}) + def boot_server_from_volume(self, image, flavor, volume_size, + auto_assign_nic=False, **kwargs): + """Boot a server from volume. + + The scenario first creates a volume and then a server. + Assumes that cleanup is done elsewhere. + + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param volume_size: volume size (in GB) + :param auto_assign_nic: True if NICs should be assigned + :param kwargs: Optional additional arguments for server creation + """ + volume = self._create_volume(volume_size, imageRef=image) + block_device_mapping = {"vda": "%s:::1" % volume.id} + self._boot_server(image, flavor, auto_assign_nic=auto_assign_nic, + block_device_mapping=block_device_mapping, + **kwargs) + + def _bind_actions(self): + actions = ["hard_reboot", "soft_reboot", "stop_start", + "rescue_unrescue"] + action_builder = scenario_utils.ActionBuilder(actions) + action_builder.bind_action("hard_reboot", self._reboot_server) + action_builder.bind_action("soft_reboot", self._soft_reboot_server) + action_builder.bind_action("stop_start", + self._stop_and_start_server) + action_builder.bind_action("rescue_unrescue", + self._rescue_and_unrescue_server) + return action_builder + + def _stop_and_start_server(self, server): + """Stop and then start the given server. 
+ + A stop will be issued on the given server upon which time + this method will wait for the server to become 'SHUTOFF'. + Once the server is SHUTOFF a start will be issued and this + method will wait for the server to become 'ACTIVE' again. + + :param server: The server to stop and then start. + + """ + self._stop_server(server) + self._start_server(server) + + def _rescue_and_unrescue_server(self, server): + """Rescue and then unrescue the given server. + + A rescue will be issued on the given server upon which time + this method will wait for the server to become 'RESCUE'. + Once the server is RESCUE a unrescue will be issued and + this method will wait for the server to become 'ACTIVE' + again. + + :param server: The server to rescue and then unrescue. + + """ + self._rescue_server(server) + self._unrescue_server(server) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType, + to_flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def resize_server(self, image, flavor, to_flavor, + force_delete=False, **kwargs): + """Boot a server, then resize and delete it. + + This test will confirm the resize by default, + or revert the resize if confirm is set to false. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param to_flavor: flavor to be used to resize the booted instance + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._resize(server, to_flavor) + # by default we confirm + confirm = kwargs.get("confirm", True) + if confirm: + self._resize_confirm(server) + else: + self._resize_revert(server) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def suspend_and_resume_server(self, image, flavor, + force_delete=False, **kwargs): + """Create a server, suspend, resume and then delete it + + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._suspend_server(server) + self._resume_server(server) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def pause_and_unpause_server(self, image, flavor, + force_delete=False, **kwargs): + """Create a server, pause, unpause and then delete it + + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param force_delete: True if 
force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._pause_server(server) + self._unpause_server(server) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def shelve_and_unshelve_server(self, image, flavor, + force_delete=False, **kwargs): + """Create a server, shelve, unshelve and then delete it + + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param force_delete: True if force_delete should be used + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._shelve_server(server) + self._unshelve_server(server) + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(admin=True, users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_live_migrate_server(self, image, + flavor, block_migration=False, + disk_over_commit=False, min_sleep=0, + max_sleep=0, **kwargs): + """Live Migrate a server. + + This scenario launches a VM on a compute node available in + the availability zone and then migrates the VM to another + compute node on the same availability zone. + + Optional 'min_sleep' and 'max_sleep' parameters allow the scenario + to simulate a pause between VM booting and running live migration + (of random duration from range [min_sleep, max_sleep]). 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param block_migration: Specifies the migration type + :param disk_over_commit: Specifies whether to allow overcommit + on migrated instance or not + :param min_sleep: Minimum sleep time in seconds (non-negative) + :param max_sleep: Maximum sleep time in seconds (non-negative) + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self.sleep_between(min_sleep, max_sleep) + + new_host = self._find_host_to_migrate(server) + self._live_migrate(server, new_host, + block_migration, disk_over_commit) + + self._delete_server(server) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) + @validation.required_openstack(admin=True, users=True) + @base.scenario(context={"cleanup": ["nova", "cinder"]}) + def boot_server_from_volume_and_live_migrate(self, image, flavor, + volume_size, + block_migration=False, + disk_over_commit=False, + force_delete=False, + min_sleep=0, max_sleep=0, + **kwargs): + """Boot a server from volume and then migrate it. + + The scenario first creates a volume and a server booted from + the volume on a compute node available in the availability zone and + then migrates the VM to another compute node on the same availability + zone. + + Optional 'min_sleep' and 'max_sleep' parameters allow the scenario + to simulate a pause between VM booting and running live migration + (of random duration from range [min_sleep, max_sleep]). 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param volume_size: volume size (in GB) + :param block_migration: Specifies the migration type + :param disk_over_commit: Specifies whether to allow overcommit + on migrated instance or not + :param force_delete: True if force_delete should be used + :param min_sleep: Minimum sleep time in seconds (non-negative) + :param max_sleep: Maximum sleep time in seconds (non-negative) + :param kwargs: Optional additional arguments for server creation + """ + volume = self._create_volume(volume_size, imageRef=image) + block_device_mapping = {"vda": "%s:::1" % volume.id} + server = self._boot_server(image, flavor, + block_device_mapping=block_device_mapping, + **kwargs) + self.sleep_between(min_sleep, max_sleep) + + new_host = self._find_host_to_migrate(server) + self._live_migrate(server, new_host, + block_migration, disk_over_commit) + + self._delete_server(server, force=force_delete) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) + @validation.required_openstack(admin=True, users=True) + @base.scenario(context={"cleanup": ["cinder", "nova"]}) + def boot_server_attach_created_volume_and_live_migrate( + self, + image, + flavor, + size, + block_migration=False, + disk_over_commit=False, + boot_server_kwargs=None, + create_volume_kwargs=None, + min_sleep=0, + max_sleep=0): + """Create a VM, attach a volume to it and live migrate. + + Simple test to create a VM and attach a volume, then migrate the VM, + detach the volume and delete volume/VM. + + Optional 'min_sleep' and 'max_sleep' parameters allow the scenario + to simulate a pause between attaching a volume and running live + migration (of random duration from range [min_sleep, max_sleep]). 
+ + :param image: Glance image name to use for the VM + :param flavor: VM flavor name + :param size: volume size (in GB) + :param block_migration: Specifies the migration type + :param disk_over_commit: Specifies whether to allow overcommit + on migrated instance or not + :param boot_server_kwargs: optional arguments for VM creation + :param create_volume_kwargs: optional arguments for volume creation + :param min_sleep: Minimum sleep time in seconds (non-negative) + :param max_sleep: Maximum sleep time in seconds (non-negative) + """ + + if boot_server_kwargs is None: + boot_server_kwargs = {} + if create_volume_kwargs is None: + create_volume_kwargs = {} + + server = self._boot_server(image, flavor, **boot_server_kwargs) + volume = self._create_volume(size, **create_volume_kwargs) + + self._attach_volume(server, volume) + + self.sleep_between(min_sleep, max_sleep) + + new_host = self._find_host_to_migrate(server) + self._live_migrate(server, new_host, + block_migration, disk_over_commit) + + self._detach_volume(server, volume) + + self._delete_volume(volume) + self._delete_server(server) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(admin=True, users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_migrate_server(self, image, flavor, **kwargs): + """Migrate a server. + + This scenario launches a VM on a compute node available in + the availability zone and stops the VM, and then migrates the VM + to another compute node on the same availability zone. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + self._stop_server(server) + self._migrate(server) + # NOTE(wtakase): This is required because cold migration and resize + # share same code path. + confirm = kwargs.get("confirm", True) + if confirm: + self._resize_confirm(server, status="SHUTOFF") + else: + self._resize_revert(server, status="SHUTOFF") + self._delete_server(server) + + @types.set(from_image=types.ImageResourceType, + to_image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "from_image") + @validation.image_valid_on_flavor("flavor", "to_image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(admin=True, users=True) + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_rebuild_server(self, from_image, to_image, flavor, **kwargs): + """Rebuild a server. + + This scenario launches a VM, then rebuilds that VM with a + different image. + + :param from_image: image to be used to boot an instance + :param to_image: image to be used to rebuild the instance + :param flavor: flavor to be used to boot an instance + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(from_image, flavor, **kwargs) + self._rebuild_server(server, to_image) + self._delete_server(server) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.required_services(consts.Service.NOVA) + @validation.required_openstack(users=True) + @validation.required_contexts("network") + @base.scenario(context={"cleanup": ["nova"]}) + def boot_and_associate_floating_ip(self, image, flavor, **kwargs): + """Boot a server and associate a floating IP to it. 
+ + :param image: image to be used to boot an instance + :param flavor: flavor to be used to boot an instance + :param kwargs: Optional additional arguments for server creation + """ + server = self._boot_server(image, flavor, **kwargs) + address = network_wrapper.wrap(self.clients).create_floating_ip( + tenant_id=server.tenant_id) + self._associate_floating_ip(server, address["ip"]) diff --git a/rally/plugins/openstack/scenarios/nova/utils.py b/rally/plugins/openstack/scenarios/nova/utils.py new file mode 100644 index 00000000..8f10dfb4 --- /dev/null +++ b/rally/plugins/openstack/scenarios/nova/utils.py @@ -0,0 +1,775 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import random +import time + +from oslo_config import cfg +import six + +from rally.benchmark.scenarios import base +from rally.benchmark import utils as bench_utils +from rally.benchmark.wrappers import network as network_wrapper +from rally import exceptions + + +NOVA_BENCHMARK_OPTS = [] +option_names_and_defaults = [ + # action, prepoll delay, timeout, poll interval + ("start", 0, 300, 1), + ("stop", 0, 300, 2), + ("boot", 1, 300, 1), + ("delete", 2, 300, 2), + ("reboot", 2, 300, 2), + ("rebuild", 1, 300, 1), + ("rescue", 2, 300, 2), + ("unrescue", 2, 300, 2), + ("suspend", 2, 300, 2), + ("resume", 2, 300, 2), + ("pause", 2, 300, 2), + ("unpause", 2, 300, 2), + ("shelve", 2, 300, 2), + ("unshelve", 2, 300, 2), + ("image_create", 0, 300, 2), + ("image_delete", 0, 300, 2), + ("resize", 2, 400, 5), + ("resize_confirm", 0, 200, 2), + ("resize_revert", 0, 200, 2), + ("live_migrate", 1, 400, 2), + ("migrate", 1, 400, 2), +] + +for action, prepoll, timeout, poll in option_names_and_defaults: + NOVA_BENCHMARK_OPTS.extend([ + cfg.FloatOpt( + "nova_server_%s_prepoll_delay" % action, + default=float(prepoll), + help="Time to sleep after %s before polling for status" % action + ), + cfg.FloatOpt( + "nova_server_%s_timeout" % action, + default=float(timeout), + help="Server %s timeout" % action + ), + cfg.FloatOpt( + "nova_server_%s_poll_interval" % action, + default=float(poll), + help="Server %s poll interval" % action + ) + ]) + +NOVA_BENCHMARK_OPTS.extend([ + cfg.FloatOpt( + "nova_detach_volume_timeout", + default=float(200), + help="Nova volume detach timeout"), + cfg.FloatOpt( + "nova_detach_volume_poll_interval", + default=float(2), + help="Nova volume detach poll interval") +]) + +CONF = cfg.CONF +benchmark_group = cfg.OptGroup(name="benchmark", + title="benchmark options") +CONF.register_group(benchmark_group) +CONF.register_opts(NOVA_BENCHMARK_OPTS, group=benchmark_group) + + +class NovaScenario(base.Scenario): + """Base class for Nova scenarios with basic 
atomic actions.""" + + @base.atomic_action_timer("nova.list_servers") + def _list_servers(self, detailed=True): + """Returns user servers list.""" + return self.clients("nova").servers.list(detailed) + + @base.atomic_action_timer("nova.boot_server") + def _boot_server(self, image_id, flavor_id, + auto_assign_nic=False, name=None, **kwargs): + """Boot a server. + + Returns when the server is actually booted and in "ACTIVE" state. + + If multiple networks created by Network context are present, the first + network found that isn't associated with a floating IP pool is used. + + :param image_id: int, image ID for server creation + :param flavor_id: int, flavor ID for server creation + :param auto_assign_nic: bool, whether or not to auto assign NICs + :param name: str, server name + :param kwargs: other optional parameters to initialize the server + :returns: nova Server instance + """ + server_name = name or self._generate_random_name() + secgroup = self.context.get("user", {}).get("secgroup") + if secgroup: + if "security_groups" not in kwargs: + kwargs["security_groups"] = [secgroup["name"]] + elif secgroup["name"] not in kwargs["security_groups"]: + kwargs["security_groups"].append(secgroup["name"]) + + if auto_assign_nic and not kwargs.get("nics", False): + nets = [net["id"] for net in + self.context.get("tenant", {}).get("networks", [])] + if nets: + # NOTE(amaretskiy): Balance servers among networks: + # divmod(iteration % tenants_num, nets_num)[1] + net_idx = divmod( + (self.context["iteration"] + % self.context["config"]["users"]["tenants"]), + len(nets))[1] + kwargs["nics"] = [{"net-id": nets[net_idx]}] + + server = self.clients("nova").servers.create( + server_name, image_id, flavor_id, **kwargs) + + time.sleep(CONF.benchmark.nova_server_boot_prepoll_delay) + server = bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_boot_timeout, + 
check_interval=CONF.benchmark.nova_server_boot_poll_interval + ) + return server + + def _do_server_reboot(self, server, reboottype): + server.reboot(reboot_type=reboottype) + time.sleep(CONF.benchmark.nova_server_reboot_prepoll_delay) + bench_utils.wait_for( + server, is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_reboot_timeout, + check_interval=CONF.benchmark.nova_server_reboot_poll_interval + ) + + @base.atomic_action_timer("nova.soft_reboot_server") + def _soft_reboot_server(self, server): + """Reboot a server with soft reboot. + + A soft reboot will be issued on the given server upon which time + this method will wait for the server to become active. + + :param server: The server to reboot. + """ + self._do_server_reboot(server, "SOFT") + + @base.atomic_action_timer("nova.reboot_server") + def _reboot_server(self, server): + """Reboot a server with hard reboot. + + A reboot will be issued on the given server upon which time + this method will wait for the server to become active. + + :param server: The server to reboot. + """ + self._do_server_reboot(server, "HARD") + + @base.atomic_action_timer("nova.rebuild_server") + def _rebuild_server(self, server, image, **kwargs): + """Rebuild a server with a new image. + + :param server: The server to rebuild. + :param image: The new image to rebuild the server with. + :param kwargs: Optional additional arguments to pass to the rebuild + """ + server.rebuild(image, **kwargs) + time.sleep(CONF.benchmark.nova_server_rebuild_prepoll_delay) + bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_rebuild_timeout, + check_interval=CONF.benchmark.nova_server_rebuild_poll_interval + ) + + @base.atomic_action_timer("nova.start_server") + def _start_server(self, server): + """Start the given server. 
+
+ A start will be issued for the given server upon which time
+ this method will wait for it to become ACTIVE.
+
+ :param server: The server to start and wait to become ACTIVE.
+ """
+ server.start()
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("ACTIVE"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_start_timeout,
+ check_interval=CONF.benchmark.nova_server_start_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.stop_server")
+ def _stop_server(self, server):
+ """Stop the given server.
+
+ Issues a stop on the given server and waits for the server
+ to become SHUTOFF.
+
+ :param server: The server to stop.
+ """
+ server.stop()
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("SHUTOFF"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_stop_timeout,
+ check_interval=CONF.benchmark.nova_server_stop_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.rescue_server")
+ def _rescue_server(self, server):
+ """Rescue the given server.
+
+ Returns when the server is actually rescued and is in the "Rescue"
+ state.
+
+ :param server: Server object
+ """
+ server.rescue()
+ time.sleep(CONF.benchmark.nova_server_rescue_prepoll_delay)
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("RESCUE"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_rescue_timeout,
+ check_interval=CONF.benchmark.nova_server_rescue_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.unrescue_server")
+ def _unrescue_server(self, server):
+ """Unrescue the given server. 
+
+ Returns when the server is unrescued and waits to become ACTIVE
+
+ :param server: Server object
+ """
+ server.unrescue()
+ time.sleep(CONF.benchmark.nova_server_unrescue_prepoll_delay)
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("ACTIVE"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_unrescue_timeout,
+ check_interval=CONF.benchmark.nova_server_unrescue_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.suspend_server")
+ def _suspend_server(self, server):
+ """Suspends the given server.
+
+ Returns when the server is actually suspended and is in the "Suspended"
+ state.
+
+ :param server: Server object
+ """
+ server.suspend()
+ time.sleep(CONF.benchmark.nova_server_suspend_prepoll_delay)
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("SUSPENDED"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_suspend_timeout,
+ check_interval=CONF.benchmark.nova_server_suspend_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.resume_server")
+ def _resume_server(self, server):
+ """Resumes the suspended server.
+
+ Returns when the server is actually resumed and is in the "ACTIVE"
+ state.
+
+ :param server: Server object
+ """
+ server.resume()
+ time.sleep(CONF.benchmark.nova_server_resume_prepoll_delay)
+ bench_utils.wait_for(
+ server, is_ready=bench_utils.resource_is("ACTIVE"),
+ update_resource=bench_utils.get_from_manager(),
+ timeout=CONF.benchmark.nova_server_resume_timeout,
+ check_interval=CONF.benchmark.nova_server_resume_poll_interval
+ )
+
+ @base.atomic_action_timer("nova.pause_server")
+ def _pause_server(self, server):
+ """Pause the live server.
+
+ Returns when the server is actually paused and is in the "PAUSED"
+ state. 
+ + :param server: Server object + """ + server.pause() + time.sleep(CONF.benchmark.nova_server_pause_prepoll_delay) + bench_utils.wait_for( + server, is_ready=bench_utils.resource_is("PAUSED"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_pause_timeout, + check_interval=CONF.benchmark.nova_server_pause_poll_interval + ) + + @base.atomic_action_timer("nova.unpause_server") + def _unpause_server(self, server): + """Unpause the paused server. + + Returns when the server is actually unpaused and is in the "ACTIVE" + state. + + :param server: Server object + """ + server.unpause() + time.sleep(CONF.benchmark.nova_server_unpause_prepoll_delay) + bench_utils.wait_for( + server, is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_unpause_timeout, + check_interval=CONF.benchmark.nova_server_unpause_poll_interval + ) + + @base.atomic_action_timer("nova.shelve_server") + def _shelve_server(self, server): + """Shelve the given server. + + Returns when the server is actually shelved and is in the + "SHELVED_OFFLOADED" state. + + :param server: Server object + """ + server.shelve() + time.sleep(CONF.benchmark.nova_server_shelve_prepoll_delay) + bench_utils.wait_for( + server, is_ready=bench_utils.resource_is("SHELVED_OFFLOADED"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_shelve_timeout, + check_interval=CONF.benchmark.nova_server_shelve_poll_interval + ) + + @base.atomic_action_timer("nova.unshelve_server") + def _unshelve_server(self, server): + """Unshelve the given server. + + Returns when the server is unshelved and is in the "ACTIVE" state. 
+ + :param server: Server object + """ + server.unshelve() + time.sleep(CONF.benchmark.nova_server_unshelve_prepoll_delay) + bench_utils.wait_for( + server, is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_unshelve_timeout, + check_interval=CONF.benchmark.nova_server_unshelve_poll_interval + ) + + def _delete_server(self, server, force=False): + """Delete the given server. + + Returns when the server is actually deleted. + + :param server: Server object + :param force: If True, force_delete will be used instead of delete. + """ + atomic_name = ("nova.%sdelete_server") % (force and "force_" or "") + with base.AtomicAction(self, atomic_name): + if force: + server.force_delete() + else: + server.delete() + + bench_utils.wait_for_delete( + server, + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_delete_timeout, + check_interval=CONF.benchmark.nova_server_delete_poll_interval + ) + + @base.atomic_action_timer("nova.delete_image") + def _delete_image(self, image): + """Delete the given image. + + Returns when the image is actually deleted. + + :param image: Image object + """ + image.delete() + check_interval = CONF.benchmark.nova_server_image_delete_poll_interval + bench_utils.wait_for_delete( + image, + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_image_delete_timeout, + check_interval=check_interval + ) + + @base.atomic_action_timer("nova.create_image") + def _create_image(self, server): + """Create an image from the given server + + Uses the server name to name the created image. Returns when the image + is actually created and is in the "Active" state. 
+ + :param server: Server object for which the image will be created + + :returns: Created image object + """ + image_uuid = self.clients("nova").servers.create_image(server, + server.name) + image = self.clients("nova").images.get(image_uuid) + check_interval = CONF.benchmark.nova_server_image_create_poll_interval + image = bench_utils.wait_for( + image, + is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_image_create_timeout, + check_interval=check_interval + ) + return image + + @base.atomic_action_timer("nova.create_keypair") + def _create_keypair(self, **kwargs): + """Create a keypair + + :returns: Created keypair name + """ + keypair_name = self._generate_random_name(prefix="rally_keypair_") + keypair = self.clients("nova").keypairs.create(keypair_name, **kwargs) + return keypair.name + + @base.atomic_action_timer("nova.list_keypairs") + def _list_keypairs(self): + """Return user keypairs list.""" + return self.clients("nova").keypairs.list() + + @base.atomic_action_timer("nova.delete_keypair") + def _delete_keypair(self, keypair_name): + """Delete keypair + + :param keypair_name: The keypair name to delete. + """ + self.clients("nova").keypairs.delete(keypair_name) + + @base.atomic_action_timer("nova.boot_servers") + def _boot_servers(self, name_prefix, image_id, flavor_id, + requests, instances_amount=1, **kwargs): + """Boot multiple servers. + + Returns when all the servers are actually booted and are in the + "Active" state. + + :param name_prefix: The prefix to use while naming the created servers. + The rest of the server names will be '_No.' 
+ :param image_id: ID of the image to be used for server creation + :param flavor_id: ID of the flavor to be used for server creation + :param requests: Number of booting requests to perform + :param instances_amount: Number of instances to boot per each request + + :returns: List of created server objects + """ + for i in range(requests): + self.clients("nova").servers.create("%s_%d" % (name_prefix, i), + image_id, flavor_id, + min_count=instances_amount, + max_count=instances_amount, + **kwargs) + # NOTE(msdubov): Nova python client returns only one server even when + # min_count > 1, so we have to rediscover all the + # created servers manually. + servers = filter(lambda server: server.name.startswith(name_prefix), + self.clients("nova").servers.list()) + time.sleep(CONF.benchmark.nova_server_boot_prepoll_delay) + servers = [bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils. + get_from_manager(), + timeout=CONF.benchmark.nova_server_boot_timeout, + check_interval=CONF.benchmark.nova_server_boot_poll_interval + ) for server in servers] + return servers + + @base.atomic_action_timer("nova.associate_floating_ip") + def _associate_floating_ip(self, server, address, fixed_address=None): + """Add floating IP to an instance + + :param server: The :class:`Server` to add an IP to. 
+ :param address: The ip address or FloatingIP to add to the instance + :param fixed_address: The fixedIP address the FloatingIP is to be + associated with (optional) + """ + server.add_floating_ip(address, fixed_address=fixed_address) + bench_utils.wait_for( + server, + is_ready=self.check_ip_address(address), + update_resource=bench_utils.get_from_manager() + ) + # Update server data + server.addresses = server.manager.get(server.id).addresses + + @base.atomic_action_timer("nova.dissociate_floating_ip") + def _dissociate_floating_ip(self, server, address): + """Remove floating IP from an instance + + :param server: The :class:`Server` to add an IP to. + :param address: The ip address or FloatingIP to remove + """ + server.remove_floating_ip(address) + bench_utils.wait_for( + server, + is_ready=self.check_ip_address(address, must_exist=False), + update_resource=bench_utils.get_from_manager() + ) + # Update server data + server.addresses = server.manager.get(server.id).addresses + + @staticmethod + def check_ip_address(address, must_exist=True): + ip_to_check = getattr(address, "ip", address) + + def _check_addr(resource): + for network, addr_list in resource.addresses.items(): + for addr in addr_list: + if ip_to_check == addr["addr"]: + return must_exist + return not must_exist + return _check_addr + + @base.atomic_action_timer("nova.list_networks") + def _list_networks(self): + """Return user networks list.""" + return self.clients("nova").networks.list() + + @base.atomic_action_timer("nova.resize") + def _resize(self, server, flavor): + server.resize(flavor) + bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("VERIFY_RESIZE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_resize_timeout, + check_interval=CONF.benchmark.nova_server_resize_poll_interval + ) + + @base.atomic_action_timer("nova.resize_confirm") + def _resize_confirm(self, server, status="ACTIVE"): + server.confirm_resize() + 
bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is(status), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_resize_confirm_timeout, + check_interval=( + CONF.benchmark.nova_server_resize_confirm_poll_interval) + ) + + @base.atomic_action_timer("nova.resize_revert") + def _resize_revert(self, server, status="ACTIVE"): + server.revert_resize() + bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is(status), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_resize_revert_timeout, + check_interval=( + CONF.benchmark.nova_server_resize_revert_poll_interval) + ) + + @base.atomic_action_timer("nova.attach_volume") + def _attach_volume(self, server, volume, device=None): + server_id = server.id + volume_id = volume.id + self.clients("nova").volumes.create_server_volume(server_id, + volume_id, + device) + bench_utils.wait_for( + volume, + is_ready=bench_utils.resource_is("in-use"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_resize_revert_timeout, + check_interval=( + CONF.benchmark.nova_server_resize_revert_poll_interval) + ) + + @base.atomic_action_timer("nova.detach_volume") + def _detach_volume(self, server, volume): + server_id = server.id + volume_id = volume.id + self.clients("nova").volumes.delete_server_volume(server_id, + volume_id) + bench_utils.wait_for( + volume, + is_ready=bench_utils.resource_is("available"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_detach_volume_timeout, + check_interval=CONF.benchmark.nova_detach_volume_poll_interval + ) + + @base.atomic_action_timer("nova.live_migrate") + def _live_migrate(self, server, target_host, block_migration=False, + disk_over_commit=False, skip_host_check=False): + """Run live migration of the given server. 
+ + :param server: Server object + :param target_host: Specifies the target compute node to migrate + :param block_migration: Specifies the migration type + :param disk_over_commit: Specifies whether to overcommit migrated + instance or not + :param skip_host_check: Specifies whether to verify the targeted host + availability + """ + server_admin = self.admin_clients("nova").servers.get(server.id) + host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") + server_admin.live_migrate(target_host, + block_migration=block_migration, + disk_over_commit=disk_over_commit) + bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("ACTIVE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_live_migrate_timeout, + check_interval=( + CONF.benchmark.nova_server_live_migrate_poll_interval) + ) + server_admin = self.admin_clients("nova").servers.get(server.id) + if (host_pre_migrate == getattr(server_admin, "OS-EXT-SRV-ATTR:host") + and not skip_host_check): + raise exceptions.LiveMigrateException( + "Migration complete but instance did not change host: %s" % + host_pre_migrate) + + @base.atomic_action_timer("nova.find_host_to_migrate") + def _find_host_to_migrate(self, server): + """Find a compute node for live migration. 
+ + :param server: Server object + """ + server_admin = self.admin_clients("nova").servers.get(server.id) + host = getattr(server_admin, "OS-EXT-SRV-ATTR:host") + az_name = getattr(server_admin, "OS-EXT-AZ:availability_zone") + az = None + for a in self.admin_clients("nova").availability_zones.list(): + if az_name == a.zoneName: + az = a + break + try: + new_host = random.choice( + [key for key, value in six.iteritems(az.hosts) + if key != host and + value["nova-compute"]["available"] is True]) + return new_host + except IndexError: + raise exceptions.InvalidHostException( + "No valid host found to migrate") + + @base.atomic_action_timer("nova.migrate") + def _migrate(self, server, skip_host_check=False): + """Run migration of the given server. + + :param server: Server object + :param skip_host_check: Specifies whether to verify the targeted host + availability + """ + server_admin = self.admin_clients("nova").servers.get(server.id) + host_pre_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") + server_admin.migrate() + bench_utils.wait_for( + server, + is_ready=bench_utils.resource_is("VERIFY_RESIZE"), + update_resource=bench_utils.get_from_manager(), + timeout=CONF.benchmark.nova_server_migrate_timeout, + check_interval=( + CONF.benchmark.nova_server_migrate_poll_interval) + ) + if not skip_host_check: + server_admin = self.admin_clients("nova").servers.get(server.id) + host_after_migrate = getattr(server_admin, "OS-EXT-SRV-ATTR:host") + if host_pre_migrate == host_after_migrate: + raise exceptions.MigrateException( + "Migration complete but instance did not change host: %s" % + host_pre_migrate) + + def _create_security_groups(self, security_group_count): + security_groups = [] + with base.AtomicAction(self, "nova.create_%s_security_groups" % + security_group_count): + for i in range(security_group_count): + sg_name = self._generate_random_name() + sg = self.clients("nova").security_groups.create(sg_name, + sg_name) + security_groups.append(sg) + + return 
security_groups + + def _create_rules_for_security_group(self, security_groups, + rules_per_security_group, + ip_protocol="tcp", cidr="0.0.0.0/0"): + action_name = ("nova.create_%s_rules" % (rules_per_security_group * + len(security_groups))) + with base.AtomicAction(self, action_name): + for i in range(len(security_groups)): + for j in range(rules_per_security_group): + self.clients("nova").security_group_rules.create( + security_groups[i].id, + from_port=(i * rules_per_security_group + j + 1), + to_port=(i * rules_per_security_group + j + 1), + ip_protocol=ip_protocol, + cidr=cidr) + + def _delete_security_groups(self, security_group): + with base.AtomicAction(self, "nova.delete_%s_security_groups" % + len(security_group)): + for sg in security_group: + self.clients("nova").security_groups.delete(sg.id) + + def _list_security_groups(self): + """Return security groups list.""" + with base.AtomicAction(self, "nova.list_security_groups"): + return self.clients("nova").security_groups.list() + + @base.atomic_action_timer("nova.list_floating_ips_bulk") + def _list_floating_ips_bulk(self): + """List all floating IPs.""" + return self.admin_clients("nova").floating_ips_bulk.list() + + @base.atomic_action_timer("nova.create_floating_ips_bulk") + def _create_floating_ips_bulk(self, ip_range, **kwargs): + """Create floating IPs by range.""" + ip_range = network_wrapper.generate_cidr(start_cidr=ip_range) + pool_name = self._generate_random_name(prefix="rally_fip_pool_") + return self.admin_clients("nova").floating_ips_bulk.create( + ip_range=ip_range, pool=pool_name, **kwargs) + + @base.atomic_action_timer("nova.delete_floating_ips_bulk") + def _delete_floating_ips_bulk(self, ip_range): + """Delete floating IPs by range.""" + return self.admin_clients("nova").floating_ips_bulk.delete(ip_range) + + @base.atomic_action_timer("nova.list_hypervisors") + def _list_hypervisors(self, detailed=True): + """List hypervisors.""" + return 
self.admin_clients("nova").hypervisors.list(detailed) + + @base.atomic_action_timer("nova.lock_server") + def _lock_server(self, server): + """Lock the given server. + + :param server: Server to lock + """ + server.lock() + + @base.atomic_action_timer("nova.unlock_server") + def _unlock_server(self, server): + """Unlock the given server. + + :param server: Server to unlock + """ + server.unlock() diff --git a/rally/plugins/openstack/scenarios/vm/__init__.py b/rally/plugins/openstack/scenarios/vm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/rally/plugins/openstack/scenarios/vm/utils.py b/rally/plugins/openstack/scenarios/vm/utils.py new file mode 100644 index 00000000..0cb969ea --- /dev/null +++ b/rally/plugins/openstack/scenarios/vm/utils.py @@ -0,0 +1,173 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import subprocess +import sys + +import netaddr +import six + +from rally.benchmark.scenarios import base +from rally.benchmark import utils as bench_utils +from rally.benchmark.wrappers import network as network_wrapper +from rally.common.i18n import _ +from rally.common import log as logging +from rally.common import sshutils +from rally import exceptions + +LOG = logging.getLogger(__name__) + + +class VMScenario(base.Scenario): + """Base class for VM scenarios with basic atomic actions. + + VM scenarios are scenarios executed inside some launched VM instance. 
+ """ + + @base.atomic_action_timer("vm.run_command_over_ssh") + def _run_command_over_ssh(self, ssh, interpreter, script, + is_file=True): + """Run command inside an instance. + + This is a separate function so that only script execution is timed. + + :param ssh: A SSHClient instance. + :param interpreter: The interpreter that will be used to execute + the script. + :param script: Path to the script file or its content in a StringIO. + :param is_file: if True, script represent a path, + else, script contains an inline script. + :returns: tuple (exit_status, stdout, stderr) + """ + if not is_file: + stdin = script + elif isinstance(script, six.string_types): + stdin = open(script, "rb") + elif isinstance(script, six.moves.StringIO): + stdin = script + else: + raise exceptions.ScriptError( + "Either file path or StringIO expected, given %s" % + type(script).__name__) + + return ssh.execute(interpreter, stdin=stdin) + + def _boot_server_with_fip(self, image, flavor, + use_floating_ip=True, floating_network=None, + wait_for_ping=True, **kwargs): + """Boot server prepared for SSH actions.""" + kwargs["auto_assign_nic"] = True + server = self._boot_server(image, flavor, **kwargs) + + if not server.networks: + raise RuntimeError( + "Server `%(server)s' is not connected to any network. " + "Use network context for auto-assigning networks " + "or provide `nics' argument with specific net-id." 
% { + "server": server.name}) + + if use_floating_ip: + fip = self._attach_floating_ip(server, floating_network) + else: + internal_network = list(server.networks)[0] + fip = {"ip": server.addresses[internal_network][0]["addr"]} + + if wait_for_ping: + self._wait_for_ping(fip["ip"]) + + return server, {"ip": fip.get("ip"), + "id": fip.get("id"), + "is_floating": use_floating_ip} + + @base.atomic_action_timer("vm.attach_floating_ip") + def _attach_floating_ip(self, server, floating_network): + internal_network = list(server.networks)[0] + fixed_ip = server.addresses[internal_network][0]["addr"] + + fip = network_wrapper.wrap(self.clients).create_floating_ip( + ext_network=floating_network, int_network=internal_network, + tenant_id=server.tenant_id, fixed_ip=fixed_ip) + + self._associate_floating_ip(server, fip["ip"], fixed_address=fixed_ip) + + return fip + + @base.atomic_action_timer("vm.delete_floating_ip") + def _delete_floating_ip(self, server, fip): + with logging.ExceptionLogger( + LOG, _("Unable to delete IP: %s") % fip["ip"]): + if self.check_ip_address(fip["ip"])(server): + self._dissociate_floating_ip(server, fip["ip"]) + network_wrapper.wrap(self.clients).delete_floating_ip(fip["id"], + wait=True) + + def _delete_server_with_fip(self, server, fip, force_delete=False): + if fip["is_floating"]: + self._delete_floating_ip(server, fip) + return self._delete_server(server, force=force_delete) + + @base.atomic_action_timer("vm.wait_for_ssh") + def _wait_for_ssh(self, ssh): + ssh.wait() + + @base.atomic_action_timer("vm.wait_for_ping") + def _wait_for_ping(self, server_ip): + bench_utils.wait_for( + server_ip, + is_ready=self._ping_ip_address, + timeout=120 + ) + + def _run_command(self, server_ip, port, username, password, interpreter, + script, pkey=None, is_file=True): + """Run command via SSH on server. + + Create SSH connection for server, wait for server to become + available (there is a delay between server being set to ACTIVE + and sshd being available). 
Then call run_command_over_ssh to actually + execute the command. + :param server_ip: server ip address + :param port: ssh port for SSH connection + :param username: str. ssh username for server + :param password: Password for SSH authentication + :param interpreter: server's interpreter to execute the script + :param script: script to run on server + :param pkey: key for SSH authentication + :param is_file: if True, script represent a path, + else, script contains an inline script. + """ + pkey = pkey if pkey else self.context["user"]["keypair"]["private"] + ssh = sshutils.SSH(username, server_ip, port=port, + pkey=pkey, password=password) + self._wait_for_ssh(ssh) + return self._run_command_over_ssh(ssh, interpreter, + script, is_file) + + @staticmethod + def _ping_ip_address(host, should_succeed=True): + ip = netaddr.IPAddress(host) + ping = "ping" if ip.version == 4 else "ping6" + if sys.platform.startswith("linux"): + cmd = [ping, "-c1", "-w1", host] + else: + cmd = [ping, "-c1", host] + + proc = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + proc.wait() + LOG.debug("Host %s is ICMP %s" + % (host, proc.returncode and "down" or "up")) + return (proc.returncode == 0) == should_succeed diff --git a/rally/plugins/openstack/scenarios/vm/vmtasks.py b/rally/plugins/openstack/scenarios/vm/vmtasks.py new file mode 100644 index 00000000..ff1dcfc2 --- /dev/null +++ b/rally/plugins/openstack/scenarios/vm/vmtasks.py @@ -0,0 +1,106 @@ +# Copyright 2014: Rackspace UK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from rally.benchmark.scenarios import base +from rally.benchmark import types as types +from rally.benchmark import validation +from rally import consts +from rally import exceptions +from rally.plugins.openstack.scenarios.cinder import utils as cinder_utils +from rally.plugins.openstack.scenarios.nova import utils as nova_utils +from rally.plugins.openstack.scenarios.vm import utils as vm_utils + + +class VMTasks(nova_utils.NovaScenario, vm_utils.VMScenario, + cinder_utils.CinderScenario): + """Benchmark scenarios that are to be run inside VM instances.""" + + def __init__(self, *args, **kwargs): + super(VMTasks, self).__init__(*args, **kwargs) + + @types.set(image=types.ImageResourceType, + flavor=types.FlavorResourceType) + @validation.image_valid_on_flavor("flavor", "image") + @validation.file_exists("script") + @validation.number("port", minval=1, maxval=65535, nullable=True, + integer_only=True) + @validation.external_network_exists("floating_network") + @validation.required_services(consts.Service.NOVA, consts.Service.CINDER) + @validation.required_openstack(users=True) + @base.scenario(context={"cleanup": ["nova", "cinder"], + "keypair": {}, "allow_ssh": {}}) + def boot_runcommand_delete(self, image, flavor, + script, interpreter, username, + password=None, + volume_args=None, + floating_network=None, + port=22, + use_floating_ip=True, + force_delete=False, + **kwargs): + """Boot a server, run a script that outputs JSON, delete the server. 
+ + Example Script in samples/tasks/support/instance_dd_test.sh + + :param image: glance image name to use for the vm + :param flavor: VM flavor name + :param script: script to run on server, must output JSON mapping + metric names to values (see the sample script below) + :param interpreter: server's interpreter to run the script + :param username: ssh username on server, str + :param password: Password on SSH authentication + :param volume_args: volume args for booting server from volume + :param floating_network: external network name, for floating ip + :param port: ssh port for SSH connection + :param use_floating_ip: bool, floating or fixed IP for SSH connection + :param force_delete: whether to use force_delete for servers + :param **kwargs: extra arguments for booting the server + :returns: dictionary with keys `data' and `errors': + data: dict, JSON output from the script + errors: str, raw data from the script's stderr stream + """ + + if volume_args: + volume = self._create_volume(volume_args["size"], imageRef=None) + kwargs["block_device_mapping"] = {"vdrally": "%s:::1" % volume.id} + + server, fip = self._boot_server_with_fip( + image, flavor, use_floating_ip=use_floating_ip, + floating_network=floating_network, + key_name=self.context["user"]["keypair"]["name"], + **kwargs) + try: + code, out, err = self._run_command(fip["ip"], port, username, + password, interpreter, script) + if code: + raise exceptions.ScriptError( + "Error running script %(script)s. " + "Error %(code)s: %(error)s" % { + "script": script, "code": code, "error": err}) + + try: + data = json.loads(out) + except ValueError as e: + raise exceptions.ScriptError( + "Script %(script)s has not output valid JSON: %(error)s. 
" + "Output: %(output)s" % { + "script": script, "error": str(e), "output": out}) + finally: + self._delete_server_with_fip(server, fip, + force_delete=force_delete) + + return {"data": data, "errors": err} diff --git a/tests/unit/plugins/openstack/scenarios/designate/__init__.py b/tests/unit/plugins/openstack/scenarios/designate/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/plugins/openstack/scenarios/designate/test_basic.py b/tests/unit/plugins/openstack/scenarios/designate/test_basic.py new file mode 100644 index 00000000..d35a4712 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/designate/test_basic.py @@ -0,0 +1,145 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Author: Endre Karlson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from rally.plugins.openstack.scenarios.designate import basic +from tests.unit import test + +DESIGNATE_BASIC = ("rally.plugins.openstack.scenarios.designate.basic" + ".DesignateBasic") + + +class DesignateBasicTestCase(test.TestCase): + + @mock.patch(DESIGNATE_BASIC + "._list_domains") + @mock.patch(DESIGNATE_BASIC + "._create_domain") + def test_create_and_list_domains(self, mock_create, mock_list): + scenario = basic.DesignateBasic() + + # Default options + scenario.create_and_list_domains() + mock_create.assert_called_once_with() + mock_list.assert_called_once_with() + + @mock.patch(DESIGNATE_BASIC + "._delete_domain") + @mock.patch(DESIGNATE_BASIC + "._create_domain") + def test_create_and_delete_domain(self, mock_create, mock_delete): + scenario = basic.DesignateBasic() + + mock_create.return_value = {"id": "123"} + + # Default options + scenario.create_and_delete_domain() + + mock_create.assert_called_once_with() + mock_delete.assert_called_once_with("123") + + @mock.patch(DESIGNATE_BASIC + "._list_domains") + def test_list_domains(self, mock_list): + scenario = basic.DesignateBasic() + + # Default options + scenario.list_domains() + mock_list.assert_called_once_with() + + @mock.patch(DESIGNATE_BASIC + "._list_records") + @mock.patch(DESIGNATE_BASIC + "._create_record") + @mock.patch(DESIGNATE_BASIC + "._create_domain") + def test_create_and_list_records(self, + mock_create_domain, + mock_create_record, + mock_list): + scenario = basic.DesignateBasic() + domain = { + "name": "zone.name", + "email": "email@zone.name", + "id": "123"} + mock_create_domain.return_value = domain + records_per_domain = 5 + + scenario.create_and_list_records( + records_per_domain=records_per_domain) + mock_create_domain.assert_called_once_with() + self.assertEqual(mock_create_record.mock_calls, + [mock.call(domain, atomic_action=False)] + * records_per_domain) + mock_list.assert_called_once_with(domain["id"]) + + @mock.patch(DESIGNATE_BASIC + "._delete_record") + 
@mock.patch(DESIGNATE_BASIC + "._create_record") + @mock.patch(DESIGNATE_BASIC + "._create_domain") + def test_create_and_delete_records(self, + mock_create_domain, + mock_create_record, + mock_delete): + scenario = basic.DesignateBasic() + domain = { + "name": "zone.name", + "email": "email@zone.name", + "id": "123"} + mock_create_domain.return_value = domain + mock_create_record.return_value = {"id": "321"} + records_per_domain = 5 + + scenario.create_and_delete_records( + records_per_domain=records_per_domain) + mock_create_domain.assert_called_once_with() + self.assertEqual(mock_create_record.mock_calls, + [mock.call(domain, atomic_action=False)] + * records_per_domain) + self.assertEqual(mock_delete.mock_calls, + [mock.call(domain["id"], "321", atomic_action=False)] + * records_per_domain) + + @mock.patch(DESIGNATE_BASIC + "._list_records") + def test_list_records(self, mock_list): + scenario = basic.DesignateBasic() + + # Default options + scenario.list_records("123") + mock_list.assert_called_once_with("123") + + @mock.patch(DESIGNATE_BASIC + "._list_servers") + @mock.patch(DESIGNATE_BASIC + "._create_server") + def test_create_and_list_servers(self, mock_create, mock_list): + scenario = basic.DesignateBasic() + + # Default options + scenario.create_and_list_servers() + mock_create.assert_called_once_with() + mock_list.assert_called_once_with() + + @mock.patch(DESIGNATE_BASIC + "._delete_server") + @mock.patch(DESIGNATE_BASIC + "._create_server") + def test_create_and_delete_server(self, mock_create, mock_delete): + scenario = basic.DesignateBasic() + + mock_create.return_value = {"id": "123"} + + # Default options + scenario.create_and_delete_server() + + mock_create.assert_called_once_with() + mock_delete.assert_called_once_with("123") + + @mock.patch(DESIGNATE_BASIC + "._list_servers") + def test_list_servers(self, mock_list): + scenario = basic.DesignateBasic() + + # Default options + scenario.list_servers() + mock_list.assert_called_once_with() diff 
--git a/tests/unit/plugins/openstack/scenarios/designate/test_utils.py b/tests/unit/plugins/openstack/scenarios/designate/test_utils.py new file mode 100644 index 00000000..cb232fa9 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/designate/test_utils.py @@ -0,0 +1,169 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Author: Endre Karlson +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from rally.plugins.openstack.scenarios.designate import utils +from tests.unit import test + +DESIGNATE_UTILS = "rally.plugins.openstack.scenarios.designate.utils." + + +class DesignateScenarioTestCase(test.TestCase): + + def setUp(self): + super(DesignateScenarioTestCase, self).setUp() + self.domain = mock.Mock() + self.server = mock.Mock() + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario._generate_random_name") + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_create_domain(self, mock_clients, mock_random_name): + scenario = utils.DesignateScenario() + + random_name = "foo" + explicit_name = "bar.io." + email = "root@zone.name" + + mock_random_name.return_value = random_name + mock_clients("designate").domains.create.return_value = self.domain + + # Check that the defaults / randoms are used if nothing is specified + domain = scenario._create_domain() + mock_clients("designate").domains.create.assert_called_once_with( + {"email": "root@random.name", "name": "%s.name." 
% random_name}) + self.assertEqual(self.domain, domain) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.create_domain") + + mock_clients("designate").domains.create.reset_mock() + + # Check that when specifying zone defaults are not used... + data = {"email": email, "name": explicit_name} + domain = scenario._create_domain(data) + mock_clients("designate").domains.create.assert_called_once_with(data) + self.assertEqual(self.domain, domain) + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_list_domains(self, mock_clients): + scenario = utils.DesignateScenario() + domains_list = [] + mock_clients("designate").domains.list.return_value = domains_list + return_domains_list = scenario._list_domains() + self.assertEqual(domains_list, return_domains_list) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.list_domains") + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_delete_domain(self, mock_clients): + scenario = utils.DesignateScenario() + + domain = scenario._create_domain() + scenario._delete_domain(domain["id"]) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.delete_domain") + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario._generate_random_name") + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_create_record(self, mock_clients, mock_random_name): + scenario = utils.DesignateScenario() + + random_name = "foo" + domain_name = "zone.name." 
+ random_record_name = "%s.%s" % (random_name, domain_name) + + mock_random_name.return_value = random_name + + domain = {"name": domain_name, "id": "123"} + + # Create with randoms (name and type) + scenario._create_record(domain) + mock_clients("designate").records.create.assert_called_once_with( + domain["id"], + {"name": random_record_name, "type": "A", "data": "10.0.0.1"}) + + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.create_record") + + mock_clients("designate").records.create.reset_mock() + + # Specify name + record = {"name": "www.zone.name.", "type": "ASD"} + scenario._create_record(domain, record) + mock_clients("designate").records.create.assert_called_once_with( + domain["id"], record) + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_list_records(self, mock_clients): + scenario = utils.DesignateScenario() + records_list = [] + mock_clients("designate").records.list.return_value = records_list + return_records_list = scenario._list_records("123") + self.assertEqual(records_list, return_records_list) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.list_records") + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.clients") + def test_delete_record(self, mock_clients): + scenario = utils.DesignateScenario() + + mock_clients("designate").domains.create.return_value = { + "id": "123", "name": "asd"} + domain = scenario._create_domain() + + mock_clients("designate").records.create.return_value = {"id": "123"} + record = scenario._create_record(domain) + + scenario._delete_record(domain["id"], record["id"]) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.delete_record") + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario._generate_random_name") + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.admin_clients") + def test_create_server(self, mock_clients, mock_random_name): + scenario = utils.DesignateScenario() + + random_name = "foo" + explicit_name = 
"bar.io." + + mock_random_name.return_value = random_name + mock_clients("designate").servers.create.return_value = self.server + + # Check that the defaults / randoms are used if nothing is specified + server = scenario._create_server() + mock_clients("designate").servers.create.assert_called_once_with( + {"name": "name.%s." % random_name}) + self.assertEqual(self.server, server) + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.create_server") + + mock_clients("designate").servers.create.reset_mock() + + # Check that when specifying server name defaults are not used... + data = {"name": explicit_name} + server = scenario._create_server(data) + mock_clients("designate").servers.create.assert_called_once_with(data) + self.assertEqual(self.server, server) + + @mock.patch(DESIGNATE_UTILS + "DesignateScenario.admin_clients") + def test_delete_server(self, mock_clients): + scenario = utils.DesignateScenario() + + scenario._delete_server("foo_id") + mock_clients("designate").servers.delete.assert_called_once_with( + "foo_id") + self._test_atomic_action_timer(scenario.atomic_actions(), + "designate.delete_server") diff --git a/tests/unit/plugins/openstack/scenarios/glance/test_images.py b/tests/unit/plugins/openstack/scenarios/glance/test_images.py index ba3bf5a8..df0c5783 100644 --- a/tests/unit/plugins/openstack/scenarios/glance/test_images.py +++ b/tests/unit/plugins/openstack/scenarios/glance/test_images.py @@ -15,10 +15,10 @@ import mock -from rally.benchmark.scenarios.nova import servers from rally import objects from rally import osclients from rally.plugins.openstack.scenarios.glance import images +from rally.plugins.openstack.scenarios.nova import servers from tests.unit import fakes from tests.unit import test diff --git a/tests/unit/plugins/openstack/scenarios/neutron/__init__.py b/tests/unit/plugins/openstack/scenarios/neutron/__init__.py new file mode 100644 index 00000000..e69de29b diff --git 
a/tests/unit/plugins/openstack/scenarios/neutron/test_network.py b/tests/unit/plugins/openstack/scenarios/neutron/test_network.py new file mode 100644 index 00000000..7d6f52d8 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/neutron/test_network.py @@ -0,0 +1,638 @@ +# Copyright 2014: Intel Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from rally.plugins.openstack.scenarios.neutron import network +from tests.unit import test + +NEUTRON_NETWORKS = ("rally.plugins.openstack.scenarios.neutron.network" + ".NeutronNetworks") + + +class NeutronNetworksTestCase(test.TestCase): + + @mock.patch(NEUTRON_NETWORKS + "._list_networks") + @mock.patch(NEUTRON_NETWORKS + "._create_network") + def test_create_and_list_networks(self, mock_create, mock_list): + neutron_scenario = network.NeutronNetworks() + + # Default options + network_create_args = {} + neutron_scenario.create_and_list_networks( + network_create_args=network_create_args) + mock_create.assert_called_once_with(network_create_args) + mock_list.assert_called_once_with() + + mock_create.reset_mock() + mock_list.reset_mock() + + # Explicit network name is specified + network_create_args = {"name": "given-name"} + neutron_scenario.create_and_list_networks( + network_create_args=network_create_args) + mock_create.assert_called_once_with(network_create_args) + mock_list.assert_called_once_with() + + @mock.patch(NEUTRON_NETWORKS + "._update_network") + 
@mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={ + "network": { + "id": "network-id", + "name": "network-name", + "admin_state_up": False + } + }) + def test_create_and_update_networks(self, + mock_create_network, + mock_update_network): + scenario = network.NeutronNetworks() + + network_update_args = {"name": "_updated", "admin_state_up": True} + + # Default options + scenario.create_and_update_networks( + network_update_args=network_update_args) + + mock_create_network.assert_called_once_with({}) + + mock_update_network.assert_has_calls( + [mock.call(mock_create_network.return_value, network_update_args)]) + + mock_create_network.reset_mock() + mock_update_network.reset_mock() + + # Explicit network name is specified + network_create_args = {"name": "network-name", "admin_state_up": False} + + scenario.create_and_update_networks( + network_create_args=network_create_args, + network_update_args=network_update_args) + mock_create_network.assert_called_once_with(network_create_args) + mock_update_network.assert_has_calls( + [mock.call(mock_create_network.return_value, network_update_args)]) + + @mock.patch(NEUTRON_NETWORKS + "._delete_network") + @mock.patch(NEUTRON_NETWORKS + "._create_network") + def test_create_and_delete_networks(self, mock_create, mock_delete): + neutron_scenario = network.NeutronNetworks() + + # Default options + network_create_args = {} + neutron_scenario.create_and_delete_networks() + mock_create.assert_called_once_with(network_create_args) + self.assertEqual(1, mock_delete.call_count) + + mock_create.reset_mock() + mock_delete.reset_mock() + + # Explict network name is specified + network_create_args = {"name": "given-name"} + neutron_scenario.create_and_delete_networks( + network_create_args=network_create_args) + mock_create.assert_called_once_with(network_create_args) + self.assertEqual(1, mock_delete.call_count) + + @mock.patch(NEUTRON_NETWORKS + "._list_subnets") + @mock.patch(NEUTRON_NETWORKS + 
"._create_network_and_subnets") + def test_create_and_list_subnets(self, + mock_create_network_and_subnets, + mock_list): + scenario = network.NeutronNetworks() + subnets_per_network = 4 + subnet_cidr_start = "default_cidr" + + mock_create_network_and_subnets.reset_mock() + mock_list.reset_mock() + + # Default options + scenario.create_and_list_subnets( + subnets_per_network=subnets_per_network, + subnet_cidr_start=subnet_cidr_start) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, + subnet_cidr_start)]) + mock_list.assert_called_once_with() + + mock_create_network_and_subnets.reset_mock() + mock_list.reset_mock() + + # Custom options + scenario.create_and_list_subnets( + subnet_create_args={"allocation_pools": []}, + subnet_cidr_start="custom_cidr", + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {"allocation_pools": []}, + subnets_per_network, "custom_cidr")]) + mock_list.assert_called_once_with() + + @mock.patch(NEUTRON_NETWORKS + "._update_subnet") + @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") + def test_create_and_update_subnets(self, + mock_create_network_and_subnets, + mock_update_subnet): + scenario = network.NeutronNetworks() + subnets_per_network = 1 + subnet_cidr_start = "default_cidr" + net = { + "network": { + "id": "network-id" + } + } + subnet = { + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + } + mock_create_network_and_subnets.return_value = (net, [subnet]) + subnet_update_args = {"name": "_updated", "enable_dhcp": True} + + mock_create_network_and_subnets.reset_mock() + mock_update_subnet.reset_mock() + + # Default options + scenario.create_and_update_subnets( + subnet_update_args=subnet_update_args, + subnet_cidr_start=subnet_cidr_start, + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, 
subnet_cidr_start)]) + mock_update_subnet.assert_has_calls( + [mock.call(subnet, subnet_update_args)]) + + mock_create_network_and_subnets.reset_mock() + mock_update_subnet.reset_mock() + + # Custom options + subnet_cidr_start = "custom_cidr" + scenario.create_and_update_subnets( + subnet_update_args=subnet_update_args, + subnet_create_args={"allocation_pools": []}, + subnet_cidr_start=subnet_cidr_start, + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {"allocation_pools": []}, subnets_per_network, + subnet_cidr_start)]) + mock_update_subnet.assert_has_calls( + [mock.call(subnet, subnet_update_args)]) + + @mock.patch(NEUTRON_NETWORKS + "._delete_subnet") + @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") + def test_create_and_delete_subnets(self, + mock_create_network_and_subnets, + mock_delete): + scenario = network.NeutronNetworks() + net = { + "network": { + "id": "network-id" + } + } + subnet = { + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + } + mock_create_network_and_subnets.return_value = (net, [subnet]) + subnets_per_network = 1 + subnet_cidr_start = "default_cidr" + + mock_create_network_and_subnets.reset_mock() + mock_delete.reset_mock() + + # Default options + scenario.create_and_delete_subnets( + subnets_per_network=subnets_per_network, + subnet_cidr_start=subnet_cidr_start) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, + subnet_cidr_start)]) + + mock_delete.assert_has_calls([mock.call(subnet)]) + + mock_create_network_and_subnets.reset_mock() + mock_delete.reset_mock() + + # Custom options + subnet_cidr_start = "custom_cidr" + scenario.create_and_delete_subnets( + subnet_create_args={"allocation_pools": []}, + subnet_cidr_start="custom_cidr", + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {"allocation_pools": []}, 
subnets_per_network, + subnet_cidr_start)]) + mock_delete.assert_has_calls([mock.call(subnet)]) + + @mock.patch(NEUTRON_NETWORKS + "._list_routers") + @mock.patch(NEUTRON_NETWORKS + "._create_router") + @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") + @mock.patch(NEUTRON_NETWORKS + ".clients") + def test_create_and_list_routers(self, + mock_clients, + mock_create_network_and_subnets, + mock_create_router, + mock_list): + scenario = network.NeutronNetworks() + subnets_per_network = 1 + subnet_cidr_start = "default_cidr" + + net = { + "network": { + "id": "network-id" + } + } + subnet = { + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + } + mock_create_network_and_subnets.return_value = (net, [subnet]) + mock_clients("neutron").add_interface_router = mock.Mock() + router = { + "router": { + "name": "router-name", + "id": "router-id" + } + } + mock_create_router.return_value = router + + # Default options + scenario.create_and_list_routers( + subnet_cidr_start=subnet_cidr_start, + subnets_per_network=subnets_per_network) + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call({})] * subnets_per_network) + + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_create_network_and_subnets.reset_mock() + mock_create_router.reset_mock() + + mock_clients("neutron").add_interface_router.reset_mock() + mock_list.reset_mock() + + # Custom options + subnet_cidr_start = "custom_cidr" + subnet_create_args = {"allocation_pools": []} + router_create_args = {"admin_state_up": False} + scenario.create_and_list_routers( + subnet_create_args=subnet_create_args, + subnet_cidr_start="custom_cidr", + subnets_per_network=subnets_per_network, + router_create_args=router_create_args) + + 
mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, subnet_create_args, subnets_per_network, + subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call(router_create_args)] * subnets_per_network) + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_list.assert_called_once_with() + + @mock.patch(NEUTRON_NETWORKS + "._update_router") + @mock.patch(NEUTRON_NETWORKS + "._create_router") + @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") + @mock.patch(NEUTRON_NETWORKS + ".clients") + def test_create_and_update_routers(self, + mock_clients, + mock_create_network_and_subnets, + mock_create_router, + mock_update_router): + scenario = network.NeutronNetworks() + subnets_per_network = 1 + subnet_cidr_start = "default_cidr" + + net = { + "network": { + "id": "network-id" + } + } + subnet = { + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + } + router = { + "router": { + "name": "router-name", + "id": "router-id" + } + } + router_update_args = { + "name": "_updated", + "admin_state_up": False + } + mock_create_router.return_value = router + mock_create_network_and_subnets.return_value = (net, [subnet]) + mock_clients("neutron").add_interface_router = mock.Mock() + + # Default options + scenario.create_and_update_routers( + router_update_args=router_update_args, + subnet_cidr_start=subnet_cidr_start, + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call({})] * subnets_per_network) + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_update_router.assert_has_calls( + [mock.call(router, 
router_update_args) + ] * subnets_per_network) + + mock_create_network_and_subnets.reset_mock() + mock_create_router.reset_mock() + mock_clients("neutron").add_interface_router.reset_mock() + mock_update_router.reset_mock() + + # Custom options + subnet_cidr_start = "custom_cidr" + subnet_create_args = {"allocation_pools": []} + router_create_args = {"admin_state_up": False} + scenario.create_and_update_routers( + router_update_args=router_update_args, + subnet_create_args=subnet_create_args, + subnet_cidr_start="custom_cidr", + subnets_per_network=subnets_per_network, + router_create_args=router_create_args) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, subnet_create_args, subnets_per_network, + subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call(router_create_args)] * subnets_per_network) + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_update_router.assert_has_calls( + [mock.call(router, router_update_args) + ] * subnets_per_network) + + @mock.patch(NEUTRON_NETWORKS + "._delete_router") + @mock.patch(NEUTRON_NETWORKS + "._create_router") + @mock.patch(NEUTRON_NETWORKS + "._create_network_and_subnets") + @mock.patch(NEUTRON_NETWORKS + ".clients") + def test_create_and_delete_routers(self, + mock_clients, + mock_create_network_and_subnets, + mock_create_router, + mock_delete_router): + scenario = network.NeutronNetworks() + subnets_per_network = 1 + subnet_cidr_start = "default_cidr" + + net = { + "network": { + "id": "network-id" + } + } + subnet = { + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + } + router = { + "router": { + "name": "router-name", + "id": "router-id" + } + } + + mock_create_router.return_value = router + mock_create_network_and_subnets.return_value = (net, [subnet]) + mock_clients("neutron").add_interface_router = mock.Mock() + 
+ # Default options + scenario.create_and_delete_routers( + subnet_cidr_start=subnet_cidr_start, + subnets_per_network=subnets_per_network) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, {}, subnets_per_network, subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call({})] * subnets_per_network) + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_delete_router.assert_has_calls( + [mock.call(router)] * subnets_per_network) + + mock_create_network_and_subnets.reset_mock() + mock_create_router.reset_mock() + mock_clients("neutron").add_interface_router.reset_mock() + mock_delete_router.reset_mock() + + # Custom options + subnet_cidr_start = "custom_cidr" + subnet_create_args = {"allocation_pools": []} + router_create_args = {"admin_state_up": False} + scenario.create_and_delete_routers( + subnet_create_args=subnet_create_args, + subnet_cidr_start="custom_cidr", + subnets_per_network=subnets_per_network, + router_create_args=router_create_args) + + mock_create_network_and_subnets.assert_has_calls( + [mock.call({}, subnet_create_args, subnets_per_network, + subnet_cidr_start)]) + + mock_create_router.assert_has_calls( + [mock.call(router_create_args)] * subnets_per_network) + mock_clients("neutron").add_interface_router.assert_has_calls( + [mock.call(router["router"]["id"], + {"subnet_id": subnet["subnet"]["id"]}) + ] * subnets_per_network) + + mock_delete_router.assert_has_calls( + [mock.call(router)] * subnets_per_network) + + @mock.patch(NEUTRON_NETWORKS + "._generate_random_name") + @mock.patch(NEUTRON_NETWORKS + "._list_ports") + @mock.patch(NEUTRON_NETWORKS + "._create_port") + @mock.patch(NEUTRON_NETWORKS + "._create_network") + def test_create_and_list_ports(self, + mock_create_network, + mock_create_port, + mock_list, + mock_random_name): + scenario = network.NeutronNetworks() + 
mock_random_name.return_value = "random-name" + net = {"network": {"id": "fake-id"}} + mock_create_network.return_value = net + ports_per_network = 10 + + self.assertRaises(TypeError, scenario.create_and_list_ports) + + mock_create_network.reset_mock() + + # Defaults + scenario.create_and_list_ports(ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({}) + self.assertEqual(mock_create_port.mock_calls, + [mock.call(net, {})] * ports_per_network) + mock_list.assert_called_once_with() + + mock_create_network.reset_mock() + mock_create_port.reset_mock() + mock_list.reset_mock() + + # Custom options + scenario.create_and_list_ports( + network_create_args={"name": "given-name"}, + port_create_args={"allocation_pools": []}, + ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({"name": "given-name"}) + self.assertEqual( + mock_create_port.mock_calls, + [mock.call(net, {"allocation_pools": []})] * ports_per_network) + mock_list.assert_called_once_with() + + @mock.patch(NEUTRON_NETWORKS + "._generate_random_name") + @mock.patch(NEUTRON_NETWORKS + "._update_port") + @mock.patch(NEUTRON_NETWORKS + "._create_port", return_value={ + "port": { + "name": "port-name", + "id": "port-id", + "admin_state_up": True + } + }) + @mock.patch(NEUTRON_NETWORKS + "._create_network", return_value={ + "network": {"id": "fake-id"}}) + def test_create_and_update_ports(self, + mock_create_network, + mock_create_port, + mock_update_port, + mock_random_name): + scenario = network.NeutronNetworks() + mock_random_name.return_value = "random-name" + ports_per_network = 10 + + port_update_args = { + "name": "_updated", + "admin_state_up": False + } + + # Defaults + scenario.create_and_update_ports( + port_update_args=port_update_args, + ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({}) + + mock_create_port.assert_has_calls( + [mock.call({"network": {"id": "fake-id"}}, + {})] * ports_per_network) + 
mock_update_port.assert_has_calls( + [mock.call(mock_create_port.return_value, port_update_args) + ] * ports_per_network) + + mock_create_network.reset_mock() + mock_create_port.reset_mock() + mock_update_port.reset_mock() + + # Custom options + scenario.create_and_update_ports( + port_update_args=port_update_args, + network_create_args={"name": "given-name"}, + port_create_args={"allocation_pools": []}, + ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({"name": "given-name"}) + mock_create_port.assert_has_calls( + [mock.call({"network": {"id": "fake-id"}}, + {"allocation_pools": []})] * ports_per_network) + mock_update_port.assert_has_calls( + [mock.call(mock_create_port.return_value, port_update_args) + ] * ports_per_network) + + @mock.patch(NEUTRON_NETWORKS + "._generate_random_name") + @mock.patch(NEUTRON_NETWORKS + "._delete_port") + @mock.patch(NEUTRON_NETWORKS + "._create_port") + @mock.patch(NEUTRON_NETWORKS + "._create_network") + def test_create_and_delete_ports(self, + mock_create_network, + mock_create_port, + mock_delete, + mock_random_name): + scenario = network.NeutronNetworks() + mock_random_name.return_value = "random-name" + net = {"network": {"id": "fake-id"}} + mock_create_network.return_value = net + ports_per_network = 10 + + self.assertRaises(TypeError, scenario.create_and_delete_ports) + + mock_create_network.reset_mock() + + # Default options + scenario.create_and_delete_ports(ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({}) + self.assertEqual(mock_create_port.mock_calls, + [mock.call(net, {})] * ports_per_network) + self.assertEqual(mock_delete.mock_calls, + [mock.call(mock_create_port())] * ports_per_network) + + mock_create_network.reset_mock() + mock_create_port.reset_mock() + mock_delete.reset_mock() + + # Custom options + scenario.create_and_delete_ports( + network_create_args={"name": "given-name"}, + port_create_args={"allocation_pools": []}, + 
ports_per_network=ports_per_network) + mock_create_network.assert_called_once_with({"name": "given-name"}) + self.assertEqual( + mock_create_port.mock_calls, + [mock.call(net, {"allocation_pools": []})] * ports_per_network) + self.assertEqual(mock_delete.mock_calls, + [mock.call(mock_create_port())] * ports_per_network) diff --git a/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py b/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py new file mode 100644 index 00000000..c387e809 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/neutron/test_utils.py @@ -0,0 +1,434 @@ +# Copyright 2013: Intel Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from rally.plugins.openstack.scenarios.neutron import utils +from tests.unit import fakes +from tests.unit import test + + +NEUTRON_UTILS = "rally.plugins.openstack.scenarios.neutron.utils." 
+ + +class NeutronScenarioTestCase(test.TestCase): + + def setUp(self): + super(NeutronScenarioTestCase, self).setUp() + self.network = mock.Mock() + + @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name") + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_create_network(self, mock_clients, mock_random_name): + neutron_scenario = utils.NeutronScenario() + explicit_name = "explicit_name" + random_name = "random_name" + mock_random_name.return_value = random_name + mock_clients("neutron").create_network.return_value = self.network + + # Network name is specified + network_data = {"name": explicit_name, "admin_state_up": False} + expected_network_data = {"network": network_data} + network = neutron_scenario._create_network(network_data) + mock_clients("neutron").create_network.assert_called_once_with( + expected_network_data) + self.assertEqual(self.network, network) + self._test_atomic_action_timer(neutron_scenario.atomic_actions(), + "neutron.create_network") + + mock_clients("neutron").create_network.reset_mock() + + # Network name is random generated + network_data = {"admin_state_up": False} + expected_network_data["network"]["name"] = random_name + network = neutron_scenario._create_network(network_data) + mock_clients("neutron").create_network.assert_called_once_with( + expected_network_data) + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_list_networks(self, mock_clients): + scenario = utils.NeutronScenario() + networks_list = [] + networks_dict = {"networks": networks_list} + mock_clients("neutron").list_networks.return_value = networks_dict + return_networks_list = scenario._list_networks() + self.assertEqual(networks_list, return_networks_list) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.list_networks") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_update_network(self, mock_clients): + scenario = utils.NeutronScenario() + expected_network = { + 
"network": { + "name": "network-name_updated", + "admin_state_up": False + } + } + mock_clients("neutron").update_network.return_value = expected_network + + network = {"network": {"name": "network-name", "id": "network-id"}} + network_update_args = {"name": "_updated", "admin_state_up": False} + + result_network = scenario._update_network(network, network_update_args) + mock_clients("neutron").update_network.assert_called_once_with( + network["network"]["id"], expected_network) + self.assertEqual(result_network, expected_network) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.update_network") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_delete_network(self, mock_clients): + scenario = utils.NeutronScenario() + + network_create_args = {} + network = scenario._create_network(network_create_args) + scenario._delete_network(network) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.delete_network") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name", + return_value="test_subnet") + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_create_subnet(self, mock_clients, mock_random_name): + scenario = utils.NeutronScenario() + network_id = "fake-id" + start_cidr = "192.168.0.0/24" + + network = {"network": {"id": network_id}} + expected_subnet_data = { + "subnet": { + "network_id": network_id, + "cidr": start_cidr, + "ip_version": scenario.SUBNET_IP_VERSION, + "name": mock_random_name.return_value + } + } + + # Default options + subnet_data = {"network_id": network_id} + scenario._create_subnet(network, subnet_data, start_cidr) + mock_clients("neutron").create_subnet.assert_called_once_with( + expected_subnet_data) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.create_subnet") + + mock_clients("neutron").create_subnet.reset_mock() + + # Custom options + extras = {"cidr": "192.168.16.0/24", "allocation_pools": []} + subnet_data.update(extras) + 
expected_subnet_data["subnet"].update(extras) + scenario._create_subnet(network, subnet_data) + mock_clients("neutron").create_subnet.assert_called_once_with( + expected_subnet_data) + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_list_subnets(self, mock_clients): + subnets = [{"name": "fake1"}, {"name": "fake2"}] + mock_clients("neutron").list_subnets.return_value = { + "subnets": subnets + } + scenario = utils.NeutronScenario() + result = scenario._list_subnets() + self.assertEqual(subnets, result) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.list_subnets") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_update_subnet(self, mock_clients): + scenario = utils.NeutronScenario() + expected_subnet = { + "subnet": { + "name": "subnet-name_updated", + "enable_dhcp": False + } + } + mock_clients("neutron").update_subnet.return_value = expected_subnet + + subnet = {"subnet": {"name": "subnet-name", "id": "subnet-id"}} + subnet_update_args = {"name": "_updated", "enable_dhcp": False} + + result_subnet = scenario._update_subnet(subnet, subnet_update_args) + mock_clients("neutron").update_subnet.assert_called_once_with( + subnet["subnet"]["id"], expected_subnet) + self.assertEqual(result_subnet, expected_subnet) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.update_subnet") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_delete_subnet(self, mock_clients): + scenario = utils.NeutronScenario() + + network = scenario._create_network({}) + subnet = scenario._create_subnet(network, {}) + scenario._delete_subnet(subnet) + + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.delete_subnet") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name") + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_create_router(self, mock_clients, mock_random_name): + scenario = utils.NeutronScenario() + router = mock.Mock() + 
explicit_name = "explicit_name" + random_name = "random_name" + mock_random_name.return_value = random_name + mock_clients("neutron").create_router.return_value = router + + # Default options + result_router = scenario._create_router({}) + mock_clients("neutron").create_router.assert_called_once_with( + {"router": {"name": random_name}}) + self.assertEqual(result_router, router) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.create_router") + + mock_clients("neutron").create_router.reset_mock() + + # Custom options + router_data = {"name": explicit_name, "admin_state_up": True} + result_router = scenario._create_router(router_data) + mock_clients("neutron").create_router.assert_called_once_with( + {"router": router_data}) + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_list_routers(self, mock_clients): + scenario = utils.NeutronScenario() + routers = [mock.Mock()] + mock_clients("neutron").list_routers.return_value = { + "routers": routers} + self.assertEqual(routers, scenario._list_routers()) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.list_routers") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_update_router(self, mock_clients): + scenario = utils.NeutronScenario() + expected_router = { + "router": { + "name": "router-name_updated", + "admin_state_up": False + } + } + mock_clients("neutron").update_router.return_value = expected_router + + router = { + "router": { + "id": "router-id", + "name": "router-name", + "admin_state_up": True + } + } + router_update_args = {"name": "_updated", "admin_state_up": False} + + result_router = scenario._update_router(router, router_update_args) + mock_clients("neutron").update_router.assert_called_once_with( + router["router"]["id"], expected_router) + self.assertEqual(result_router, expected_router) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.update_router") + + @mock.patch(NEUTRON_UTILS + 
"NeutronScenario.clients") + def test_delete_router(self, mock_clients): + scenario = utils.NeutronScenario() + router = scenario._create_router({}) + scenario._delete_router(router) + mock_clients("neutron").delete_router.assert_called_once_with( + router["router"]["id"]) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.delete_router") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_remove_interface_router(self, mock_clients): + subnet = {"name": "subnet-name", "id": "subnet-id"} + router_data = {"id": 1} + scenario = utils.NeutronScenario() + router = scenario._create_router(router_data) + scenario._add_interface_router(subnet, router) + scenario._remove_interface_router(subnet, router) + mock_remove_router = mock_clients("neutron").remove_interface_router + mock_remove_router.assert_called_once_with( + router["id"], {"subnet_id": subnet["id"]}) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.remove_interface_router") + + def test_SUBNET_IP_VERSION(self): + """Curent NeutronScenario implementation supports only IPv4.""" + self.assertEqual(utils.NeutronScenario.SUBNET_IP_VERSION, 4) + + @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name") + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_create_port(self, mock_clients, mock_rand_name): + scenario = utils.NeutronScenario() + + net_id = "network-id" + net = {"network": {"id": net_id}} + rand_name = "random-name" + mock_rand_name.return_value = rand_name + expected_port_args = { + "port": { + "network_id": net_id, + "name": rand_name + } + } + + # Defaults + port_create_args = {} + scenario._create_port(net, port_create_args) + mock_clients("neutron" + ).create_port.assert_called_once_with(expected_port_args) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.create_port") + + mock_clients("neutron").create_port.reset_mock() + + # Custom options + port_args = {"admin_state_up": True} + 
expected_port_args["port"].update(port_args) + scenario._create_port(net, port_args) + mock_clients("neutron" + ).create_port.assert_called_once_with(expected_port_args) + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_list_ports(self, mock_clients): + scenario = utils.NeutronScenario() + ports = [{"name": "port1"}, {"name": "port2"}] + mock_clients("neutron").list_ports.return_value = {"ports": ports} + self.assertEqual(ports, scenario._list_ports()) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.list_ports") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_update_port(self, mock_clients): + scenario = utils.NeutronScenario() + expected_port = { + "port": { + "name": "port-name_updated", + "admin_state_up": False, + "device_id": "dummy_id", + "device_owner": "dummy_owner" + } + } + mock_clients("neutron").update_port.return_value = expected_port + + port = { + "port": { + "id": "port-id", + "name": "port-name", + "admin_state_up": True + } + } + port_update_args = { + "name": "_updated", + "admin_state_up": False, + "device_id": "dummy_id", + "device_owner": "dummy_owner" + } + + result_port = scenario._update_port(port, port_update_args) + mock_clients("neutron").update_port.assert_called_once_with( + port["port"]["id"], expected_port) + self.assertEqual(result_port, expected_port) + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.update_port") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients") + def test_delete_port(self, mock_clients): + scenario = utils.NeutronScenario() + + network = scenario._create_network({}) + port = scenario._create_port(network, {}) + scenario._delete_port(port) + + self._test_atomic_action_timer(scenario.atomic_actions(), + "neutron.create_port") + + @mock.patch(NEUTRON_UTILS + "NeutronScenario._create_subnet", + return_value={ + "subnet": { + "name": "subnet-name", + "id": "subnet-id", + "enable_dhcp": False + } + }) + 
@mock.patch(NEUTRON_UTILS + "NeutronScenario._create_network", + return_value={ + "network": { + "id": "fake-id" + } + }) + def test_create_network_and_subnets(self, + mock_create_network, + mock_create_subnet): + scenario = utils.NeutronScenario() + network_create_args = {} + subnet_create_args = {} + subnets_per_network = 4 + + mock_create_network.reset_mock() + mock_create_subnet.reset_mock() + + # Default options + scenario._create_network_and_subnets( + network_create_args=network_create_args, + subnet_create_args=subnet_create_args, + subnets_per_network=subnets_per_network) + + mock_create_network.assert_called_once_with({}) + mock_create_subnet.assert_has_calls( + [mock.call({"network": {"id": "fake-id"}}, + {}, "1.0.0.0/24")] * subnets_per_network) + + mock_create_network.reset_mock() + mock_create_subnet.reset_mock() + + # Custom options + scenario._create_network_and_subnets( + network_create_args=network_create_args, + subnet_create_args={"allocation_pools": []}, + subnet_cidr_start="10.10.10.0/24", + subnets_per_network=subnets_per_network) + + mock_create_network.assert_called_once_with({}) + mock_create_subnet.assert_has_calls( + [mock.call({"network": {"id": "fake-id"}}, + {"allocation_pools": []}, + "10.10.10.0/24")] * subnets_per_network) + + @mock.patch(NEUTRON_UTILS + "network_wrapper.generate_cidr") + def test_functional_create_network_and_subnets(self, mock_generate_cidr): + scenario = utils.NeutronScenario(clients=fakes.FakeClients()) + network_create_args = {"name": "foo_network"} + subnet_create_args = {} + subnets_per_network = 5 + subnet_cidr_start = "1.1.1.0/24" + + cidrs = ["1.1.%d.0/24" % i for i in range(subnets_per_network)] + cidrs_ = iter(cidrs) + mock_generate_cidr.side_effect = lambda **kw: next(cidrs_) + + network, subnets = scenario._create_network_and_subnets( + network_create_args, + subnet_create_args, + subnets_per_network, + subnet_cidr_start) + + self.assertEqual(network["network"]["name"], "foo_network") + + # This 
checks both data (cidrs seem to be enough) and subnets number + result_cidrs = sorted([s["subnet"]["cidr"] for s in subnets]) + self.assertEqual(cidrs, result_cidrs) diff --git a/tests/unit/plugins/openstack/scenarios/nova/__init__.py b/tests/unit/plugins/openstack/scenarios/nova/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py b/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py new file mode 100644 index 00000000..607bdc29 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/nova/test_hypervisors.py @@ -0,0 +1,31 @@ +# Copyright 2013 Cisco Systems Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from rally.plugins.openstack.scenarios.nova import hypervisors +from tests.unit import test + + +NOVA_HYPERVISORS_MODULE = "rally.plugins.openstack.scenarios.nova.hypervisors" +NOVA_HYPERVISORS = NOVA_HYPERVISORS_MODULE + ".NovaHypervisors" + + +class NovaHypervisorsTestCase(test.TestCase): + def test_list_hypervisors(self): + scenario = hypervisors.NovaHypervisors() + scenario._list_hypervisors = mock.Mock() + scenario.list_hypervisors(detailed=False) + scenario._list_hypervisors.assert_called_once_with(False) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py b/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py new file mode 100644 index 00000000..9032fc69 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/nova/test_keypairs.py @@ -0,0 +1,63 @@ +# Copyright 2015: Hewlett-Packard Development Company, L.P. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from rally.plugins.openstack.scenarios.nova import keypairs +from tests.unit import test + + +class NovaKeypairTestCase(test.TestCase): + + def test_create_and_list_keypairs(self): + scenario = keypairs.NovaKeypair() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._create_keypair = mock.MagicMock(return_value="foo_keypair") + scenario._list_keypairs = mock.MagicMock() + + scenario.create_and_list_keypairs(fakearg="fakearg") + + scenario._create_keypair.assert_called_once_with(fakearg="fakearg") + scenario._list_keypairs.assert_called_once_with() + + def test_create_and_delete_keypair(self): + scenario = keypairs.NovaKeypair() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._create_keypair = mock.MagicMock(return_value="foo_keypair") + scenario._delete_keypair = mock.MagicMock() + + scenario.create_and_delete_keypair(fakearg="fakearg") + + scenario._create_keypair.assert_called_once_with(fakearg="fakearg") + scenario._delete_keypair.assert_called_once_with("foo_keypair") + + def test_boot_and_delete_server_with_keypair(self): + scenario = keypairs.NovaKeypair() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._create_keypair = mock.MagicMock(return_value="foo_keypair") + scenario._boot_server = mock.MagicMock(return_value="foo_server") + scenario._delete_server = mock.MagicMock() + scenario._delete_keypair = mock.MagicMock() + + scenario.boot_and_delete_server_with_keypair("img", 1) + + scenario._create_keypair.assert_called_once_with() + + scenario._boot_server.assert_called_once_with( + "img", 1, key_name="foo_keypair") + + scenario._delete_server.assert_called_once_with("foo_server") + + scenario._delete_keypair.assert_called_once_with("foo_keypair") diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_servers.py b/tests/unit/plugins/openstack/scenarios/nova/test_servers.py new file mode 100644 index 00000000..c1e234fc --- /dev/null +++ 
b/tests/unit/plugins/openstack/scenarios/nova/test_servers.py @@ -0,0 +1,593 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock + +from rally import exceptions as rally_exceptions +from rally import objects +from rally import osclients +from rally.plugins.openstack.scenarios.nova import servers +from tests.unit import fakes +from tests.unit import test + + +NOVA_SERVERS_MODULE = "rally.plugins.openstack.scenarios.nova.servers" +NOVA_SERVERS = NOVA_SERVERS_MODULE + ".NovaServers" + + +class NovaServersTestCase(test.TestCase): + + def test_boot_rescue_unrescue(self): + actions = [{"rescue_unrescue": 5}] + fake_server = mock.MagicMock() + scenario = servers.NovaServers() + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._rescue_server = mock.MagicMock() + scenario._unrescue_server = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + scenario.boot_and_bounce_server("img", 1, actions=actions) + scenario._boot_server.assert_called_once_with("img", 1) + server_calls = [] + for i in range(5): + server_calls.append(mock.call(fake_server)) + self.assertEqual(5, scenario._rescue_server.call_count, + "Rescue not called 5 times") + self.assertEqual(5, scenario._unrescue_server.call_count, + "Unrescue not called 5 times") + scenario._rescue_server.assert_has_calls(server_calls) + 
scenario._unrescue_server.assert_has_calls(server_calls) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_boot_stop_start(self): + actions = [{"stop_start": 5}] + fake_server = mock.MagicMock() + scenario = servers.NovaServers() + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._start_server = mock.MagicMock() + scenario._stop_server = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + scenario.boot_and_bounce_server("img", 1, actions=actions) + + scenario._boot_server.assert_called_once_with("img", 1) + server_calls = [] + for i in range(5): + server_calls.append(mock.call(fake_server)) + self.assertEqual(5, scenario._stop_server.call_count, + "Stop not called 5 times") + self.assertEqual(5, scenario._start_server.call_count, + "Start not called 5 times") + scenario._stop_server.assert_has_calls(server_calls) + scenario._start_server.assert_has_calls(server_calls) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_multiple_bounce_actions(self): + actions = [{"hard_reboot": 5}, {"stop_start": 8}] + fake_server = mock.MagicMock() + scenario = servers.NovaServers() + + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._delete_server = mock.MagicMock() + scenario._reboot_server = mock.MagicMock() + scenario._stop_and_start_server = mock.MagicMock() + scenario._generate_random_name = mock.MagicMock(return_value="name") + + scenario.boot_and_bounce_server("img", 1, actions=actions) + scenario._boot_server.assert_called_once_with("img", 1) + server_calls = [] + for i in range(5): + server_calls.append(mock.call(fake_server)) + self.assertEqual(5, scenario._reboot_server.call_count, + "Reboot not called 5 times") + scenario._reboot_server.assert_has_calls(server_calls) + server_calls = [] + for i in range(8): + server_calls.append(mock.call(fake_server)) 
+ self.assertEqual(8, scenario._stop_and_start_server.call_count, + "Stop/Start not called 8 times") + scenario._stop_and_start_server.assert_has_calls(server_calls) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_boot_lock_unlock_and_delete(self): + server = fakes.FakeServer() + image = fakes.FakeImage() + flavor = fakes.FakeFlavor() + + scenario = servers.NovaServers() + scenario._boot_server = mock.Mock(return_value=server) + scenario._lock_server = mock.Mock(side_effect=lambda s: s.lock()) + scenario._unlock_server = mock.Mock(side_effect=lambda s: s.unlock()) + scenario._delete_server = mock.Mock( + side_effect=lambda s, **kwargs: + self.assertFalse(getattr(s, "OS-EXT-STS:locked", False))) + + scenario.boot_lock_unlock_and_delete(image, flavor, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with(image, flavor, + fakearg="fakearg") + scenario._lock_server.assert_called_once_with(server) + scenario._unlock_server.assert_called_once_with(server) + scenario._delete_server.assert_called_once_with(server, force=False) + + def test_validate_actions(self): + actions = [{"hardd_reboot": 6}] + scenario = servers.NovaServers() + + self.assertRaises(rally_exceptions.InvalidConfigException, + scenario.boot_and_bounce_server, + 1, 1, actions=actions) + actions = [{"hard_reboot": "no"}] + self.assertRaises(rally_exceptions.InvalidConfigException, + scenario.boot_and_bounce_server, + 1, 1, actions=actions) + actions = {"hard_reboot": 6} + self.assertRaises(rally_exceptions.InvalidConfigException, + scenario.boot_and_bounce_server, + 1, 1, actions=actions) + actions = {"hard_reboot": -1} + self.assertRaises(rally_exceptions.InvalidConfigException, + scenario.boot_and_bounce_server, + 1, 1, actions=actions) + actions = {"hard_reboot": 0} + self.assertRaises(rally_exceptions.InvalidConfigException, + scenario.boot_and_bounce_server, + 1, 1, actions=actions) + + def _verify_reboot(self, soft=True): + actions = 
[{"soft_reboot" if soft else "hard_reboot": 5}] + fake_server = mock.MagicMock() + scenario = servers.NovaServers() + + scenario._reboot_server = mock.MagicMock() + scenario._soft_reboot_server = mock.MagicMock() + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._delete_server = mock.MagicMock() + scenario._generate_random_name = mock.MagicMock(return_value="name") + + scenario.boot_and_bounce_server("img", 1, actions=actions) + + scenario._boot_server.assert_called_once_with("img", 1) + server_calls = [] + for i in range(5): + server_calls.append(mock.call(fake_server)) + if soft: + self.assertEqual(5, scenario._soft_reboot_server.call_count, + "Reboot not called 5 times") + scenario._soft_reboot_server.assert_has_calls(server_calls) + else: + self.assertEqual(5, scenario._reboot_server.call_count, + "Reboot not called 5 times") + scenario._reboot_server.assert_has_calls(server_calls) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_boot_soft_reboot(self): + self._verify_reboot(soft=True) + + def test_boot_hard_reboot(self): + self._verify_reboot(soft=False) + + def test_boot_and_delete_server(self): + fake_server = object() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._delete_server = mock.MagicMock() + scenario.sleep_between = mock.MagicMock() + + scenario.boot_and_delete_server("img", 0, 10, 20, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + scenario.sleep_between.assert_called_once_with(10, 20) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_boot_and_list_server(self): + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock() + scenario._list_servers = 
mock.MagicMock() + + scenario.boot_and_list_server("img", 0, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + scenario._list_servers.assert_called_once_with(True) + + def test_suspend_and_resume_server(self): + fake_server = object() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._suspend_server = mock.MagicMock() + scenario._resume_server = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + scenario.suspend_and_resume_server("img", 0, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + + scenario._suspend_server.assert_called_once_with(fake_server) + scenario._resume_server.assert_called_once_with(fake_server) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_pause_and_unpause_server(self): + fake_server = object() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._pause_server = mock.MagicMock() + scenario._unpause_server = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + scenario.pause_and_unpause_server("img", 0, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + + scenario._pause_server.assert_called_once_with(fake_server) + scenario._unpause_server.assert_called_once_with(fake_server) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_shelve_and_unshelve_server(self): + fake_server = mock.MagicMock() + scenario = servers.NovaServers() + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._shelve_server = mock.MagicMock() + scenario._unshelve_server = mock.MagicMock() + scenario._delete_server = 
mock.MagicMock() + + scenario.shelve_and_unshelve_server("img", 0, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + + scenario._shelve_server.assert_called_once_with(fake_server) + scenario._unshelve_server.assert_called_once_with(fake_server) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_list_servers(self): + scenario = servers.NovaServers() + scenario._list_servers = mock.MagicMock() + scenario.list_servers(True) + scenario._list_servers.assert_called_once_with(True) + + def test_boot_server_from_volume_and_delete(self): + fake_server = object() + scenario = servers.NovaServers() + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario.sleep_between = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + fake_volume = fakes.FakeVolumeManager().create() + fake_volume.id = "volume_id" + scenario._create_volume = mock.MagicMock(return_value=fake_volume) + + scenario.boot_server_from_volume_and_delete("img", 0, 5, 10, 20, + fakearg="f") + + scenario._create_volume.assert_called_once_with(5, imageRef="img") + scenario._boot_server.assert_called_once_with( + "img", 0, + block_device_mapping={"vda": "volume_id:::1"}, + fakearg="f") + scenario.sleep_between.assert_called_once_with(10, 20) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def _prepare_boot(self, mock_osclients, nic=None, assert_nic=False): + fake_server = mock.MagicMock() + + fc = fakes.FakeClients() + mock_osclients.Clients.return_value = fc + nova = fakes.FakeNovaClient() + fc.nova = lambda: nova + + user_endpoint = objects.Endpoint("url", "user", "password", "tenant") + clients = osclients.Clients(user_endpoint) + scenario = servers.NovaServers(clients=clients) + + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._generate_random_name = 
mock.MagicMock(return_value="name") + + kwargs = {"fakearg": "f"} + expected_kwargs = {"fakearg": "f"} + + assert_nic = nic or assert_nic + if nic: + kwargs["nics"] = nic + if assert_nic: + nova.networks.create("net-1") + expected_kwargs["nics"] = nic or [{"net-id": "net-2"}] + + print(kwargs) + print(expected_kwargs) + + return scenario, kwargs, expected_kwargs + + def _verify_boot_server(self, mock_osclients, nic=None, assert_nic=False): + scenario, kwargs, expected_kwargs = self._prepare_boot( + mock_osclients=mock_osclients, + nic=nic, assert_nic=assert_nic) + + scenario.boot_server("img", 0, **kwargs) + scenario._boot_server.assert_called_once_with( + "img", 0, auto_assign_nic=False, **expected_kwargs) + + @mock.patch("rally.plugins.openstack.scenarios" + ".nova.servers.NovaServers.clients") + @mock.patch("rally.benchmark.runners.base.osclients") + def test_boot_server_no_nics(self, mock_osclients, mock_nova_clients): + mock_nova_clients.return_value = fakes.FakeNovaClient() + self._verify_boot_server(mock_osclients=mock_osclients, + nic=None, assert_nic=False) + + @mock.patch("rally.benchmark.runners.base.osclients") + def test_boot_server_with_nic(self, mock_osclients): + self._verify_boot_server(mock_osclients=mock_osclients, + nic=[{"net-id": "net-1"}], assert_nic=True) + + def test_snapshot_server(self): + fake_server = object() + fake_image = fakes.FakeImageManager()._create() + fake_image.id = "image_id" + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._create_image = mock.MagicMock(return_value=fake_image) + scenario._delete_server = mock.MagicMock() + scenario._delete_image = mock.MagicMock() + + scenario.snapshot_server("i", 0, fakearg=2) + + scenario._boot_server.assert_has_calls([ + mock.call("i", 0, fakearg=2), + mock.call("image_id", 0, fakearg=2)]) + scenario._create_image.assert_called_once_with(fake_server) + 
scenario._delete_server.assert_has_calls([ + mock.call(fake_server, force=False), + mock.call(fake_server, force=False)]) + scenario._delete_image.assert_called_once_with(fake_image) + + def _test_resize(self, confirm=False): + fake_server = object() + fake_image = fakes.FakeImageManager()._create() + fake_image.id = "image_id" + flavor = mock.MagicMock() + to_flavor = mock.MagicMock() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._resize_confirm = mock.MagicMock() + scenario._resize_revert = mock.MagicMock() + scenario._resize = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + kwargs = {"confirm": confirm} + scenario.resize_server(fake_image, flavor, to_flavor, **kwargs) + + scenario._resize.assert_called_once_with(fake_server, to_flavor) + + if confirm: + scenario._resize_confirm.assert_called_once_with(fake_server) + else: + scenario._resize_revert.assert_called_once_with(fake_server) + + def test_resize_with_confirm(self): + self._test_resize(confirm=True) + + def test_resize_with_revert(self): + self._test_resize(confirm=False) + + def test_boot_and_live_migrate_server(self): + fake_server = mock.MagicMock() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario.sleep_between = mock.MagicMock() + scenario._find_host_to_migrate = mock.MagicMock( + return_value="host_name") + scenario._live_migrate = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + scenario.boot_and_live_migrate_server("img", 0, min_sleep=10, + max_sleep=20, fakearg="fakearg") + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg") + + scenario.sleep_between.assert_called_once_with(10, 20) + + scenario._find_host_to_migrate.assert_called_once_with(fake_server) + + 
scenario._live_migrate.assert_called_once_with(fake_server, + "host_name", + False, False) + scenario._delete_server.assert_called_once_with(fake_server) + + def test_boot_server_from_volume_and_live_migrate(self): + fake_server = mock.MagicMock() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario.sleep_between = mock.MagicMock() + scenario._find_host_to_migrate = mock.MagicMock( + return_value="host_name") + scenario._live_migrate = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + fake_volume = fakes.FakeVolumeManager().create() + fake_volume.id = "volume_id" + scenario._create_volume = mock.MagicMock(return_value=fake_volume) + + scenario.boot_server_from_volume_and_live_migrate("img", 0, 5, + min_sleep=10, + max_sleep=20, + fakearg="f") + + scenario._create_volume.assert_called_once_with(5, imageRef="img") + + scenario._boot_server.assert_called_once_with( + "img", 0, + block_device_mapping={"vda": "volume_id:::1"}, + fakearg="f") + + scenario.sleep_between.assert_called_once_with(10, 20) + + scenario._find_host_to_migrate.assert_called_once_with(fake_server) + + scenario._live_migrate.assert_called_once_with(fake_server, + "host_name", + False, False) + scenario._delete_server.assert_called_once_with(fake_server, + force=False) + + def test_boot_server_attach_created_volume_and_live_migrate(self): + fake_volume = mock.MagicMock() + fake_server = mock.MagicMock() + + scenario = servers.NovaServers() + + scenario._attach_volume = mock.MagicMock() + scenario._detach_volume = mock.MagicMock() + + scenario.sleep_between = mock.MagicMock() + + scenario._find_host_to_migrate = mock.MagicMock( + return_value="host_name") + scenario._live_migrate = mock.MagicMock() + + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._delete_server = mock.MagicMock() + scenario._create_volume = 
mock.MagicMock(return_value=fake_volume) + scenario._delete_volume = mock.MagicMock() + + image = "img" + flavor = "flavor" + size = 5 + boot_kwargs = {"some_var": "asd"} + scenario.boot_server_attach_created_volume_and_live_migrate( + image, flavor, size, min_sleep=10, max_sleep=20, + boot_server_kwargs=boot_kwargs) + scenario._boot_server.assert_called_once_with(image, flavor, + **boot_kwargs) + scenario._create_volume.assert_called_once_with(size) + scenario._attach_volume.assert_called_once_with(fake_server, + fake_volume) + scenario._detach_volume.assert_called_once_with(fake_server, + fake_volume) + scenario.sleep_between.assert_called_once_with(10, 20) + scenario._live_migrate.assert_called_once_with(fake_server, + "host_name", + False, False) + + scenario._delete_volume.assert_called_once_with(fake_volume) + scenario._delete_server.assert_called_once_with(fake_server) + + def _test_boot_and_migrate_server(self, confirm=False): + fake_server = mock.MagicMock() + + scenario = servers.NovaServers() + scenario._generate_random_name = mock.MagicMock(return_value="name") + scenario._boot_server = mock.MagicMock(return_value=fake_server) + scenario._stop_server = mock.MagicMock() + scenario._migrate = mock.MagicMock() + scenario._resize_confirm = mock.MagicMock() + scenario._resize_revert = mock.MagicMock() + scenario._delete_server = mock.MagicMock() + + kwargs = {"confirm": confirm} + scenario.boot_and_migrate_server("img", 0, + fakearg="fakearg", **kwargs) + + scenario._boot_server.assert_called_once_with("img", 0, + fakearg="fakearg", + confirm=confirm) + + scenario._stop_server.assert_called_once_with(fake_server) + + scenario._migrate.assert_called_once_with(fake_server) + + if confirm: + scenario._resize_confirm.assert_called_once_with(fake_server, + status="SHUTOFF") + else: + scenario._resize_revert.assert_called_once_with(fake_server, + status="SHUTOFF") + + scenario._delete_server.assert_called_once_with(fake_server) + + def 
test_boot_and_migrate_server_with_confirm(self): + self._test_boot_and_migrate_server(confirm=True) + + def test_boot_and_migrate_server_with_revert(self): + self._test_boot_and_migrate_server(confirm=False) + + def test_boot_and_rebuild_server(self): + scenario = servers.NovaServers() + scenario._boot_server = mock.Mock() + scenario._rebuild_server = mock.Mock() + scenario._delete_server = mock.Mock() + + from_image = "img1" + to_image = "img2" + flavor = "flavor" + scenario.boot_and_rebuild_server(from_image, to_image, flavor, + fakearg="fakearg") + + scenario._boot_server.assert_called_once_with(from_image, flavor, + fakearg="fakearg") + server = scenario._boot_server.return_value + scenario._rebuild_server.assert_called_once_with(server, to_image) + scenario._delete_server.assert_called_once_with(server) + + @mock.patch(NOVA_SERVERS_MODULE + ".network_wrapper.wrap") + def test_boot_and_associate_floating_ip(self, mock_wrap): + scenario = servers.NovaServers() + server = mock.Mock() + scenario._boot_server = mock.Mock(return_value=server) + scenario._associate_floating_ip = mock.Mock() + + image = "img" + flavor = "flavor" + scenario.boot_and_associate_floating_ip(image, flavor, + fakearg="fakearg") + + scenario._boot_server.assert_called_once_with(image, flavor, + fakearg="fakearg") + net_wrap = mock_wrap.return_value + net_wrap.create_floating_ip.assert_called_once_with( + tenant_id=server.tenant_id) + scenario._associate_floating_ip.assert_called_once_with( + server, net_wrap.create_floating_ip.return_value["ip"]) diff --git a/tests/unit/plugins/openstack/scenarios/nova/test_utils.py b/tests/unit/plugins/openstack/scenarios/nova/test_utils.py new file mode 100644 index 00000000..4521b7d7 --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/nova/test_utils.py @@ -0,0 +1,774 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import mock +from oslo_config import cfg +from oslotest import mockpatch + +from rally.benchmark import utils as butils +from rally import exceptions as rally_exceptions +from rally.plugins.openstack.scenarios.nova import utils +from tests.unit import fakes +from tests.unit import test + +BM_UTILS = "rally.benchmark.utils" +NOVA_UTILS = "rally.plugins.openstack.scenarios.nova.utils" +SCN = "rally.benchmark.scenarios.base" +CONF = cfg.CONF + + +class NovaScenarioTestCase(test.TestCase): + + def setUp(self): + super(NovaScenarioTestCase, self).setUp() + self.server = mock.Mock() + self.server1 = mock.Mock() + self.volume = mock.Mock() + self.floating_ip = mock.Mock() + self.image = mock.Mock() + self.keypair = mock.Mock() + self.res_is = mockpatch.Patch(BM_UTILS + ".resource_is") + self.get_fm = mockpatch.Patch(BM_UTILS + ".get_from_manager") + self.wait_for = mockpatch.Patch(NOVA_UTILS + ".bench_utils.wait_for") + self.wait_for_delete = mockpatch.Patch(NOVA_UTILS + + ".bench_utils.wait_for_delete") + self.useFixture(self.wait_for_delete) + self.useFixture(self.wait_for) + self.useFixture(self.res_is) + self.useFixture(self.get_fm) + self.gfm = self.get_fm.mock + self.useFixture(mockpatch.Patch("time.sleep")) + + def test_failed_server_status(self): + self.get_fm.cleanUp() + server_manager = fakes.FakeFailedServerManager() + self.assertRaises(rally_exceptions.GetResourceFailure, + butils.get_from_manager(), + 
server_manager.create("fails", "1", "2")) + + def _test_assert_called_once_with(self, mock, resource, + chk_interval, time_out, **kwargs): + """Method to replace repeatative asserts on resources + + :param mock: The mock to call assert with + :param resource: The resource used in mock + :param chk_interval: The interval used for polling the action + :param time_out: Time out value for action + :param kwargs: currently used for validating the is_ready attribute, + can be extended as required + """ + + isready = self.res_is.mock() + if kwargs: + if kwargs["is_ready"]: + mock.assert_called_once_with( + resource, + update_resource=self.gfm(), + is_ready=isready, + check_interval=chk_interval, + timeout=time_out) + else: + mock.assert_called_once_with( + resource, + update_resource=self.gfm(), + check_interval=chk_interval, + timeout=time_out) + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__list_servers(self, mock_clients): + servers_list = [] + mock_clients("nova").servers.list.return_value = servers_list + nova_scenario = utils.NovaScenario() + return_servers_list = nova_scenario._list_servers(True) + self.assertEqual(servers_list, return_servers_list) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_servers") + + @mock.patch(SCN + ".Scenario._generate_random_name", + return_value="foo_server_name") + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server(self, mock_clients, mock_generate_random_name): + mock_clients("nova").servers.create.return_value = self.server + nova_scenario = utils.NovaScenario(context={}) + return_server = nova_scenario._boot_server("image_id", + "flavor_id") + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_boot_poll_interval, + CONF.benchmark.nova_server_boot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self.assertEqual(self.wait_for.mock(), return_server) + 
mock_clients("nova").servers.create.assert_called_once_with( + "foo_server_name", "image_id", "flavor_id") + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_server") + + @mock.patch(SCN + ".Scenario._generate_random_name", + return_value="foo_server_name") + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server_with_network(self, mock_clients, + mock_generate_random_name): + mock_clients("nova").servers.create.return_value = self.server + networks = [{"id": "foo_id", "external": False}, + {"id": "bar_id", "external": False}] + mock_clients("nova").networks.list.return_value = networks + nova_scenario = utils.NovaScenario(context={ + "iteration": 3, + "config": {"users": {"tenants": 2}}, + "tenant": {"networks": networks}}) + return_server = nova_scenario._boot_server("image_id", + "flavor_id", + auto_assign_nic=True) + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_boot_poll_interval, + CONF.benchmark.nova_server_boot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + mock_clients("nova").servers.create.assert_called_once_with( + "foo_server_name", "image_id", "flavor_id", + nics=[{"net-id": "bar_id"}]) + self.assertEqual(self.wait_for.mock(), return_server) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_server") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server_with_network_exception(self, mock_clients): + mock_clients("nova").servers.create.return_value = self.server + nova_scenario = utils.NovaScenario( + context={"tenant": {"networks": None}}) + self.assertRaises(TypeError, nova_scenario._boot_server, + "image_id", "flavor_id", + auto_assign_nic=True) + + @mock.patch(SCN + ".Scenario._generate_random_name", + return_value="foo_server_name") + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server_with_ssh(self, mock_clients, + mock_generate_random_name): + 
mock_clients("nova").servers.create.return_value = self.server + nova_scenario = utils.NovaScenario(context={ + "user": {"secgroup": {"name": "test"}}} + ) + return_server = nova_scenario._boot_server("image_id", "flavor_id") + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_boot_poll_interval, + CONF.benchmark.nova_server_boot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self.assertEqual(self.wait_for.mock(), return_server) + mock_clients("nova").servers.create.assert_called_once_with( + "foo_server_name", "image_id", "flavor_id", + security_groups=["test"]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_server") + + @mock.patch(SCN + ".Scenario._generate_random_name", + return_value="foo_server_name") + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server_with_sec_group(self, mock_clients, + mock_generate_random_name): + mock_clients("nova").servers.create.return_value = self.server + nova_scenario = utils.NovaScenario(context={ + "user": {"secgroup": {"name": "new"}}} + ) + return_server = nova_scenario._boot_server( + "image_id", "flavor_id", + security_groups=["test"]) + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_boot_poll_interval, + CONF.benchmark.nova_server_boot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self.assertEqual(self.wait_for.mock(), return_server) + mock_clients("nova").servers.create.assert_called_once_with( + "foo_server_name", "image_id", "flavor_id", + security_groups=["test", "new"]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_server") + + @mock.patch(SCN + ".Scenario._generate_random_name", + return_value="foo_server_name") + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_server_with_similar_sec_group(self, mock_clients, + mock_generate_random_name): + 
mock_clients("nova").servers.create.return_value = self.server + nova_scenario = utils.NovaScenario(context={ + "user": {"secgroup": {"name": "test1"}}} + ) + return_server = nova_scenario._boot_server( + "image_id", "flavor_id", + security_groups=["test1"]) + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_boot_poll_interval, + CONF.benchmark.nova_server_boot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self.assertEqual(self.wait_for.mock(), return_server) + mock_clients("nova").servers.create.assert_called_once_with( + "foo_server_name", "image_id", "flavor_id", + security_groups=["test1"]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_server") + + def test__suspend_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._suspend_server(self.server) + self.server.suspend.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_suspend_poll_interval, + CONF.benchmark.nova_server_suspend_timeout) + self.res_is.mock.assert_has_calls([mock.call("SUSPENDED")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.suspend_server") + + def test__resume_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._resume_server(self.server) + self.server.resume.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_resume_poll_interval, + CONF.benchmark.nova_server_resume_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.resume_server") + + def test__pause_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._pause_server(self.server) + self.server.pause.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + 
CONF.benchmark.nova_server_pause_poll_interval, + CONF.benchmark.nova_server_pause_timeout) + self.res_is.mock.assert_has_calls([mock.call("PAUSED")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.pause_server") + + def test__unpause_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._unpause_server(self.server) + self.server.unpause.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_unpause_poll_interval, + CONF.benchmark.nova_server_unpause_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.unpause_server") + + def test__shelve_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._shelve_server(self.server) + self.server.shelve.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_shelve_poll_interval, + CONF.benchmark.nova_server_shelve_timeout) + self.res_is.mock.assert_has_calls([mock.call("SHELVED_OFFLOADED")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.shelve_server") + + def test__unshelve_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._unshelve_server(self.server) + self.server.unshelve.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_unshelve_poll_interval, + CONF.benchmark.nova_server_unshelve_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.unshelve_server") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__create_image(self, mock_clients): + mock_clients("nova").images.get.return_value = self.image + nova_scenario = utils.NovaScenario() + return_image = nova_scenario._create_image(self.server) + 
self._test_assert_called_once_with( + self.wait_for.mock, self.image, + CONF.benchmark.nova_server_image_create_poll_interval, + CONF.benchmark.nova_server_image_create_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self.assertEqual(self.wait_for.mock(), return_image) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.create_image") + + def test__default_delete_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._delete_server(self.server) + self.server.delete.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for_delete.mock, self.server, + CONF.benchmark.nova_server_delete_poll_interval, + CONF.benchmark.nova_server_delete_timeout, + is_ready=None) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.delete_server") + + def test__force_delete_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._delete_server(self.server, force=True) + self.server.force_delete.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for_delete.mock, self.server, + CONF.benchmark.nova_server_delete_poll_interval, + CONF.benchmark.nova_server_delete_timeout, + is_ready=None) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.force_delete_server") + + def test__reboot_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._reboot_server(self.server) + self.server.reboot.assert_called_once_with(reboot_type="HARD") + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_reboot_poll_interval, + CONF.benchmark.nova_server_reboot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.reboot_server") + + def test__soft_reboot_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._soft_reboot_server(self.server) + 
self.server.reboot.assert_called_once_with(reboot_type="SOFT") + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_reboot_poll_interval, + CONF.benchmark.nova_server_reboot_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.soft_reboot_server") + + def test__rebuild_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._rebuild_server(self.server, "img", fakearg="fakearg") + self.server.rebuild.assert_called_once_with("img", fakearg="fakearg") + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_rebuild_poll_interval, + CONF.benchmark.nova_server_rebuild_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.rebuild_server") + + def test__start_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._start_server(self.server) + self.server.start.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_start_poll_interval, + CONF.benchmark.nova_server_start_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.start_server") + + def test__stop_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._stop_server(self.server) + self.server.stop.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_stop_poll_interval, + CONF.benchmark.nova_server_stop_timeout) + self.res_is.mock.assert_has_calls([mock.call("SHUTOFF")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.stop_server") + + def test__rescue_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._rescue_server(self.server) + 
self.server.rescue.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_rescue_poll_interval, + CONF.benchmark.nova_server_rescue_timeout) + self.res_is.mock.assert_has_calls([mock.call("RESCUE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.rescue_server") + + def test__unrescue_server(self): + nova_scenario = utils.NovaScenario() + nova_scenario._unrescue_server(self.server) + self.server.unrescue.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_unrescue_poll_interval, + CONF.benchmark.nova_server_unrescue_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.unrescue_server") + + def test__delete_image(self): + nova_scenario = utils.NovaScenario() + nova_scenario._delete_image(self.image) + self.image.delete.assert_called_once_with() + self._test_assert_called_once_with( + self.wait_for_delete.mock, self.image, + CONF.benchmark.nova_server_image_delete_poll_interval, + CONF.benchmark.nova_server_image_delete_timeout, + is_ready=None) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.delete_image") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__boot_servers(self, mock_clients): + mock_clients("nova").servers.list.return_value = [self.server, + self.server1] + nova_scenario = utils.NovaScenario() + nova_scenario._boot_servers("prefix", "image", "flavor", 2) + expected = [ + mock.call( + self.server, is_ready=self.res_is.mock(), + update_resource=self.gfm(), + check_interval=CONF.benchmark.nova_server_boot_poll_interval, + timeout=CONF.benchmark.nova_server_boot_timeout + ), + mock.call( + self.server1, is_ready=self.res_is.mock(), + update_resource=self.gfm(), + check_interval=CONF.benchmark.nova_server_boot_poll_interval, + 
timeout=CONF.benchmark.nova_server_boot_timeout + ) + ] + self.assertEqual(expected, self.wait_for.mock.mock_calls) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.boot_servers") + + def test__associate_floating_ip(self): + nova_scenario = utils.NovaScenario() + nova_scenario._associate_floating_ip(self.server, self.floating_ip) + self.server.add_floating_ip.assert_called_once_with(self.floating_ip, + fixed_address=None) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.associate_floating_ip") + + def test__dissociate_floating_ip(self): + nova_scenario = utils.NovaScenario() + nova_scenario._dissociate_floating_ip(self.server, self.floating_ip) + self.server.remove_floating_ip.assert_called_once_with( + self.floating_ip) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.dissociate_floating_ip") + + def test__check_ip_address(self): + nova_scenario = utils.NovaScenario() + fake_server = fakes.FakeServerManager().create("test_server", + "image_id_01", + "flavor_id_01") + fake_server.addresses = { + "private": [ + {"version": 4, "addr": "1.2.3.4"}, + ]} + floating_ip = fakes.FakeFloatingIP() + floating_ip.ip = "10.20.30.40" + + # Also test function check_ip_address accept a string as attr + self.assertFalse( + nova_scenario.check_ip_address(floating_ip.ip)(fake_server)) + self.assertTrue( + nova_scenario.check_ip_address(floating_ip.ip, must_exist=False) + (fake_server)) + + fake_server.addresses["private"].append( + {"version": 4, "addr": floating_ip.ip} + ) + # Also test function check_ip_address accept an object with attr ip + self.assertTrue( + nova_scenario.check_ip_address(floating_ip) + (fake_server)) + self.assertFalse( + nova_scenario.check_ip_address(floating_ip, must_exist=False) + (fake_server)) + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__list_networks(self, mock_clients): + network_list = [] + 
mock_clients("nova").networks.list.return_value = network_list + nova_scenario = utils.NovaScenario() + return_network_list = nova_scenario._list_networks() + self.assertEqual(network_list, return_network_list) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_networks") + + def test__resize(self): + nova_scenario = utils.NovaScenario() + to_flavor = mock.Mock() + nova_scenario._resize(self.server, to_flavor) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.resize") + + def test__resize_confirm(self): + nova_scenario = utils.NovaScenario() + nova_scenario._resize_confirm(self.server) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.resize_confirm") + + def test__resize_revert(self): + nova_scenario = utils.NovaScenario() + nova_scenario._resize_revert(self.server) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.resize_revert") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__attach_volume(self, mock_clients): + mock_clients("nova").volumes.create_server_volume.return_value = None + nova_scenario = utils.NovaScenario() + nova_scenario._attach_volume(self.server, self.volume) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.attach_volume") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__detach_volume(self, mock_clients): + mock_clients("nova").volumes.delete_server_volume.return_value = None + nova_scenario = utils.NovaScenario() + nova_scenario._detach_volume(self.server, self.volume) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.detach_volume") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__live_migrate_server(self, mock_clients): + fake_host = mock.MagicMock() + mock_clients("nova").servers.get(return_value=self.server) + nova_scenario = utils.NovaScenario(admin_clients=mock_clients) + nova_scenario._live_migrate(self.server, + fake_host, + 
block_migration=False, + disk_over_commit=False, + skip_host_check=True) + + self._test_assert_called_once_with( + self.wait_for.mock, self.server, + CONF.benchmark.nova_server_live_migrate_poll_interval, + CONF.benchmark.nova_server_live_migrate_timeout) + self.res_is.mock.assert_has_calls([mock.call("ACTIVE")]) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.live_migrate") + + @mock.patch(NOVA_UTILS + ".NovaScenario.admin_clients") + def test__find_host_to_migrate(self, mock_clients): + fake_server = self.server + fake_host = {"nova-compute": {"available": True}} + nova_client = mock.MagicMock() + mock_clients.return_value = nova_client + nova_client.servers.get.return_value = fake_server + nova_client.availability_zones.list.return_value = [ + mock.MagicMock(zoneName="a", + hosts={"a1": fake_host, "a2": fake_host, + "a3": fake_host}), + mock.MagicMock(zoneName="b", + hosts={"b1": fake_host, "b2": fake_host, + "b3": fake_host}), + mock.MagicMock(zoneName="c", + hosts={"c1": fake_host, + "c2": fake_host, "c3": fake_host}) + ] + setattr(fake_server, "OS-EXT-SRV-ATTR:host", "b2") + setattr(fake_server, "OS-EXT-AZ:availability_zone", "b") + nova_scenario = utils.NovaScenario(admin_clients=fakes.FakeClients()) + + self.assertIn( + nova_scenario._find_host_to_migrate(fake_server), ["b1", "b3"]) + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__migrate_server(self, mock_clients): + fake_server = self.server + setattr(fake_server, "OS-EXT-SRV-ATTR:host", "a1") + mock_clients("nova").servers.get(return_value=fake_server) + nova_scenario = utils.NovaScenario(admin_clients=mock_clients) + nova_scenario._migrate(fake_server, skip_host_check=True) + + self._test_assert_called_once_with( + self.wait_for.mock, fake_server, + CONF.benchmark.nova_server_migrate_poll_interval, + CONF.benchmark.nova_server_migrate_timeout) + self.res_is.mock.assert_has_calls([mock.call("VERIFY_RESIZE")]) + 
self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.migrate") + + self.assertRaises(rally_exceptions.MigrateException, + nova_scenario._migrate, + fake_server, skip_host_check=False) + + def test__create_security_groups(self): + clients = mock.MagicMock() + nova_scenario = utils.NovaScenario() + nova_scenario.clients = clients + nova_scenario._generate_random_name = mock.MagicMock() + + security_group_count = 5 + + sec_groups = nova_scenario._create_security_groups( + security_group_count) + + self.assertEqual(security_group_count, clients.call_count) + self.assertEqual(security_group_count, len(sec_groups)) + self.assertEqual(security_group_count, + nova_scenario._generate_random_name.call_count) + self.assertEqual(security_group_count, + clients().security_groups.create.call_count) + self._test_atomic_action_timer( + nova_scenario.atomic_actions(), + "nova.create_%s_security_groups" % security_group_count) + + def test__create_rules_for_security_group(self): + clients = mock.MagicMock() + nova_scenario = utils.NovaScenario() + nova_scenario.clients = clients + + fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"), + fakes.FakeSecurityGroup(None, None, 2, "uuid2")] + rules_per_security_group = 10 + + nova_scenario._create_rules_for_security_group( + fake_secgroups, rules_per_security_group) + + self.assertEqual(len(fake_secgroups) * rules_per_security_group, + clients.call_count) + self.assertEqual(len(fake_secgroups) * rules_per_security_group, + clients().security_group_rules.create.call_count) + self._test_atomic_action_timer( + nova_scenario.atomic_actions(), + "nova.create_%s_rules" % + (rules_per_security_group * len(fake_secgroups))) + + def test__delete_security_groups(self): + clients = mock.MagicMock() + nova_scenario = utils.NovaScenario() + nova_scenario.clients = clients + + fake_secgroups = [fakes.FakeSecurityGroup(None, None, 1, "uuid1"), + fakes.FakeSecurityGroup(None, None, 2, "uuid2")] + + 
nova_scenario._delete_security_groups(fake_secgroups) + + self.assertEqual(len(fake_secgroups), clients.call_count) + + self.assertSequenceEqual( + map(lambda x: mock.call(x.id), fake_secgroups), + clients().security_groups.delete.call_args_list) + self._test_atomic_action_timer( + nova_scenario.atomic_actions(), + "nova.delete_%s_security_groups" % len(fake_secgroups)) + + def test__list_security_groups(self): + clients = mock.MagicMock() + nova_scenario = utils.NovaScenario() + nova_scenario.clients = clients + + nova_scenario._list_security_groups() + + clients.assert_called_once_with("nova") + clients().security_groups.list.assert_called_once_with() + + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_security_groups") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__list_keypairs(self, mock_clients): + keypairs_list = ["foo_keypair"] + mock_clients("nova").keypairs.list.return_value = keypairs_list + nova_scenario = utils.NovaScenario() + return_keypairs_list = nova_scenario._list_keypairs() + self.assertEqual(keypairs_list, return_keypairs_list) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_keypairs") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__create_keypair(self, mock_clients): + (mock_clients("nova").keypairs.create. 
+ return_value.name) = self.keypair + nova_scenario = utils.NovaScenario() + return_keypair = nova_scenario._create_keypair() + self.assertEqual(self.keypair, return_keypair) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.create_keypair") + + @mock.patch(NOVA_UTILS + ".NovaScenario.clients") + def test__delete_keypair(self, mock_clients): + nova_scenario = utils.NovaScenario() + nova_scenario._delete_keypair(self.keypair) + mock_clients("nova").keypairs.delete.assert_called_once_with( + self.keypair) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.delete_keypair") + + @mock.patch(NOVA_UTILS + ".NovaScenario.admin_clients") + def test__list_floating_ips_bulk(self, mock_clients): + floating_ips_bulk_list = ["foo_floating_ips_bulk"] + mock_clients("nova").floating_ips_bulk.list.return_value = ( + floating_ips_bulk_list) + nova_scenario = utils.NovaScenario() + return_floating_ips_bulk_list = nova_scenario._list_floating_ips_bulk() + self.assertEqual(floating_ips_bulk_list, return_floating_ips_bulk_list) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_floating_ips_bulk") + + @mock.patch(NOVA_UTILS + ".network_wrapper.generate_cidr") + @mock.patch(NOVA_UTILS + ".NovaScenario.admin_clients") + def test__create_floating_ips_bulk(self, mock_clients, mock_gencidr): + fake_cidr = "10.2.0.0/24" + fake_pool = "test1" + fake_floating_ips_bulk = mock.MagicMock() + fake_floating_ips_bulk.ip_range = fake_cidr + fake_floating_ips_bulk.pool = fake_pool + mock_clients("nova").floating_ips_bulk.create.return_value = ( + fake_floating_ips_bulk) + nova_scenario = utils.NovaScenario() + return_iprange = nova_scenario._create_floating_ips_bulk(fake_cidr) + mock_gencidr.assert_called_once_with(start_cidr=fake_cidr) + self.assertEqual(return_iprange, fake_floating_ips_bulk) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.create_floating_ips_bulk") + + @mock.patch(NOVA_UTILS + 
".NovaScenario.admin_clients") + def test__delete_floating_ips_bulk(self, mock_clients): + fake_cidr = "10.2.0.0/24" + nova_scenario = utils.NovaScenario() + nova_scenario._delete_floating_ips_bulk(fake_cidr) + mock_clients("nova").floating_ips_bulk.delete.assert_called_once_with( + fake_cidr) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.delete_floating_ips_bulk") + + @mock.patch(NOVA_UTILS + ".NovaScenario.admin_clients") + def test__list_hypervisors(self, mock_clients): + nova_scenario = utils.NovaScenario() + nova_scenario._list_hypervisors(detailed=False) + mock_clients("nova").hypervisors.list.assert_called_once_with(False) + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.list_hypervisors") + + def test__lock_server(self): + server = mock.Mock() + nova_scenario = utils.NovaScenario() + nova_scenario._lock_server(server) + server.lock.assert_called_once_with() + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.lock_server") + + def test__unlock_server(self): + server = mock.Mock() + nova_scenario = utils.NovaScenario() + nova_scenario._unlock_server(server) + server.unlock.assert_called_once_with() + self._test_atomic_action_timer(nova_scenario.atomic_actions(), + "nova.unlock_server") diff --git a/tests/unit/plugins/openstack/scenarios/vm/__init__.py b/tests/unit/plugins/openstack/scenarios/vm/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit/plugins/openstack/scenarios/vm/test_utils.py b/tests/unit/plugins/openstack/scenarios/vm/test_utils.py new file mode 100644 index 00000000..9572ac0c --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/vm/test_utils.py @@ -0,0 +1,294 @@ +# Copyright 2013: Mirantis Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import subprocess + +import mock +from oslotest import mockpatch +import six + +from rally import exceptions +from rally.plugins.openstack.scenarios.vm import utils +from tests.unit import test + +VMTASKS_UTILS = "rally.plugins.openstack.scenarios.vm.utils" + + +class VMScenarioTestCase(test.TestCase): + + def setUp(self): + super(VMScenarioTestCase, self).setUp() + self.wait_for = mockpatch.Patch(VMTASKS_UTILS + + ".bench_utils.wait_for") + self.useFixture(self.wait_for) + + @mock.patch("%s.open" % VMTASKS_UTILS, + side_effect=mock.mock_open(), create=True) + def test__run_command_over_ssh(self, mock_open): + mock_ssh = mock.MagicMock() + vm_scenario = utils.VMScenario() + vm_scenario._run_command_over_ssh(mock_ssh, "interpreter", "script") + mock_ssh.execute.assert_called_once_with("interpreter", + stdin=mock_open.side_effect()) + + def test__run_command_over_ssh_stringio(self): + mock_ssh = mock.MagicMock() + vm_scenario = utils.VMScenario() + script = six.moves.StringIO("script") + vm_scenario._run_command_over_ssh(mock_ssh, "interpreter", script) + mock_ssh.execute.assert_called_once_with("interpreter", + stdin=script) + + def test__run_command_over_ssh_fails(self): + vm_scenario = utils.VMScenario() + self.assertRaises(exceptions.ScriptError, + vm_scenario._run_command_over_ssh, + None, "interpreter", 10) + + def test__wait_for_ssh(self): + ssh = mock.MagicMock() + vm_scenario = utils.VMScenario() + vm_scenario._wait_for_ssh(ssh) + ssh.wait.assert_called_once_with() + + @mock.patch(VMTASKS_UTILS + ".VMScenario._ping_ip_address", + 
return_value=True) + def test__wait_for_ping(self, mock__ping): + vm_scenario = utils.VMScenario() + vm_scenario._wait_for_ping("1.2.3.4") + self.wait_for.mock.assert_called_once_with("1.2.3.4", + is_ready=mock__ping, + timeout=120) + + @mock.patch(VMTASKS_UTILS + ".VMScenario._run_command_over_ssh") + @mock.patch("rally.common.sshutils.SSH") + def test__run_command(self, mock_ssh_class, mock_run_command_over_ssh): + mock_ssh_instance = mock.MagicMock() + mock_ssh_class.return_value = mock_ssh_instance + + vm_scenario = utils.VMScenario() + vm_scenario.context = {"user": {"keypair": {"private": "ssh"}}} + vm_scenario._run_command("1.2.3.4", 22, "username", + "password", "int", "/path/to/foo/script.sh", + is_file=True) + + mock_ssh_class.assert_called_once_with("username", "1.2.3.4", port=22, + pkey="ssh", + password="password") + mock_ssh_instance.wait.assert_called_once_with() + mock_run_command_over_ssh.assert_called_once_with( + mock_ssh_instance, "int", "/path/to/foo/script.sh", True) + + @mock.patch(VMTASKS_UTILS + ".sshutils.SSH") + def test__run_command_inline_script(self, mock_ssh): + mock_ssh_instance = mock.MagicMock() + mock_ssh.return_value = mock_ssh_instance + mock_ssh_instance.execute.return_value = "foobar" + vm_scenario = utils.VMScenario() + vm_scenario._wait_for_ssh = mock.Mock() + vm_scenario.context = {"user": {"keypair": {"private": "foo_pkey"}}} + result = vm_scenario._run_command("foo_ip", "foo_port", "foo_username", + "foo_password", "foo_interpreter", + "foo_script", is_file=False) + mock_ssh.assert_called_once_with("foo_username", "foo_ip", + port="foo_port", pkey="foo_pkey", + password="foo_password") + vm_scenario._wait_for_ssh.assert_called_once_with(mock_ssh_instance) + mock_ssh_instance.execute.assert_called_once_with("foo_interpreter", + stdin="foo_script") + self.assertEqual(result, "foobar") + + @mock.patch(VMTASKS_UTILS + ".sys") + @mock.patch("subprocess.Popen") + def test__ping_ip_address_linux(self, mock_subprocess, mock_sys): 
+ ping_process = mock.MagicMock() + ping_process.returncode = 0 + mock_subprocess.return_value = ping_process + mock_sys.platform = "linux2" + + vm_scenario = utils.VMScenario() + host_ip = "1.2.3.4" + self.assertTrue(vm_scenario._ping_ip_address(host_ip)) + + mock_subprocess.assert_called_once_with( + ["ping", "-c1", "-w1", host_ip], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + ping_process.wait.assert_called_once_with() + + @mock.patch(VMTASKS_UTILS + ".sys") + @mock.patch("subprocess.Popen") + def test__ping_ip_address_linux_ipv6(self, mock_subprocess, mock_sys): + ping_process = mock.MagicMock() + ping_process.returncode = 0 + mock_subprocess.return_value = ping_process + mock_sys.platform = "linux2" + + vm_scenario = utils.VMScenario() + host_ip = "1ce:c01d:bee2:15:a5:900d:a5:11fe" + self.assertTrue(vm_scenario._ping_ip_address(host_ip)) + + mock_subprocess.assert_called_once_with( + ["ping6", "-c1", "-w1", host_ip], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + ping_process.wait.assert_called_once_with() + + @mock.patch(VMTASKS_UTILS + ".sys") + @mock.patch("subprocess.Popen") + def test__ping_ip_address_other_os(self, mock_subprocess, mock_sys): + ping_process = mock.MagicMock() + ping_process.returncode = 0 + mock_subprocess.return_value = ping_process + mock_sys.platform = "freebsd10" + + vm_scenario = utils.VMScenario() + host_ip = "1.2.3.4" + self.assertTrue(vm_scenario._ping_ip_address(host_ip)) + + mock_subprocess.assert_called_once_with( + ["ping", "-c1", host_ip], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + ping_process.wait.assert_called_once_with() + + @mock.patch(VMTASKS_UTILS + ".sys") + @mock.patch("subprocess.Popen") + def test__ping_ip_address_other_os_ipv6(self, mock_subprocess, mock_sys): + ping_process = mock.MagicMock() + ping_process.returncode = 0 + mock_subprocess.return_value = ping_process + mock_sys.platform = "freebsd10" + + vm_scenario = utils.VMScenario() + host_ip = "1ce:c01d:bee2:15:a5:900d:a5:11fe" + 
self.assertTrue(vm_scenario._ping_ip_address(host_ip)) + + mock_subprocess.assert_called_once_with( + ["ping6", "-c1", host_ip], + stderr=subprocess.PIPE, stdout=subprocess.PIPE) + ping_process.wait.assert_called_once_with() + + def get_scenario(self): + server = mock.Mock( + networks={"foo_net": "foo_data"}, + addresses={"foo_net": [{"addr": "foo_ip"}]}, + tenant_id="foo_tenant" + ) + scenario = utils.VMScenario(context={}) + + scenario._boot_server = mock.Mock(return_value=server) + scenario._delete_server = mock.Mock() + scenario._associate_floating_ip = mock.Mock() + scenario._wait_for_ping = mock.Mock() + + return scenario, server + + def test__boot_server_with_fip_without_networks(self): + scenario, server = self.get_scenario() + server.networks = {} + self.assertRaises(RuntimeError, + scenario._boot_server_with_fip, + "foo_image", "foo_flavor", foo_arg="foo_value") + scenario._boot_server.assert_called_once_with( + "foo_image", "foo_flavor", + foo_arg="foo_value", auto_assign_nic=True) + + def test__boot_server_with_fixed_ip(self): + scenario, server = self.get_scenario() + scenario._attach_floating_ip = mock.Mock() + server, ip = scenario._boot_server_with_fip( + "foo_image", "foo_flavor", floating_network="ext_network", + use_floating_ip=False, wait_for_ping=True, foo_arg="foo_value") + + self.assertEqual(ip, {"ip": "foo_ip", "id": None, + "is_floating": False}) + scenario._boot_server.assert_called_once_with( + "foo_image", "foo_flavor", + auto_assign_nic=True, foo_arg="foo_value") + self.assertEqual(scenario._attach_floating_ip.mock_calls, []) + scenario._wait_for_ping.assert_called_once_with("foo_ip") + + def test__boot_server_with_fip(self): + scenario, server = self.get_scenario() + scenario._attach_floating_ip = mock.Mock( + return_value={"id": "foo_id", "ip": "foo_ip"}) + server, ip = scenario._boot_server_with_fip( + "foo_image", "foo_flavor", floating_network="ext_network", + use_floating_ip=True, wait_for_ping=True, foo_arg="foo_value") + 
self.assertEqual(ip, {"ip": "foo_ip", "id": "foo_id", + "is_floating": True}) + + scenario._boot_server.assert_called_once_with( + "foo_image", "foo_flavor", + auto_assign_nic=True, foo_arg="foo_value") + scenario._attach_floating_ip.assert_called_once_with( + server, "ext_network") + scenario._wait_for_ping.assert_called_once_with("foo_ip") + + def test__delete_server_with_fixed_ip(self): + ip = {"ip": "foo_ip", "id": None, "is_floating": False} + scenario, server = self.get_scenario() + scenario._delete_floating_ip = mock.Mock() + scenario._delete_server_with_fip(server, ip, force_delete=True) + + self.assertEqual(scenario._delete_floating_ip.mock_calls, []) + scenario._delete_server.assert_called_once_with(server, force=True) + + def test__delete_server_with_fip(self): + fip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True} + scenario, server = self.get_scenario() + scenario._delete_floating_ip = mock.Mock() + scenario._delete_server_with_fip(server, fip, force_delete=True) + + scenario._delete_floating_ip.assert_called_once_with(server, fip) + scenario._delete_server.assert_called_once_with(server, force=True) + + @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap") + def test__attach_floating_ip(self, mock_wrap): + scenario, server = self.get_scenario() + + netwrap = mock_wrap.return_value + netwrap.create_floating_ip.return_value = { + "id": "foo_id", "ip": "foo_ip"} + + scenario._attach_floating_ip( + server, floating_network="bar_network") + + mock_wrap.assert_called_once_with(scenario.clients) + netwrap.create_floating_ip.assert_called_once_with( + ext_network="bar_network", int_network="foo_net", + tenant_id="foo_tenant", fixed_ip="foo_ip") + + scenario._associate_floating_ip.assert_called_once_with( + server, "foo_ip", fixed_address="foo_ip") + + @mock.patch(VMTASKS_UTILS + ".network_wrapper.wrap") + def test__delete_floating_ip(self, mock_wrap): + scenario, server = self.get_scenario() + + _check_addr = mock.Mock(return_value=True) + 
scenario.check_ip_address = mock.Mock(return_value=_check_addr) + scenario._dissociate_floating_ip = mock.Mock() + + scenario._delete_floating_ip( + server, fip={"id": "foo_id", "ip": "foo_ip"}) + + scenario.check_ip_address.assert_called_once_with( + "foo_ip") + _check_addr.assert_called_once_with(server) + scenario._dissociate_floating_ip.assert_called_once_with( + server, "foo_ip") + mock_wrap.assert_called_once_with(scenario.clients) + mock_wrap.return_value.delete_floating_ip.assert_called_once_with( + "foo_id", wait=True) diff --git a/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py b/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py new file mode 100644 index 00000000..cd87fdbc --- /dev/null +++ b/tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py @@ -0,0 +1,81 @@ +# Copyright 2013: Rackspace UK +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import mock + +from rally import exceptions +from rally.plugins.openstack.scenarios.vm import vmtasks +from tests.unit import test + + +class VMTasksTestCase(test.TestCase): + + def setUp(self): + super(VMTasksTestCase, self).setUp() + self.scenario = vmtasks.VMTasks( + context={"user": {"keypair": {"name": "keypair_name"}}}) + self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True} + self.scenario._boot_server_with_fip = mock.Mock( + return_value=("foo_server", self.ip)) + self.scenario._delete_server_with_fip = mock.Mock() + self.scenario._create_volume = mock.Mock( + return_value=mock.Mock(id="foo_volume")) + self.scenario._run_command = mock.MagicMock( + return_value=(0, "\"foo_out\"", "foo_err")) + + def test_boot_runcommand_delete(self): + self.scenario.boot_runcommand_delete( + "foo_image", "foo_flavor", "foo_script", + "foo_interpreter", "foo_username", + password="foo_password", + use_floating_ip="use_fip", + floating_network="ext_network", + force_delete="foo_force", + volume_args={"size": 16}, + foo_arg="foo_value") + + self.scenario._create_volume.assert_called_once_with( + 16, imageRef=None) + self.scenario._boot_server_with_fip.assert_called_once_with( + "foo_image", "foo_flavor", use_floating_ip="use_fip", + floating_network="ext_network", key_name="keypair_name", + block_device_mapping={"vdrally": "foo_volume:::1"}, + foo_arg="foo_value") + + self.scenario._run_command.assert_called_once_with( + "foo_ip", 22, "foo_username", "foo_password", + "foo_interpreter", "foo_script") + self.scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", self.ip, force_delete="foo_force") + + def test_boot_runcommand_delete_script_fails(self): + self.scenario._run_command = mock.MagicMock( + return_value=(1, "\"foo_out\"", "foo_err")) + self.assertRaises(exceptions.ScriptError, + self.scenario.boot_runcommand_delete, + "foo_image", "foo_flavor", "foo_interpreter", + "foo_script", "foo_username") + 
self.scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", self.ip, force_delete=False) + + @mock.patch("rally.plugins.openstack.scenarios.vm.vmtasks.json") + def test_boot_runcommand_delete_json_fails(self, mock_json): + mock_json.loads.side_effect = ValueError() + self.assertRaises(exceptions.ScriptError, + self.scenario.boot_runcommand_delete, + "foo_image", "foo_flavor", "foo_interpreter", + "foo_script", "foo_username") + self.scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", self.ip, force_delete=False)