From e199f7bc38ad4bc8aedf655c23693763037933ac Mon Sep 17 00:00:00 2001 From: Marian Gasparovic Date: Fri, 17 Apr 2020 16:54:47 +0000 Subject: [PATCH] test designate resolving from VM Change-Id: I7e35aa387a6c488ffc6532a1e5d3eb9387dc7963 --- CHANGELOG.rst | 3 + rally_openstack/common/validators.py | 27 ++++ rally_openstack/task/cleanup/resources.py | 6 - .../task/contexts/designate/zones.py | 23 ++- .../task/scenarios/designate/utils.py | 4 + rally_openstack/task/scenarios/vm/vmtasks.py | 111 ++++++++++++++ .../vm/check-designate-dns-resolving.json | 40 +++++ .../vm/check-designate-dns-resolving.yaml | 28 ++++ tests/unit/common/test_validators.py | 63 +++++++- .../task/contexts/designate/test_zones.py | 50 +++++++ tests/unit/task/scenarios/vm/test_vmtasks.py | 137 ++++++++++++++++++ 11 files changed, 484 insertions(+), 8 deletions(-) create mode 100644 samples/tasks/scenarios/vm/check-designate-dns-resolving.json create mode 100644 samples/tasks/scenarios/vm/check-designate-dns-resolving.yaml diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 707aabb8..6c0bbaf6 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -33,6 +33,9 @@ Added * Regular automated builds for `docker image `_ +* VMTasks.check_designate_dns_resolving scenario which tests resolving + hostname from within a VM using existing designate DNS. 
+ Changed ~~~~~~~ diff --git a/rally_openstack/common/validators.py b/rally_openstack/common/validators.py index e887f60c..77dcda39 100644 --- a/rally_openstack/common/validators.py +++ b/rally_openstack/common/validators.py @@ -618,3 +618,30 @@ class WorkbookContainsWorkflowValidator(validators.FileExistsValidator): if wf_name not in wb_def["workflows"]: self.fail("workflow '%s' not found in the definition '%s'" % (wf_name, wb_def)) + + +@validation.configure(name="required_context_config", platform="openstack") +class RequiredContextConfigValidator(validation.Validator): + + def __init__(self, context_name, context_config): + """Validate that context is configured according to requirements. + + :param context_name: string defining context name + :param context_config: dictionary of required key/value pairs + """ + super(RequiredContextConfigValidator, self).__init__() + self.context_name = context_name + self.context_config = context_config + + def validate(self, context, config, plugin_cls, plugin_cfg): + if self.context_name not in config.get("contexts", {}): + # fail silently. 
if it is required context, + # `required_contexts` validator should raise proper error + return + ctx_config = config["contexts"].get(self.context_name) + + for key, value in self.context_config.items(): + if key not in ctx_config or ctx_config[key] != value: + self.fail( + f"The '{self.context_name}' context " + f"expects '{self.context_config}'") diff --git a/rally_openstack/task/cleanup/resources.py b/rally_openstack/task/cleanup/resources.py index d8f37983..68f42fac 100644 --- a/rally_openstack/task/cleanup/resources.py +++ b/rally_openstack/task/cleanup/resources.py @@ -766,12 +766,6 @@ class DesignateResource(SynchronizedDeletion, base.ResourceManager): if item["name"].startswith(self.NAME_PREFIX)] -@base.resource("designate", "domains", order=next(_designate_order), - tenant_resource=True, threads=1) -class DesignateDomain(DesignateResource): - pass - - @base.resource("designate", "servers", order=next(_designate_order), admin_required=True, perform_for_admin_only=True, threads=1) class DesignateServer(DesignateResource): diff --git a/rally_openstack/task/contexts/designate/zones.py b/rally_openstack/task/contexts/designate/zones.py index 5f1c5630..2a667493 100644 --- a/rally_openstack/task/contexts/designate/zones.py +++ b/rally_openstack/task/contexts/designate/zones.py @@ -18,6 +18,7 @@ from rally_openstack.common import consts from rally_openstack.task.cleanup import manager as resource_manager from rally_openstack.task import context from rally_openstack.task.scenarios.designate import utils +from rally_openstack.task.scenarios.neutron import utils as neutron_utils @validation.add("required_platform", platform="openstack", users=True) @@ -33,12 +34,17 @@ class ZoneGenerator(context.OpenStackContext): "type": "integer", "minimum": 1 }, + "set_zone_in_network": { + "type": "boolean", + "description": "Update network with created DNS zone." 
+ } }, "additionalProperties": False } DEFAULT_CONFIG = { - "zones_per_tenant": 1 + "zones_per_tenant": 1, + "set_zone_in_network": False } def setup(self): @@ -52,6 +58,21 @@ class ZoneGenerator(context.OpenStackContext): for i in range(self.config["zones_per_tenant"]): zone = designate_util._create_zone() self.context["tenants"][tenant_id]["zones"].append(zone) + if self.config["set_zone_in_network"]: + for user, tenant_id in self._iterate_per_tenants( + self.context["users"]): + tenant = self.context["tenants"][tenant_id] + + network_update_args = { + "dns_domain": tenant["zones"][0]["name"] + } + body = {"network": network_update_args} + scenario = neutron_utils.NeutronScenario( + context={"user": user, "task": self.context["task"], + "owner_id": self.context["owner_id"]} + ) + scenario.clients("neutron").update_network( + tenant["networks"][0]["id"], body) def cleanup(self): resource_manager.cleanup(names=["designate.zones"], diff --git a/rally_openstack/task/scenarios/designate/utils.py b/rally_openstack/task/scenarios/designate/utils.py index bae162bf..3ba05eff 100644 --- a/rally_openstack/task/scenarios/designate/utils.py +++ b/rally_openstack/task/scenarios/designate/utils.py @@ -22,6 +22,10 @@ from rally_openstack.task import scenario class DesignateScenario(scenario.OpenStackScenario): """Base class for Designate scenarios with basic atomic actions.""" + # valid domain name cannot contain underscore characters + # which are used in default autogenerated names + RESOURCE_NAME_FORMAT = "s-rally-XXXXXXXX-XXXXXXXX" + @atomic.action_timer("designate.create_domain") def _create_domain(self, domain=None): """Create domain. 
diff --git a/rally_openstack/task/scenarios/vm/vmtasks.py b/rally_openstack/task/scenarios/vm/vmtasks.py index c4f91032..dbdfb83e 100644 --- a/rally_openstack/task/scenarios/vm/vmtasks.py +++ b/rally_openstack/task/scenarios/vm/vmtasks.py @@ -16,6 +16,7 @@ import json import os import pkgutil +import re from rally.common import logging from rally.common import validation @@ -23,6 +24,7 @@ from rally import exceptions from rally.plugins.common import validators from rally.task import atomic from rally.task import types +from rally.task import utils as rally_utils from rally.utils import sshutils from rally_openstack.common import consts @@ -534,3 +536,112 @@ class DDLoadTest(BootRuncommandDelete): force_delete=force_delete, wait_for_ping=wait_for_ping, max_log_length=max_log_length, **kwargs) + +@types.convert(image={"type": "glance_image"}, + flavor={"type": "nova_flavor"}) +@validation.add("image_valid_on_flavor", flavor_param="flavor", + image_param="image", fail_on_404_image=False) +@validation.add("number", param_name="port", minval=1, maxval=65535, + nullable=True, integer_only=True) +@validation.add("external_network_exists", param_name="floating_network") +@validation.add("required_services", services=[consts.Service.DESIGNATE, + consts.Service.NEUTRON, + consts.Service.NOVA]) +@validation.add("required_contexts", contexts=["network", "zones"]) +@validation.add("required_platform", platform="openstack", users=True) +@validation.add("required_context_config", context_name="zones", + context_config={"set_zone_in_network": True}) +@scenario.configure(context={"cleanup@openstack": ["designate", + "nova", "neutron"], + "keypair@openstack": {}, + "allow_ssh@openstack": None}, + name="VMTasks.check_designate_dns_resolving", + platform="openstack") +class CheckDesignateDNSResolving(vm_utils.VMScenario): + + def run(self, image, flavor, username, password=None, + floating_network=None, port=22, + use_floating_ip=True, force_delete=False, max_log_length=None, + 
**kwargs): + """Try to resolve hostname from VM against existing designate DNS. + + - requires zone context with set_zone_in_network parameter + zones: + set_zone_in_network: True + - designate IP should be in default dns_nameservers list for new + networks or it can be specified in a network context + network: + dns_nameservers: + - 8.8.8.8 + - 192.168.210.45 + + :param image: glance image name to use for the vm + :param flavor: VM flavor name + :param username: ssh username on server + :param password: Password on SSH authentication + :param floating_network: external network name, for floating ip + :param port: ssh port for SSH connection + :param use_floating_ip: bool, floating or fixed IP for SSH connection + :param force_delete: whether to use force_delete for servers + :param max_log_length: The number of tail nova console-log lines user + would like to retrieve + :param kwargs: optional args + """ + + zone = self.context["tenant"]["zones"][0]["name"] + + server, fip = self._boot_server_with_fip( + image, flavor, use_floating_ip=use_floating_ip, + floating_network=floating_network, + key_name=self.context["user"]["keypair"]["name"], + **kwargs) + + script = f"cloud-init status -w; systemd-resolve --status; "\ + f"dig $(hostname).{zone}" + + command = { + "script_inline": script, + "interpreter": "/bin/bash" + } + try: + rally_utils.wait_for_status( + server, + ready_statuses=["ACTIVE"], + update_resource=rally_utils.get_from_manager(), + ) + + code, out, err = self._run_command( + fip["ip"], port, username, password, command=command) + if code: + raise exceptions.ScriptError( + "Error running command %(command)s. " + "Error %(code)s: %(error)s" % { + "command": command, "code": code, "error": err}) + else: + if not re.findall(".*ANSWER SECTION.*", out, re.MULTILINE): + raise exceptions.ScriptError( + f"Error running {script}. 
" + f"Error: Missing ANSWER section in the output {out}") + + except (exceptions.TimeoutException, + exceptions.SSHTimeout): + console_logs = self._get_server_console_output(server, + max_log_length) + LOG.debug("VM console logs:\n%s" % console_logs) + raise + + finally: + self._delete_server_with_fip(server, fip, + force_delete=force_delete) + + self.add_output(complete={ + "title": "Script StdOut", + "chart_plugin": "TextArea", + "data": str(out).split("\n") + }) + if err: + self.add_output(complete={ + "title": "Script StdErr", + "chart_plugin": "TextArea", + "data": err.split("\n") + }) diff --git a/samples/tasks/scenarios/vm/check-designate-dns-resolving.json b/samples/tasks/scenarios/vm/check-designate-dns-resolving.json new file mode 100644 index 00000000..fa0cd85e --- /dev/null +++ b/samples/tasks/scenarios/vm/check-designate-dns-resolving.json @@ -0,0 +1,40 @@ +{ + "VMTasks.check_designate_dns_resolving": [ + { + "args": { + "flavor": { + "name": "m1.tiny" + }, + "image": { + "name": "^cirros.*-disk$" + }, + "username": "ubuntu" + }, + "context": { + "users": { + "tenants": 2, + "users_per_tenant": 3 + }, + "network": { + "dns_nameservers": [ + "8.8.8.8", + "192.168.1.12" + ] + }, + "zones": { + "set_zone_in_network": true + } + }, + "runner": { + "concurrency": 3, + "times": 6, + "type": "constant" + }, + "sla": { + "failure_rate": { + "max": 0 + } + } + } + ] +} \ No newline at end of file diff --git a/samples/tasks/scenarios/vm/check-designate-dns-resolving.yaml b/samples/tasks/scenarios/vm/check-designate-dns-resolving.yaml new file mode 100644 index 00000000..98b4f7bc --- /dev/null +++ b/samples/tasks/scenarios/vm/check-designate-dns-resolving.yaml @@ -0,0 +1,28 @@ +--- +VMTasks.check_designate_dns_resolving: +- + args: + flavor: + "name": "m1.tiny" + image: + "name": "^cirros.*-disk$" + username: "ubuntu" + + context: + users: + tenants: 2 + users_per_tenant: 3 + network: + dns_nameservers: + - 8.8.8.8 + - 192.168.1.12 + zones: + set_zone_in_network: 
True + runner: + concurrency: 3 + times: 6 + type: "constant" + + sla: + failure_rate: + max: 0 diff --git a/tests/unit/common/test_validators.py b/tests/unit/common/test_validators.py index c4b3ed4c..b7b29b31 100644 --- a/tests/unit/common/test_validators.py +++ b/tests/unit/common/test_validators.py @@ -44,7 +44,8 @@ config = dict(args={"image": {"id": "fake_id", "foo_image": {"id": "fake_image_id"} }, context={"images": {"image_name": "foo_image"}, - "api_versions@openstack": mock.MagicMock()} + "api_versions@openstack": mock.MagicMock(), + "zones": {"set_zone_in_network": True}} ) @@ -985,3 +986,63 @@ class WorkbookContainsWorkflowValidatorTestCase(test.TestCase): self.assertEqual(1, mock_open.called) self.assertEqual(1, mock_access.called) self.assertEqual(1, mock_safe_load.called) + + +@ddt.ddt +class RequiredContextConfigValidatorTestCase(test.TestCase): + + def test_validator(self): + validator = validators.RequiredContextConfigValidator( + context_name="zones", + context_config={"set_zone_in_network": True}) + cfg = { + "contexts": { + "users": { + "tenants": 1, "users_per_tenant": 1 + }, + "network": { + "dns_nameservers": ["8.8.8.8", "192.168.210.45"] + }, + "zones": {"set_zone_in_network": True} + }, + } + validator.validate({}, cfg, None, None) + + def test_validator_context_not_in_contexts(self): + validator = validators.RequiredContextConfigValidator( + context_name="zones", + context_config={"set_zone_in_network": True}) + cfg = { + "contexts": { + "users": { + "tenants": 1, "users_per_tenant": 1 + }, + "network": { + "dns_nameservers": ["8.8.8.8", "192.168.210.45"] + }, + }, + } + validator.validate({}, cfg, None, None) + + def test_validator_failed(self): + validator = validators.RequiredContextConfigValidator( + context_name="zones", + context_config={"set_zone_in_network": True}) + cfg = { + "contexts": { + "users": { + "tenants": 1, "users_per_tenant": 1 + }, + "network": { + "dns_nameservers": ["8.8.8.8", "192.168.210.45"] + }, + "zones": 
{"set_zone_in_network": False} }, } + + e = self.assertRaises( + validators.validation.ValidationError, + validator.validate, {}, cfg, None, None) + self.assertEqual( + "The 'zones' context expects '{'set_zone_in_network': True}'", + e.message) diff --git a/tests/unit/task/contexts/designate/test_zones.py b/tests/unit/task/contexts/designate/test_zones.py index 282df535..ef012d2b 100644 --- a/tests/unit/task/contexts/designate/test_zones.py +++ b/tests/unit/task/contexts/designate/test_zones.py @@ -67,6 +67,7 @@ class ZoneGeneratorTestCase(test.ScenarioTestCase): }, "zones": { "zones_per_tenant": zones_per_tenant, + "set_zone_in_network": False } }, "admin": { @@ -86,6 +87,55 @@ class ZoneGeneratorTestCase(test.ScenarioTestCase): zones_ctx.setup() self.assertEqual(new_context, self.context) + @mock.patch("%s.neutron.utils.NeutronScenario" % SCN) + @mock.patch("%s.designate.utils.DesignateScenario._create_zone" % SCN, + return_value={"id": "uuid", "name": "fake_name"}) + def test_setup_for_existing(self, mock_designate_scenario__create_zone, + mock_neutron_scenario): + tenants_count = 1 + users_per_tenant = 1 + + networks = [] + tenants = self._gen_tenants(tenants_count) + users = [] + for id_ in tenants.keys(): + networks.append( + {"id": f"foo_net_{id_}", + "tenant_id": id_, "subnets": ["foo_subnet"]}) + for i in range(users_per_tenant): + users.append({"id": i, "tenant_id": id_, + "credential": mock.MagicMock()}) + tenants["0"]["networks"] = networks + + self.context.update({ + "config": { + "users": { + "tenants": 1, + "users_per_tenant": 1, + "concurrent": 1, + }, + "zones": { + "set_zone_in_network": True + }, + "network": {} + }, + "admin": { + "credential": mock.MagicMock() + }, + "users": users, + "tenants": tenants + }) + + zones_ctx = zones.ZoneGenerator(self.context) + zones_ctx.setup() + + mock_neutron_scenario.assert_called_once() + scenario = mock_neutron_scenario.return_value + scenario.clients.assert_called_with("neutron") + neutron = 
scenario.clients.return_value + neutron.update_network.assert_called_with( + "foo_net_0", {"network": {"dns_domain": "fake_name"}}) + @mock.patch("%s.designate.zones.resource_manager.cleanup" % CTX) def test_cleanup(self, mock_cleanup): diff --git a/tests/unit/task/scenarios/vm/test_vmtasks.py b/tests/unit/task/scenarios/vm/test_vmtasks.py index a4e95098..10c2ccc6 100644 --- a/tests/unit/task/scenarios/vm/test_vmtasks.py +++ b/tests/unit/task/scenarios/vm/test_vmtasks.py @@ -290,6 +290,143 @@ class VMTasksTestCase(test.ScenarioTestCase): "title": "Workload summary"} scenario.add_output.assert_called_once_with(complete=expected) + def create_env_for_designate(self, zone_config=None): + scenario = vmtasks.CheckDesignateDNSResolving(self.context) + self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True} + scenario._boot_server_with_fip = mock.Mock( + return_value=("foo_server", self.ip)) + scenario._delete_server_with_fip = mock.Mock() + scenario._run_command = mock.MagicMock( + return_value=(0, "ANSWER SECTION", "foo_err")) + scenario.add_output = mock.Mock() + if zone_config is None: + zone_config = { + "test_existing_designate_from_VM": { + "bind_ip": "192.168.1.123" + } + } + self.context.update( + { + "config": { + "zones@openstack": zone_config + }, + "user": { + "keypair": {"name": "keypair_name"}, + "credential": mock.MagicMock() + }, + "tenant": { + "id": "0", + "name": "tenant1", + "zones": [ + {"name": "zone1.com."} + ], + "networks": [ + { + "name": "net1", + "subnets": [ + { + "name": "subnet1", + "dns_nameservers": "1.2.3.4" + } + ] + } + ] + } + } + ) + args = {"image": "some_image", "flavor": "m1.small", + "username": "chuck norris"} + return scenario, args + + @mock.patch("rally.task.utils.get_from_manager") + @mock.patch("rally.task.utils.wait_for_status") + def test_check_designate_dns_resolving_ok( + self, + mock_rally_task_utils_wait_for_status, + mock_rally_task_utils_get_from_manager): + scenario, args = self.create_env_for_designate() + 
scenario.run(**args) + + scenario._boot_server_with_fip.assert_called_once_with( + "some_image", "m1.small", floating_network=None, + key_name="keypair_name", use_floating_ip=True) + mock_rally_task_utils_wait_for_status.assert_called_once_with( + "foo_server", ready_statuses=["ACTIVE"], update_resource=mock.ANY) + scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", {"id": "foo_id", "ip": "foo_ip", + "is_floating": True}, + force_delete=False) + scenario.add_output.assert_called_with( + complete={"chart_plugin": "TextArea", + "data": [ + "foo_err"], + "title": "Script StdErr"}) + + @mock.patch("rally.task.utils.get_from_manager") + @mock.patch("rally.task.utils.wait_for_status") + def test_test_existing_designate_from_vm_command_timeout( + self, + mock_rally_task_utils_wait_for_status, + mock_rally_task_utils_get_from_manager): + scenario, _ = self.create_env_for_designate() + + scenario._run_command.side_effect = exceptions.SSHTimeout() + self.assertRaises(exceptions.SSHTimeout, + scenario.run, + "foo_flavor", "foo_image", "foo_interpreter", + "foo_script", "foo_username") + scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", self.ip, force_delete=False) + self.assertFalse(scenario.add_output.called) + + @mock.patch("rally.task.utils.get_from_manager") + @mock.patch("rally.task.utils.wait_for_status") + def test_test_existing_designate_from_vm_wait_timeout( + self, + mock_rally_task_utils_wait_for_status, + mock_rally_task_utils_get_from_manager): + scenario, args = self.create_env_for_designate() + + mock_rally_task_utils_wait_for_status.side_effect = \ + exceptions.TimeoutException( + resource_type="foo_resource", + resource_name="foo_name", + resource_id="foo_id", + desired_status="foo_desired_status", + resource_status="foo_resource_status", + timeout=2) + self.assertRaises(exceptions.TimeoutException, + scenario.run, + "foo_flavor", "foo_image", "foo_interpreter", + "foo_script", "foo_username") + 
scenario._delete_server_with_fip.assert_called_once_with( + "foo_server", self.ip, force_delete=False) + self.assertFalse(scenario.add_output.called) + + @ddt.data( + {"output": (1, "x y z", "error message"), + "raises": exceptions.ScriptError}, + {"output": (0, "[1, 2, 3, 4]", ""), + "raises": exceptions.ScriptError} + ) + @ddt.unpack + def test_test_existing_designate_from_vm_add_output(self, output, + expected=None, + raises=None): + scenario, _ = self.create_env_for_designate() + + scenario._run_command.return_value = output + kwargs = {"flavor": "foo_flavor", + "image": "foo_image", + "username": "foo_username", + "password": "foo_password", + "use_floating_ip": "use_fip", + "floating_network": "ext_network", + "force_delete": "foo_force"} + + self.assertRaises(raises, scenario.run, **kwargs) + self.assertFalse(scenario.add_output.called) + @ddt.ddt class ValidCommandValidatorTestCase(test.TestCase):