From 292a6775d7c29718f22d4eaa11f6aa0a168c8f28 Mon Sep 17 00:00:00 2001
From: Yaroslav Lobankov
Date: Mon, 21 Dec 2015 16:40:50 +0300
Subject: [PATCH] [Verify] Adding "xfail" mechanism for Tempest tests

This mechanism allows us to list some tests, that are expected to fail,
in a YAML file and these tests will have "xfail" status instead of
"fail". Tests that are in the YAML file and at the same time have
"success" status will be marked as "uxsuccess".

Test test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5] was
enabled to test out "xfail" mechanism. This test fails because VNC
console is disabled in Devstack that is used for Tempest tests. So the
mentioned test was added to expected_failures.yaml.

Change-Id: If5a90ea6172f61eaadd640fea284aef4df7782bc
---
 etc/rally.bash_completion |  2 +-
 tests/ci/rally_verify.py  | 48 ++++++++++++++++++++++++++++++++-------
 2 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/etc/rally.bash_completion b/etc/rally.bash_completion
index cff00610..8f1675a1 100644
--- a/etc/rally.bash_completion
+++ b/etc/rally.bash_completion
@@ -56,7 +56,7 @@ _rally()
     OPTS["verify_results"]="--uuid --html --json --output-file"
     OPTS["verify_show"]="--uuid --sort-by --detailed"
     OPTS["verify_showconfig"]="--deployment"
-    OPTS["verify_start"]="--deployment --set --regex --tests-file --tempest-config --no-use --system-wide-install --concurrency"
+    OPTS["verify_start"]="--deployment --set --regex --tests-file --tempest-config --xfails-file --no-use --system-wide-install --concurrency"
     OPTS["verify_uninstall"]="--deployment"
     OPTS["verify_use"]="--verification"
diff --git a/tests/ci/rally_verify.py b/tests/ci/rally_verify.py
index c6ce8b5b..eb73b003 100755
--- a/tests/ci/rally_verify.py
+++ b/tests/ci/rally_verify.py
@@ -19,6 +19,8 @@ import os
 import subprocess
 import sys
 
+import yaml
+
 from rally.cli import envutils
 from rally.ui import utils
 
@@ -34,6 +36,13 @@ MODES_PARAMETERS = {
 
 BASE_DIR = "rally-verify"
 
+EXPECTED_FAILURES_FILE = "expected_failures.yaml"
+EXPECTED_FAILURES = {
+    "tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
+    "test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
+    "This test fails because 'novnc' console type is unavailable."
+}
+
 # NOTE(andreykurilin): this variable is used to generate output file names
 # with prefix ${CALL_COUNT}_ .
 _call_count = 0
@@ -47,10 +56,20 @@ def call_rally(cmd, print_output=False, output_type=None):
     global _call_count
     _call_count += 1
 
-    data = {"cmd": "rally --rally-debug %s " % cmd,
-            "stdout_file": "%(base)s/%(prefix)s_%(cmd)s.txt.gz" % {
-                "base": BASE_DIR, "prefix": _call_count,
-                "cmd": cmd.replace(" ", "_")}}
+    data = {"cmd": "rally --rally-debug %s" % cmd}
+    stdout_file = "{base}/{prefix}_{cmd}.txt.gz"
+
+    if "--xfails-file" in cmd:
+        cmd_items = cmd.split()
+        for num, item in enumerate(cmd_items):
+            if EXPECTED_FAILURES_FILE in item:
+                cmd_items[num] = os.path.basename(item)
+                break
+        cmd = " ".join(cmd_items)
+
+    data.update({"stdout_file": stdout_file.format(base=BASE_DIR,
+                                                   prefix=_call_count,
+                                                   cmd=cmd.replace(" ", "_"))})
 
     if output_type:
         data["output_file"] = data["stdout_file"].replace(
@@ -88,6 +107,14 @@ def call_rally(cmd, print_output=False, output_type=None):
     return data
 
 
+def create_file_with_xfails():
+    """Create a YAML file with a list of tests that are expected to fail."""
+    with open(os.path.join(BASE_DIR, EXPECTED_FAILURES_FILE), "wb") as f:
+        yaml.dump(EXPECTED_FAILURES, f, default_flow_style=False)
+
+    return os.path.join(os.getcwd(), BASE_DIR, EXPECTED_FAILURES_FILE)
+
+
 def launch_verification_once(launch_parameters):
     """Launch verification and show results in different formats."""
     results = call_rally("verify start %s" % launch_parameters)
@@ -152,13 +179,18 @@ def main():
     render_vars["genconfig"] = call_rally("verify genconfig")
     render_vars["showconfig"] = call_rally("verify showconfig")
 
+    # Create a file with a list of tests that are expected to fail
+    xfails_file_path = create_file_with_xfails()
+
     # Launch verification
-    render_vars["verifications"].append(launch_verification_once(
-        MODES_PARAMETERS[args.mode]))
+    launch_params = "%s --xfails-file %s" % (
+        MODES_PARAMETERS[args.mode], xfails_file_path)
+    render_vars["verifications"].append(
+        launch_verification_once(launch_params))
 
     if args.compare:
-        render_vars["verifications"].append(launch_verification_once(
-            MODES_PARAMETERS[args.mode]))
+        render_vars["verifications"].append(
+            launch_verification_once(launch_params))
         render_vars["compare"] = do_compare(
             render_vars["verifications"][-2]["uuid"],
             render_vars["verifications"][-1]["uuid"])