From bb21495db743f35b1527c69b24a11b800821745f Mon Sep 17 00:00:00 2001
From: Andrey Kurilin
Date: Mon, 31 Oct 2016 20:09:33 +0100
Subject: [PATCH] [CLI] group commands in help message

In subcommands (for example, in verification) we have several groups of
methods (management, launchers, results). It would be nice to split these
groups in the help message.

Before:

   compare           Deprecated. Use `rally verify results' instead.
   detailed          Display results table of a verification with detailed errors.
   discover          Show a list of discovered tests.
   genconfig         Generate Tempest configuration file.
   import            Import Tempest tests results into the Rally database.
   install           Install Tempest.
   installplugin     Install Tempest plugin.
   list              List verification runs.
   listplugins       List all installed Tempest plugins.
   reinstall         Uninstall Tempest and install again.
   results           Display results of verifications.
   show              Display results table of a verification.
   showconfig        Show Tempest configuration file.
   start             Start verification (run Tempest tests).
   uninstall         Remove the deployment's local Tempest installation.
   uninstallplugin   Uninstall Tempest plugin.
   use               Set active verification.

After:

   genconfig         Generate Tempest configuration file.
   install           Install Tempest.
   installplugin     Install Tempest plugin.
   listplugins       List all installed Tempest plugins.
   reinstall         Uninstall Tempest and install again.
   showconfig        Show Tempest configuration file.
   uninstall         Remove the deployment's local Tempest installation.
   uninstallplugin   Uninstall Tempest plugin.

   discover          Show a list of discovered tests.
   start             Start verification (run Tempest tests).

   compare           Deprecated. Use `rally verify results' instead.
   detailed          Display results table of a verification with detailed errors.
   import-results    Import Tempest tests results into the Rally database.
   list              List verification runs.
   results           Display results of verifications.
   show              Display results table of a verification.
   use               Set active verification.

Also, this change transforms all `_` to `-` in CLI method names.
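For illustration only, the renaming is a plain underscore-to-dash
mapping; a minimal sketch (the helper below is hypothetical, not part
of this patch):

    def to_cli_name(method_name):
        # e.g. "sla_check" -> "sla-check", "import_results" -> "import-results"
        return method_name.replace("_", "-")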
Change-Id: I292e71d159ee35e933119f7fb57209f071aa37d4
---
 etc/rally.bash_completion         |  1 +
 tests/ci/rally_gate_functions.sh  |  2 +-
 tests/ci/wip-rally-gate.py        |  2 +-
 tests/functional/test_cli_task.py | 22 +++++++++++-----------
 4 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/etc/rally.bash_completion b/etc/rally.bash_completion
index f17b52dd..73983254 100644
--- a/etc/rally.bash_completion
+++ b/etc/rally.bash_completion
@@ -40,6 +40,7 @@ _rally()
     OPTS["task_list"]="--deployment --all-deployments --status --uuids-only"
     OPTS["task_report"]="--tasks --out --open --html --html-static --junit"
     OPTS["task_results"]="--uuid"
+    OPTS["task_sla-check"]="--uuid --json"
     OPTS["task_sla_check"]="--uuid --json"
     OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
     OPTS["task_status"]="--uuid"
diff --git a/tests/ci/rally_gate_functions.sh b/tests/ci/rally_gate_functions.sh
index dbaba9ee..a1b50253 100644
--- a/tests/ci/rally_gate_functions.sh
+++ b/tests/ci/rally_gate_functions.sh
@@ -110,7 +110,7 @@ function run () {
     # NOTE(stpierre): if the sla check fails, we still want osresources.py
     # to run, so we turn off -e and save the return value
     set +e
-    rally task sla_check | tee rally-plot/sla.txt
+    rally task sla-check | tee rally-plot/sla.txt
     retval=$?
     set -e
diff --git a/tests/ci/wip-rally-gate.py b/tests/ci/wip-rally-gate.py
index 37930cef..4baee40a 100755
--- a/tests/ci/wip-rally-gate.py
+++ b/tests/ci/wip-rally-gate.py
@@ -111,7 +111,7 @@ def run_task(task, tags=None):
          "%s/%s.html" % (pub_dir, task_name)])
     run(["rally", "task", "results"],
         stdout="%s/results-%s.json" % (pub_dir, task_name))
-    status = run(["rally", "task", "sla_check"],
+    status = run(["rally", "task", "sla-check"],
                  stdout="%s/%s.sla.txt" % (pub_dir, task_name))
     run(["rally", "task", "detailed"],
         stdout="rally-plot/detailed-%s.txt" % task_name)
diff --git a/tests/functional/test_cli_task.py b/tests/functional/test_cli_task.py
index e8d9da49..4be26d3c 100644
--- a/tests/functional/test_cli_task.py
+++ b/tests/functional/test_cli_task.py
@@ -206,7 +206,7 @@ class TaskTestCase(unittest.TestCase):
     def test_sla_check_with_wrong_task_id(self):
         rally = utils.Rally()
         self.assertRaises(utils.RallyCliError,
-                          rally, "task sla_check --uuid %s" % FAKE_TASK_UUID)
+                          rally, "task sla-check --uuid %s" % FAKE_TASK_UUID)

     def test_status_with_wrong_task_id(self):
         rally = utils.Rally()
@@ -883,13 +883,13 @@ class SLATestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "KeystoneBasic.create_and_list_users",
              "criterion": "failure_rate",
@@ -900,7 +900,7 @@ class SLATestCase(unittest.TestCase):
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"}
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)

         self.assertEqual(expected, data)
@@ -935,11 +935,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def _test_broken_context(self, runner):
@@ -963,11 +963,11 @@
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def test_broken_context_with_constant_runner(self):
@@ -1012,20 +1012,20 @@ class SLAPerfDegrTestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_degradation=1)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
sla-check") expected = [ {"benchmark": "Dummy.dummy_random_action", "criterion": "performance_degradation", "detail": mock.ANY, "pos": 0, "status": "PASS"}, ] - data = rally("task sla_check --json", getjson=True) + data = rally("task sla-check --json", getjson=True) self.assertEqual(expected, data)