[CLI] group commands in help message
In subcommands (for example, in verification) we have several groups of
methods (management, launchers, results). It would be nice to split these
groups in the help message.

Before:

    compare          Deprecated. Use `rally verify results' instead.
    detailed         Display results table of a verification with detailed errors.
    discover         Show a list of discovered tests.
    genconfig        Generate Tempest configuration file.
    import           Import Tempest tests results into the Rally database.
    install          Install Tempest.
    installplugin    Install Tempest plugin.
    list             List verification runs.
    listplugins      List all installed Tempest plugins.
    reinstall        Uninstall Tempest and install again.
    results          Display results of verifications.
    show             Display results table of a verification.
    showconfig       Show Tempest configuration file.
    start            Start verification (run Tempest tests).
    uninstall        Remove the deployment's local Tempest installation.
    uninstallplugin  Uninstall Tempest plugin.
    use              Set active verification.

After:

    genconfig        Generate Tempest configuration file.
    install          Install Tempest.
    installplugin    Install Tempest plugin.
    listplugins      List all installed Tempest plugins.
    reinstall        Uninstall Tempest and install again.
    showconfig       Show Tempest configuration file.
    uninstall        Remove the deployment's local Tempest installation.
    uninstallplugin  Uninstall Tempest plugin.

    discover         Show a list of discovered tests.
    start            Start verification (run Tempest tests).

    compare          Deprecated. Use `rally verify results' instead.
    detailed         Display results table of a verification with detailed errors.
    import-results   Import Tempest tests results into the Rally database.
    list             List verification runs.
    results          Display results of verifications.
    show             Display results table of a verification.
    use              Set active verification.

This change also transforms all "_" to "-" in CLI method names.

Change-Id: I292e71d159ee35e933119f7fb57209f071aa37d4
parent 6bbcfa1fdc
commit bb21495db7
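The following is a minimal sketch, not Rally's actual CLI code, of the two ideas in this change, built on plain argparse: command methods keep their Python names with underscores, the exposed subcommand name uses dashes, the old underscore spelling stays available as a deprecated alias, and the help output is grouped by category. All class, function, and group names here are hypothetical and only mirror the "After" listing above.

    # Sketch only; assumes an argparse-based CLI, not Rally's real internals.
    import argparse


    class VerifyCommands(object):
        """Hypothetical command class; each public method is a subcommand."""

        def start(self, args):
            print("running verification")

        def import_results(self, args):
            print("importing results")


    # Illustrative grouping, mirroring the "After" listing above.
    GROUPS = [
        ("Launchers", ["start"]),
        ("Results", ["import_results"]),
    ]


    def build_parser(commands):
        parser = argparse.ArgumentParser(
            prog="rally verify",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            # The grouped command listing is rendered manually in the epilog,
            # since argparse has no built-in notion of subcommand groups.
            epilog="\n".join(
                "%s:\n%s" % (title, "".join("  %s\n" % m.replace("_", "-")
                                            for m in methods))
                for title, methods in GROUPS),
        )
        sub = parser.add_subparsers(dest="command")
        for _, methods in GROUPS:
            for method in methods:
                dashed = method.replace("_", "-")
                # Primary name uses dashes; the old underscore form is kept
                # as a deprecated alias so existing scripts keep working.
                aliases = [method] if dashed != method else []
                cmd = sub.add_parser(dashed, aliases=aliases)
                cmd.set_defaults(func=getattr(commands, method))
        return parser


    if __name__ == "__main__":
        cmds = VerifyCommands()
        args = build_parser(cmds).parse_args()
        if getattr(args, "func", None):
            args.func(args)

With this layout, both `rally verify import-results` and the deprecated `rally verify import_results` resolve to the same method, which is the same compatibility approach the diffs below follow for `task sla-check` / `task sla_check`.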
@@ -40,6 +40,7 @@ _rally()
     OPTS["task_list"]="--deployment --all-deployments --status --uuids-only"
     OPTS["task_report"]="--tasks --out --open --html --html-static --junit"
     OPTS["task_results"]="--uuid"
+    OPTS["task_sla-check"]="--uuid --json"
     OPTS["task_sla_check"]="--uuid --json"
     OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
     OPTS["task_status"]="--uuid"
@@ -110,7 +110,7 @@ function run () {
     # NOTE(stpierre): if the sla check fails, we still want osresources.py
     # to run, so we turn off -e and save the return value
     set +e
-    rally task sla_check | tee rally-plot/sla.txt
+    rally task sla-check | tee rally-plot/sla.txt
     retval=$?
     set -e
@@ -111,7 +111,7 @@ def run_task(task, tags=None):
          "%s/%s.html" % (pub_dir, task_name)])
     run(["rally", "task", "results"],
         stdout="%s/results-%s.json" % (pub_dir, task_name))
-    status = run(["rally", "task", "sla_check"],
+    status = run(["rally", "task", "sla-check"],
                  stdout="%s/%s.sla.txt" % (pub_dir, task_name))
     run(["rally", "task", "detailed"],
         stdout="rally-plot/detailed-%s.txt" % task_name)
@@ -206,7 +206,7 @@ class TaskTestCase(unittest.TestCase):
     def test_sla_check_with_wrong_task_id(self):
         rally = utils.Rally()
         self.assertRaises(utils.RallyCliError,
-                          rally, "task sla_check --uuid %s" % FAKE_TASK_UUID)
+                          rally, "task sla-check --uuid %s" % FAKE_TASK_UUID)

     def test_status_with_wrong_task_id(self):
         rally = utils.Rally()
@@ -883,13 +883,13 @@ class SLATestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "KeystoneBasic.create_and_list_users",
              "criterion": "failure_rate",
@@ -900,7 +900,7 @@ class SLATestCase(unittest.TestCase):
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"}
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)
@@ -935,11 +935,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def _test_broken_context(self, runner):
@@ -963,11 +963,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def test_broken_context_with_constant_runner(self):
@@ -1012,20 +1012,20 @@ class SLAPerfDegrTestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_degradation=1)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "Dummy.dummy_random_action",
              "criterion": "performance_degradation",
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"},
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)