Merge "[CLI] group commands in help message"
commit 1e83e3e3db
@@ -40,6 +40,7 @@ _rally()
     OPTS["task_list"]="--deployment --all-deployments --status --uuids-only"
     OPTS["task_report"]="--tasks --out --open --html --html-static --junit"
     OPTS["task_results"]="--uuid"
+    OPTS["task_sla-check"]="--uuid --json"
     OPTS["task_sla_check"]="--uuid --json"
     OPTS["task_start"]="--deployment --task --task-args --task-args-file --tag --no-use --abort-on-sla-failure"
     OPTS["task_status"]="--uuid"
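The OPTS entries above form a table keyed by "<group>_<subcommand>" from which the completion function offers flags; the hyphenated sla-check key is added alongside the old underscore spelling, so both complete. A hypothetical Python rendering of that lookup (illustrative only, not code from the patch):

    # The completion table maps "<group>_<subcommand>" to a flag string.
    OPTS = {
        "task_sla-check": "--uuid --json",
        "task_sla_check": "--uuid --json",  # old spelling kept as an alias
    }

    def candidate_flags(group, subcommand):
        # Return the flags to offer for completion, or none if unknown.
        return OPTS.get("%s_%s" % (group, subcommand), "").split()

    print(candidate_flags("task", "sla-check"))  # ['--uuid', '--json']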
@@ -110,7 +110,7 @@ function run () {
     # NOTE(stpierre): if the sla check fails, we still want osresources.py
     # to run, so we turn off -e and save the return value
     set +e
-    rally task sla_check | tee rally-plot/sla.txt
+    rally task sla-check | tee rally-plot/sla.txt
     retval=$?
     set -e
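The pattern here is to suspend errexit, save the exit status, and restore errexit so a failed SLA check does not abort the rest of the gate script. Note that after a pipeline, `retval=$?` captures the status of the last command (`tee`) unless `pipefail` is in effect in the script. A minimal sketch of the same idea in Python terms (hypothetical, assuming the rally CLI is installed and the rally-plot directory exists):

    import subprocess

    # Run the SLA check without letting a failure abort the surrounding job,
    # mirror its output to a file (like `tee`), and keep the exit status.
    proc = subprocess.run(["rally", "task", "sla-check"],
                          capture_output=True, text=True)
    print(proc.stdout, end="")
    with open("rally-plot/sla.txt", "w") as f:
        f.write(proc.stdout)
    retval = proc.returncode  # inspected later instead of failing immediately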
@@ -111,7 +111,7 @@ def run_task(task, tags=None):
                "%s/%s.html" % (pub_dir, task_name)])
     run(["rally", "task", "results"],
         stdout="%s/results-%s.json" % (pub_dir, task_name))
-    status = run(["rally", "task", "sla_check"],
+    status = run(["rally", "task", "sla-check"],
                  stdout="%s/%s.sla.txt" % (pub_dir, task_name))
     run(["rally", "task", "detailed"],
         stdout="rally-plot/detailed-%s.txt" % task_name)
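These calls assume a `run()` helper that executes a command, optionally redirects its stdout to a file, and returns the exit status, which is why the sla-check line can save the result into `status`. A plausible sketch of that contract (hypothetical; the real helper lives elsewhere in the job scripts):

    import subprocess

    def run(cmd, stdout=None):
        # Execute cmd, optionally sending its output to a file, and hand
        # back the exit status so callers (like the sla-check step above)
        # can record success or failure without raising.
        if stdout is None:
            return subprocess.call(cmd)
        with open(stdout, "w") as out:
            return subprocess.call(cmd, stdout=out)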
@@ -206,7 +206,7 @@ class TaskTestCase(unittest.TestCase):
     def test_sla_check_with_wrong_task_id(self):
         rally = utils.Rally()
         self.assertRaises(utils.RallyCliError,
-                          rally, "task sla_check --uuid %s" % FAKE_TASK_UUID)
+                          rally, "task sla-check --uuid %s" % FAKE_TASK_UUID)

     def test_status_with_wrong_task_id(self):
         rally = utils.Rally()
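The functional tests treat `utils.Rally()` as a callable that shells out to the CLI, returns its output (parsed as JSON when `getjson=True`), and raises `utils.RallyCliError` on a non-zero exit. A rough sketch of that contract, inferred from the usage in these tests rather than taken from the real helper:

    import json
    import shlex
    import subprocess

    class RallyCliError(Exception):
        def __init__(self, output):
            super().__init__(output)
            self.output = output  # tests inspect this attribute

    class Rally(object):
        def __call__(self, args, getjson=False):
            # Invoke the rally CLI; raise on failure so tests can use
            # assertRaises, and optionally parse the output as JSON.
            proc = subprocess.run(["rally"] + shlex.split(args),
                                  capture_output=True, text=True)
            if proc.returncode != 0:
                raise RallyCliError(proc.stdout + proc.stderr)
            return json.loads(proc.stdout) if getjson else proc.stdout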
@@ -883,13 +883,13 @@ class SLATestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_seconds_per_iteration=0.001)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "KeystoneBasic.create_and_list_users",
              "criterion": "failure_rate",
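Note the `assertRaises(exc, callable, *args)` form used here: the callable and its arguments are passed separately so `assertRaises` can invoke it inside its own try/except. A minimal illustration of the same form:

    import unittest

    class AssertRaisesExample(unittest.TestCase):
        def test_raises(self):
            # Equivalent to calling int("not a number") and
            # expecting ValueError to be raised.
            self.assertRaises(ValueError, int, "not a number")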
@@ -900,7 +900,7 @@ class SLATestCase(unittest.TestCase):
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"}
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)

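`mock.ANY` compares equal to any value, which is why it can stand in for nondeterministic fields such as `detail` while the rest of the JSON structure is checked exactly with `assertEqual`. For example (values here are made up):

    from unittest import mock

    # mock.ANY == <anything> is True, so only the other keys are
    # really compared by dict equality.
    assert ({"detail": "3.2s elapsed", "status": "PASS"}
            == {"detail": mock.ANY, "status": "PASS"})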
@@ -935,11 +935,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def _test_broken_context(self, runner):
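The try/except/else shape above is the standard way to assert on an exception's payload: the except branch inspects the error's output, and the else branch fails the test if nothing was raised at all. Schematically (with a stand-in error, not the rally helper):

    import unittest

    class TryExceptElseExample(unittest.TestCase):
        def test_error_payload(self):
            try:
                int("oops")
            except ValueError as expected_error:
                # Inspect the payload, as the SLA tests do with
                # expected_error.output.
                self.assertIn("oops", str(expected_error))
            else:
                self.fail("int() should have raised ValueError")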
@@ -963,11 +963,11 @@ class SLAExtraFlagsTestCase(unittest.TestCase):
              "pos": 0, "status": "FAIL"}
         ]
         try:
-            rally("task sla_check --json", getjson=True)
+            rally("task sla-check --json", getjson=True)
         except utils.RallyCliError as expected_error:
             self.assertEqual(json.loads(expected_error.output), expected)
         else:
-            self.fail("`rally task sla_check` command should return non-zero "
+            self.fail("`rally task sla-check` command should return non-zero "
                       "exit code")

     def test_broken_context_with_constant_runner(self):
@@ -1012,20 +1012,20 @@ class SLAPerfDegrTestCase(unittest.TestCase):
         cfg = self._get_sample_task_config(max_degradation=1)
         config = utils.TaskConfig(cfg)
         rally("task start --task %s" % config.filename)
-        self.assertRaises(utils.RallyCliError, rally, "task sla_check")
+        self.assertRaises(utils.RallyCliError, rally, "task sla-check")

     def test_sla_success(self):
         rally = utils.Rally()
         config = utils.TaskConfig(self._get_sample_task_config())
         rally("task start --task %s" % config.filename)
-        rally("task sla_check")
+        rally("task sla-check")
         expected = [
             {"benchmark": "Dummy.dummy_random_action",
              "criterion": "performance_degradation",
              "detail": mock.ANY,
              "pos": 0, "status": "PASS"},
         ]
-        data = rally("task sla_check --json", getjson=True)
+        data = rally("task sla-check --json", getjson=True)
         self.assertEqual(expected, data)