Check verification component refactoring

Change-Id: Icbfc6173e5c2a45cc7779e0ae3b37b9910b803a5
Yaroslav Lobankov 2016-12-07 15:57:28 +04:00
parent f080abfbab
commit 3389d36cb6


@@ -21,8 +21,6 @@ import subprocess
 import sys
 import uuid

-import yaml
-
 from rally.cli import envutils
 from rally.common import objects
 from rally import osclients
@@ -31,22 +29,22 @@ from rally.ui import utils
 LOG = logging.getLogger(__name__)
 LOG.setLevel(logging.DEBUG)

-MODES_PARAMETERS = {
-    "full": "--set full",
-    "light": "--set smoke"
-}
-
 BASE_DIR = "rally-verify"
-EXPECTED_FAILURES_FILE = "expected_failures.yaml"
-EXPECTED_FAILURES = {
-    "tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
-    "test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
-        "This test fails because 'novnc' console type is unavailable."
-}
-TEMPEST_PLUGIN = "https://git.openstack.org/openstack/keystone"
+MODES = {"full": "--pattern set=full", "light": "--pattern set=smoke"}
+DEPLOYMENT_NAME = "devstack"
+VERIFIER_TYPE = "tempest"
+VERIFIER_SOURCE = "https://git.openstack.org/openstack/tempest"
+VERIFIER_EXT_REPO = "https://git.openstack.org/openstack/keystone"
+VERIFIER_EXT_NAME = "keystone_tests"
+SKIPPED_TESTS = (
+    "tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON."
+    "test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]: "
+    "This test was skipped intentionally")
+XFAILED_TESTS = (
+    "tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
+    "test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]: "
+    "This test fails because 'novnc' console type is unavailable")

 # NOTE(andreykurilin): this variable is used to generate output file names
 # with prefix ${CALL_COUNT}_ .
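For reference, an illustrative sketch (not part of the patch): the new MODES mapping is what later turns the job's --mode flag into the verifier's test-selection pattern, and call_rally() prefixes every command with "rally --rally-debug", so the expansion looks roughly as follows.

# Illustrative sketch only; mirrors the MODES constant above and the
# "rally --rally-debug" prefix added by call_rally() below.
MODES = {"full": "--pattern set=full", "light": "--pattern set=smoke"}

for mode, flag in sorted(MODES.items()):
    print("--mode %s  ->  rally --rally-debug verify list-verifier-tests %s"
          % (mode, flag))
# --mode full   ->  rally --rally-debug verify list-verifier-tests --pattern set=full
# --mode light  ->  rally --rally-debug verify list-verifier-tests --pattern set=smoke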
@@ -57,45 +55,40 @@ _return_status = 0
 def call_rally(cmd, print_output=False, output_type=None):
+    """Execute a Rally command and write result in files."""
     global _return_status
     global _call_count
     _call_count += 1

     data = {"cmd": "rally --rally-debug %s" % cmd}
-    stdout_file = "{base}/{prefix}_{cmd}.txt.gz"
+    stdout_file = "{base_dir}/{prefix}_{cmd}.txt.gz"

-    if "--xfails-file" in cmd or "--source" in cmd:
-        cmd_items = cmd.split()
-        for num, item in enumerate(cmd_items):
-            if EXPECTED_FAILURES_FILE in item or TEMPEST_PLUGIN in item:
-                cmd_items[num] = os.path.basename(item)
-                break
-        cmd = " ".join(cmd_items)
-    data.update({"stdout_file": stdout_file.format(base=BASE_DIR,
+    cmd = cmd.replace("/", "_")
+    data.update({"stdout_file": stdout_file.format(base_dir=BASE_DIR,
                                                    prefix=_call_count,
                                                    cmd=cmd.replace(" ", "_"))})

     if output_type:
         data["output_file"] = data["stdout_file"].replace(
             ".txt.", ".%s." % output_type)
-        data["cmd"] += " --%(type)s --output-file %(file)s" % {
-            "type": output_type, "file": data["output_file"]}
+        data["cmd"] += " --file %s" % data["output_file"]
+        if output_type == "html":
+            data["cmd"] += " --html"

     try:
-        LOG.info("Try to launch `%s`." % data["cmd"])
-        stdout = subprocess.check_output(data["cmd"], shell=True,
+        LOG.info("Try to execute `%s`." % data["cmd"])
+        stdout = subprocess.check_output(data["cmd"].split(),
                                          stderr=subprocess.STDOUT)
     except subprocess.CalledProcessError as e:
-        LOG.error("Command `%s` is failed." % data["cmd"])
+        LOG.error("Command `%s` failed." % data["cmd"])
         stdout = e.output
         data["status"] = "fail"
         _return_status = 1
     else:
-        data["status"] = "pass"
+        data["status"] = "success"

     if output_type:
-        # lets gzip results
+        # let's gzip results
         with open(data["output_file"]) as f:
             output = f.read()
         with gzip.open(data["output_file"], "wb") as f:
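For reference, an illustrative sketch (not part of the patch): the file-name bookkeeping that the refactored call_rally() performs, shown stand-alone for a hypothetical first call such as call_rally("verify list-plugins", output_type="json").

# Stand-alone mirror of the naming logic above (values are illustrative).
BASE_DIR = "rally-verify"
_call_count = 1                         # first call_rally() invocation

cmd = "verify list-plugins".replace("/", "_")
stdout_file = "{base_dir}/{prefix}_{cmd}.txt.gz".format(
    base_dir=BASE_DIR, prefix=_call_count, cmd=cmd.replace(" ", "_"))
output_file = stdout_file.replace(".txt.", ".json.")

print(stdout_file)   # rally-verify/1_verify_list-plugins.txt.gz
print(output_file)   # rally-verify/1_verify_list-plugins.json.gz
# and the executed command becomes roughly:
#   rally --rally-debug verify list-plugins --file rally-verify/1_verify_list-plugins.json.gz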
@@ -112,39 +105,34 @@ def call_rally(cmd, print_output=False, output_type=None):
     return data


-def create_file_with_xfails():
-    """Create a YAML file with a list of tests that are expected to fail."""
-    with open(os.path.join(BASE_DIR, EXPECTED_FAILURES_FILE), "wb") as f:
-        yaml.dump(EXPECTED_FAILURES, f, default_flow_style=False)
-    return os.path.join(os.getcwd(), BASE_DIR, EXPECTED_FAILURES_FILE)
-
-
-def launch_verification_once(launch_parameters):
-    """Launch verification and show results in different formats."""
-    results = call_rally("verify start %s" % launch_parameters)
+def start_verification(args):
+    """Start a verification, show results and generate reports."""
+    results = call_rally("verify start %s" % args)
     results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
-    results["result_in_html"] = call_rally("verify results",
-                                           output_type="html")
-    results["result_in_json"] = call_rally("verify results",
-                                           output_type="json")
     results["show"] = call_rally("verify show")
     results["show_detailed"] = call_rally("verify show --detailed")
+    for ot in ("json", "html"):
+        results[ot] = call_rally("verify report", output_type=ot)
     # NOTE(andreykurilin): we need to clean verification uuid from global
     # environment to be able to load it next time(for another verification).
     envutils.clear_global(envutils.ENV_VERIFICATION)
     return results


-def do_compare(uuid_1, uuid_2):
-    """Compare and save results in different formats."""
+def write_file(filename, data):
+    """Create a file and write some data to it."""
+    path = os.path.join(BASE_DIR, filename)
+    with open(path, "wb") as f:
+        f.write(data)
+    return path
+
+
+def generate_trends_reports(uuid_1, uuid_2):
+    """Generate trends reports."""
     results = {}
-    for output_format in ("csv", "html", "json"):
-        cmd = "verify results --uuid %(uuid-1)s %(uuid-2)s" % {
-            "uuid-1": uuid_1,
-            "uuid-2": uuid_2
-        }
-        results[output_format] = call_rally(cmd, output_type=output_format)
+    for ot in ("json", "html"):
+        results[ot] = call_rally(
+            "verify report --uuid %s %s" % (uuid_1, uuid_2), output_type=ot)
     return results
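For reference, an illustrative sketch (not part of the patch): the command string that generate_trends_reports() above ends up handing to call_rally() for the HTML format; the UUIDs and output file are placeholders.

# Illustrative sketch only; mirrors generate_trends_reports() above.
uuid_1, uuid_2 = "<first-verification-uuid>", "<second-verification-uuid>"
cmd = "verify report --uuid %s %s" % (uuid_1, uuid_2)
# call_rally(cmd, output_type="html") then runs roughly:
print("rally --rally-debug %s --file <output-file>.html.gz --html" % cmd)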
@@ -155,35 +143,29 @@ def render_page(**render_vars):
 def main():
-    # NOTE(andreykurilin): We need to stop checking verification component to
-    # be able to split forthcoming redesign by several patches.
-    return 0
-
     parser = argparse.ArgumentParser(description="Launch rally-verify job.")
-    parser.add_argument(
-        "--mode",
-        type=str,
-        default="light",
-        help="Mode of job. The 'full' mode corresponds to the full set of "
-             "Tempest tests. The 'light' mode corresponds to the smoke set "
-             "of Tempest tests.",
-        choices=MODES_PARAMETERS.keys())
-    parser.add_argument(
-        "--compare",
-        action="store_true",
-        help="Launch 2 verifications and compare them.")
-    parser.add_argument(
-        "--ctx-create-resources",
-        action="store_true",
-        help="Make Tempest context create needed resources for the tests.")
+    parser.add_argument("--mode", type=str, default="light",
+                        help="Mode of job. The 'full' mode corresponds to the "
+                             "full set of verifier tests. The 'light' mode "
+                             "corresponds to the smoke set of verifier tests.",
+                        choices=MODES.keys())
+    parser.add_argument("--compare", action="store_true",
+                        help="Start the second verification to generate a "
+                             "trends report for two verifications.")
+    # TODO(ylobankov): Remove hard-coded Tempest related things and make it
+    # configurable.
+    parser.add_argument("--ctx-create-resources", action="store_true",
+                        help="Make Tempest context create needed resources "
+                             "for the tests.")
     args = parser.parse_args()

     if not os.path.exists("%s/extra" % BASE_DIR):
         os.makedirs("%s/extra" % BASE_DIR)

-    # Check deployment
-    call_rally("deployment use --deployment devstack", print_output=True)
-    call_rally("deployment check", print_output=True)
+    # Choose and check the deployment
+    call_rally("deployment use --deployment %s" % DEPLOYMENT_NAME)
+    call_rally("deployment check")

     config = json.loads(
         subprocess.check_output(["rally", "deployment", "config"]))
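For reference, an illustrative, stand-alone sketch (not part of the patch): the job's argument surface after this change, reduced to the essentials so the defaults and the two modes are easy to see.

# Minimal stand-alone mirror of the parser above (help texts omitted).
import argparse

MODES = {"full": "--pattern set=full", "light": "--pattern set=smoke"}

parser = argparse.ArgumentParser(description="Launch rally-verify job.")
parser.add_argument("--mode", type=str, default="light", choices=MODES.keys())
parser.add_argument("--compare", action="store_true")
parser.add_argument("--ctx-create-resources", action="store_true")

print(parser.parse_args([]))
# e.g. Namespace(compare=False, ctx_create_resources=False, mode='light')
print(parser.parse_args(["--mode", "full", "--compare"]).mode)   # full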
@@ -234,58 +216,75 @@ def main():
                      "= %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram))
             clients.nova().flavors.create(**params)

-    render_vars = {"verifications": []}
+    render_vars = dict(verifications=[])

-    # Install the latest Tempest version
-    render_vars["install"] = call_rally("verify install")
-    # Get Rally deployment ID
-    rally_deployment_id = envutils.get_global(envutils.ENV_DEPLOYMENT)
-    # Get the penultimate Tempest commit ID
-    tempest_dir = (
-        "/home/jenkins/.rally/tempest/for-deployment-%s" % rally_deployment_id)
-    tempest_commit_id = subprocess.check_output(
-        ["git", "log", "-n", "1", "--pretty=format:'%H'"],
-        cwd=tempest_dir).strip()
-    # Install the penultimate Tempest version
-    render_vars["reinstall"] = call_rally(
-        "verify reinstall --version %s" % tempest_commit_id)
-    # Install a Tempest plugin
-    render_vars["installplugin"] = call_rally(
-        "verify installplugin --source %s" % TEMPEST_PLUGIN)
-    # List installed Tempest plugins
-    render_vars["listplugins"] = call_rally("verify listplugins")
-    # Discover tests depending on Tempest suite
-    discover_cmd = "verify discover"
-    if args.mode == "light":
-        discover_cmd += " --pattern smoke"
-    render_vars["discover"] = call_rally(discover_cmd)
-    # Generate and show Tempest config file
-    render_vars["genconfig"] = call_rally("verify genconfig")
-    render_vars["showconfig"] = call_rally("verify showconfig")
-    # Create a file with a list of tests that are expected to fail
-    xfails_file_path = create_file_with_xfails()
-    # Launch verification
-    launch_params = "%s --xfails-file %s" % (
-        MODES_PARAMETERS[args.mode], xfails_file_path)
-    render_vars["verifications"].append(
-        launch_verification_once(launch_params))
+    # List plugins for verifiers management
+    render_vars["list_plugins"] = call_rally("verify list-plugins")
+
+    # Create a verifier
+    render_vars["create_verifier"] = call_rally(
+        "verify create-verifier --type %s --name my-verifier --source %s"
+        % (VERIFIER_TYPE, VERIFIER_SOURCE))
+
+    # List verifiers
+    render_vars["list_verifiers"] = call_rally("verify list-verifiers")
+
+    # Get verifier ID
+    verifier_id = envutils.get_global(envutils.ENV_VERIFIER)
+    # Get the penultimate verifier commit ID
+    repo_dir = os.path.join(
+        os.path.expanduser("~"),
+        ".rally/verification/verifier-%s/repo" % verifier_id)
+    p_commit_id = subprocess.check_output(
+        ["git", "log", "-n", "1", "--pretty=format:%H"], cwd=repo_dir).strip()
+    # Switch the verifier to the penultimate version
+    render_vars["update_verifier"] = call_rally(
+        "verify update-verifier --version %s --update-venv" % p_commit_id)
+
+    # Generate and show the verifier config file
+    render_vars["configure_verifier"] = call_rally(
+        "verify configure-verifier --show")
+
+    # Add a verifier extension
+    render_vars["add_verifier_ext"] = call_rally(
+        "verify add-verifier-ext --source %s" % VERIFIER_EXT_REPO)
+
+    # List verifier extensions
+    render_vars["list_verifier_exts"] = call_rally("verify list-verifier-exts")
+
+    # List verifier tests
+    render_vars["list_verifier_tests"] = call_rally(
+        "verify list-verifier-tests %s" % MODES[args.mode])
+
+    # Start a verification, show results and generate reports
+    skip_list_path = write_file("skip-list.yaml", SKIPPED_TESTS)
+    xfail_list_path = write_file("xfail-list.yaml", XFAILED_TESTS)
+    run_args = ("%s --skip-list %s --xfail-list %s"
+                % (MODES[args.mode], skip_list_path, xfail_list_path))
+    render_vars["verifications"].append(start_verification(run_args))

     if args.compare:
-        render_vars["verifications"].append(
-            launch_verification_once(launch_params))
-        render_vars["compare"] = do_compare(
+        # Start another verification, show results and generate reports
+        with gzip.open(render_vars["list_verifier_tests"]["stdout_file"]) as f:
+            load_list_path = write_file("load-list.txt", f.read())
+        run_args = "--load-list %s" % load_list_path
+        render_vars["verifications"].append(start_verification(run_args))
+        # Generate trends reports for two verifications
+        render_vars["compare"] = generate_trends_reports(
            render_vars["verifications"][-2]["uuid"],
            render_vars["verifications"][-1]["uuid"])

+    # List verifications
     render_vars["list"] = call_rally("verify list")

+    # Delete the verifier extension
+    render_vars["delete_verifier_ext"] = call_rally(
+        "verify delete-verifier-ext --name %s" % VERIFIER_EXT_NAME)
+    # Delete the verifier and all verifications
+    render_vars["delete_verifier"] = call_rally(
+        "verify delete-verifier --force")
+
     render_page(**render_vars)
     return _return_status
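For reference (not part of the patch): the render_vars keys that main() now hands to render_page(), collected from the code above; everything except "verifications" and "compare" is a single call_rally() result dict.

# Key inventory derived from main() above; "compare" is present only when the
# job is started with --compare.
RENDER_VARS_KEYS = (
    "list_plugins", "create_verifier", "list_verifiers", "update_verifier",
    "configure_verifier", "add_verifier_ext", "list_verifier_exts",
    "list_verifier_tests",
    "verifications",          # list of start_verification() result dicts
    "compare",                # trends reports, only with --compare
    "list", "delete_verifier_ext", "delete_verifier",
)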