Merge "make verification job great again"
This commit is contained in:
commit
947b4b23d3
@ -13,6 +13,7 @@
|
||||
# under the License.
|
||||
|
||||
import argparse
|
||||
import collections
|
||||
import gzip
|
||||
import json
|
||||
import logging
|
||||
@ -22,126 +23,462 @@ import subprocess
|
||||
import sys
|
||||
import uuid
|
||||
|
||||
from rally.cli import envutils
|
||||
from rally.plugins.openstack import credential
|
||||
from rally import api
|
||||
from rally.ui import utils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
LOG.setLevel(logging.DEBUG)
|
||||
|
||||
BASE_DIR = "rally-verify"
|
||||
|
||||
MODES = {"full": "--pattern set=full", "light": "--pattern set=smoke"}
|
||||
DEPLOYMENT_NAME = "devstack"
|
||||
VERIFIER_TYPE = "tempest"
|
||||
VERIFIER_SOURCE = "https://git.openstack.org/openstack/tempest"
|
||||
VERIFIER_EXT_REPO = "https://git.openstack.org/openstack/keystone"
|
||||
VERIFIER_EXT_NAME = "keystone_tests"
|
||||
SKIP_TESTS = (
|
||||
"tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON."
|
||||
"test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]: "
|
||||
"This test was skipped intentionally")
|
||||
XFAIL_TESTS = (
|
||||
"tempest.api.compute.servers.test_server_actions.ServerActionsTestJSON."
|
||||
"test_get_vnc_console[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]: "
|
||||
"This test fails because 'novnc' console type is unavailable")
|
||||
TEST_NAME_RE = re.compile(r"^[a-zA-Z_.0-9]+(\[[a-zA-Z-,=0-9]*\])?$")
|
||||
|
||||
# NOTE(andreykurilin): this variable is used to generate output file names
|
||||
# with prefix ${CALL_COUNT}_ .
|
||||
_call_count = 0
|
||||
# NOTE(andreykurilin): if some command fails, script should end with
|
||||
# error status
|
||||
_return_status = 0
|
||||
|
||||
|
||||
def call_rally(cmd, print_output=False, output_type=None):
    """Execute a Rally command and write result in files.

    Runs ``rally --rally-debug <cmd>``, stores the gzipped stdout under
    BASE_DIR with a ``<call-count>_`` prefix and, when *output_type* is
    given, additionally asks Rally to produce a report file of that type
    (which is gzipped in place afterwards).

    :param cmd: rally subcommand line (without the leading "rally")
    :param print_output: when True, echo the captured stdout to stdout
    :param output_type: optional report type ("json", "html", ...)
    :returns: dict describing the call (cmd, status, file locations)
    """
    global _return_status
    global _call_count
    _call_count += 1

    data = {"cmd": "rally --rally-debug %s" % cmd}

    # Slashes and spaces would break the flat file layout under BASE_DIR.
    sanitized = cmd.replace("/", "_")
    data["stdout_file"] = "{base_dir}/{prefix}_{cmd}.txt.gz".format(
        base_dir=BASE_DIR,
        prefix=_call_count,
        cmd=sanitized.replace(" ", "_"))

    if output_type:
        data["output_file"] = data["stdout_file"].replace(
            ".txt.", ".%s." % output_type)
        data["cmd"] += " --to %s" % data["output_file"]
        data["cmd"] += " --type %s" % output_type

    try:
        LOG.info("Try to execute `%s`." % data["cmd"])
        stdout = subprocess.check_output(data["cmd"].split(),
                                         stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        LOG.error("Command `%s` failed." % data["cmd"])
        stdout = e.output
        data["status"] = "fail"
        # Any failed command makes the whole job exit non-zero.
        _return_status = 1
    else:
        data["status"] = "success"

    if output_type:
        # let's gzip results
        with open(data["output_file"]) as f:
            report_body = f.read()
        with gzip.open(data["output_file"], "wb") as f:
            f.write(report_body)

    stdout = "$ %s\n%s" % (data["cmd"], stdout)

    with gzip.open(data["stdout_file"], "wb") as f:
        f.write(stdout)

    if print_output:
        print(stdout)

    return data
|
||||
class Status(object):
    """Symbolic names for the result states a step may end in."""

    PASS = "success"
    FAILURE = "fail"
    ERROR = "error"
    SKIPPED = "skip"
||||
|
||||
|
||||
def start_verification(args):
    """Start a verification, show results and generate reports.

    :param args: extra CLI arguments appended to ``rally verify start``
    :returns: dict with the verification UUID and the per-command results
    """
    results = call_rally("verify start %s" % args)
    results["uuid"] = envutils.get_global(envutils.ENV_VERIFICATION)
    results["show"] = call_rally("verify show")
    results["show_detailed"] = call_rally("verify show --detailed")
    for fmt in ("json", "html", "junit-xml"):
        key = fmt.replace("-", "_")
        results[key] = call_rally("verify report", output_type=fmt)
    # NOTE(andreykurilin): we need to clean verification uuid from global
    # environment to be able to load it next time(for another verification).
    envutils.clear_global(envutils.ENV_VERIFICATION)
    return results
|
||||
class Step(object):
    """Base class for one scenario step of the verification job.

    Subclasses set COMMAND (a ``rally verify`` subcommand template),
    DEPENDS_ON (the Step class that must have run before this one) and
    CALL_ARGS (values interpolated into COMMAND's %-placeholders).
    """

    # ``rally verify`` subcommand template; %-interpolated with CALL_ARGS.
    COMMAND = None
    # Step class whose success/failure gates execution of this step.
    DEPENDS_ON = None
    # Values for COMMAND's placeholders. NOTE(review): this dict is shared
    # by every subclass that does not define its own.
    CALL_ARGS = {}

    # Directory all artifacts (stdout captures, reports) are written to.
    BASE_DIR = "rally-verify"
    # Fragment rendered into the job's HTML index for this step.
    HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n"
                     "<a href=\"%(output_file)s\">%(doc)s</a>\n"
                     "<code>$ %(cmd)s</code>")

    def __init__(self, args, rapi):
        # args: parsed CLI namespace; rapi: rally.api.API instance.
        self.args = args
        self.rapi = rapi
        # "doc" is the subclass docstring; it is rendered into the report.
        self.result = {"status": Status.PASS,
                       "doc": self.__doc__}

    @property
    def name(self):
        # Human-readable name derived from the CamelCase class name,
        # e.g. "CreateVerifier" -> "create verifier".
        return " ".join(re.findall("[A-Z][^A-Z]*",
                                   self.__class__.__name__)).lower()

    def check(self, results):
        """Check whether this step should be executed or skipped."""
        if self.DEPENDS_ON is not None:
            # A FAILURE upstream still lets dependents run; only ERROR or
            # SKIPPED blocks them.
            if results[self.DEPENDS_ON].result["status"] in (
                    Status.PASS, Status.FAILURE):
                return True
            else:
                self.result["status"] = Status.SKIPPED
                msg = ("Step '%s' is skipped, since depends on step '%s' is "
                       "skipped or finished with an error." %
                       (self.name, results[self.DEPENDS_ON].name))
                stdout_file = self._generate_path(
                    "%s.txt.gz" % self.__class__.__name__)

                # Record the skip reason where the step output would be.
                self.result["output_file"] = self._write_file(
                    stdout_file, msg, compress=True)
                return False
        return True

    def setUp(self):
        """Obtain variables required for execution"""
        pass

    def run(self):
        """Execute step. The default action - execute the command"""
        self.setUp()

        cmd = "rally --rally-debug verify %s" % (self.COMMAND % self.CALL_ARGS)
        self.result["cmd"] = cmd
        self.result["status"], self.result["output"] = self.call_rally(cmd)

        # Persist a gzipped copy of the command output for the report.
        stdout_file = self._generate_path("%s.txt.gz" % cmd)
        self.result["output_file"] = self._write_file(
            stdout_file, self.result["output"], compress=True)

    @classmethod
    def _generate_path(cls, root):
        """Build a unique artifact path under BASE_DIR.

        Each call bumps the module-level counter so file names get an
        ordered ``<n>_`` prefix matching execution order.
        """
        global _call_count
        _call_count += 1

        # Strip characters that would break a flat file name.
        root = root.replace("<", "").replace(">", "").replace("/", "_")
        parts = ["%s" % _call_count]
        for path in root.split(" "):
            # Avoid duplicating the base dir inside the file name.
            if path.startswith(cls.BASE_DIR):
                path = path[len(cls.BASE_DIR) + 1:]
            parts.append(path)
        return os.path.join(cls.BASE_DIR, "_".join(parts))

    @classmethod
    def _write_file(cls, path, data, compress=False):
        """Create a file and write some data to it."""
        if compress:
            with gzip.open(path, "wb") as f:
                f.write(data)
        else:
            with open(path, "wb") as f:
                f.write(data)
        return path

    @staticmethod
    def call_rally(command):
        """Execute a Rally verify command."""
        try:
            LOG.info("Try to execute `%s`." % command)
            stdout = subprocess.check_output(command.split(),
                                             stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as e:
            LOG.error("Command `%s` failed." % command)
            return Status.ERROR, e.output
        else:
            return Status.PASS, stdout

    def to_html(self):
        # Render this step's result into the report fragment.
        return self.HTML_TEMPLATE % self.result
|
||||
|
||||
|
||||
def write_file(filename, data):
    """Create a file under BASE_DIR and write some data to it.

    :returns: the full path of the created file
    """
    destination = os.path.join(BASE_DIR, filename)
    with open(destination, "wb") as stream:
        stream.write(data)
    return destination
|
||||
class SetUpStep(Step):
    """Validate deployment, create required resources and directories."""

    # Name of the pre-registered Rally deployment to operate on.
    DEPLOYMENT_NAME = "devstack"

    def run(self):
        # Make sure the artifact directory tree exists before any step
        # writes output files into it.
        if not os.path.exists("%s/extra" % self.BASE_DIR):
            os.makedirs("%s/extra" % self.BASE_DIR)

        # ensure that the deployment exists
        deployment = self.rapi.deployment._get(self.DEPLOYMENT_NAME)
        # check that its admin/user credentials actually work
        result = self.rapi.deployment.check(
            deployment=self.DEPLOYMENT_NAME)["openstack"]
        if "admin_error" in result[0] or "user_error" in result[0]:
            self.result["status"] = Status.ERROR
            return

        # Make the deployment the CLI default so later `rally verify`
        # calls pick it up implicitly.
        try:
            subprocess.check_call(["rally", "deployment", "use",
                                   "--deployment", self.DEPLOYMENT_NAME],
                                  stdout=sys.stdout)
        except subprocess.CalledProcessError:
            self.result["status"] = Status.ERROR
            return

        credentials = deployment.get_credentials_for("openstack")["admin"]
        clients = credentials.clients()

        if self.args.ctx_create_resources:
            # If the 'ctx-create-resources' arg is provided, delete images and
            # flavors, and also create a shared network to make Tempest context
            # create needed resources.
            LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
                     "images and flavors, and also creating a shared network "
                     "to make Tempest context create needed resources.")

            LOG.info("Deleting images.")
            for image in clients.glance().images.list():
                clients.glance().images.delete(image.id)

            LOG.info("Deleting flavors.")
            for flavor in clients.nova().flavors.list():
                clients.nova().flavors.delete(flavor.id)

            LOG.info("Creating a shared network.")
            net_body = {
                "network": {
                    "name": "shared-net-%s" % str(uuid.uuid4()),
                    # NOTE(review): other client accessors here are called
                    # (glance(), nova()); `keystone` is accessed as an
                    # attribute — confirm this is intentional and not a
                    # missing call.
                    "tenant_id": clients.keystone.auth_ref.project_id,
                    "shared": True
                }
            }
            clients.neutron().create_network(net_body)
        else:
            # Otherwise, just in case create only flavors with the following
            # properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
            # Tempest context discover them.
            LOG.info("The 'ctx-create-resources' arg is not provided. "
                     "Creating flavors to make Tempest context discover them.")
            for flv_ram in [64, 128]:
                params = {
                    "name": "flavor-%s" % str(uuid.uuid4()),
                    "ram": flv_ram,
                    "vcpus": 1,
                    "disk": 0
                }
                LOG.info("Creating flavor '%s' with the following properties: "
                         "RAM = %dMB, VCPUs = 1, disk = 0GB" %
                         (params["name"], flv_ram))
                clients.nova().flavors.create(**params)

    def to_html(self):
        # This preparation step is not shown in the rendered report.
        return ""
|
||||
|
||||
|
||||
def generate_trends_reports(uuid_1, uuid_2):
    """Generate trends reports comparing two verifications.

    :returns: dict mapping report type (dashes replaced by underscores)
        to the corresponding call_rally result
    """
    reports = {}
    base_cmd = "verify report --uuid %s %s" % (uuid_1, uuid_2)
    for fmt in ("json", "html", "junit-xml"):
        reports[fmt.replace("-", "_")] = call_rally(base_cmd,
                                                    output_type=fmt)
    return reports
|
||||
class ListPlugins(Step):
    """List plugins for verifiers management."""

    # Runs right after the environment has been prepared.
    DEPENDS_ON = SetUpStep
    COMMAND = "list-plugins"
|
||||
|
||||
|
||||
def render_page(**render_vars):
    """Render the job's HTML index page into BASE_DIR/extra."""
    template = utils.get_template("ci/index_verify.html")
    destination = os.path.join(BASE_DIR, "extra/index.html")
    with open(destination, "w") as stream:
        stream.write(template.render(**render_vars))
|
||||
class CreateVerifier(Step):
    """Create a Tempest verifier."""

    DEPENDS_ON = ListPlugins
    # NOTE: DeleteVerifier reuses the "name" value below as verifier id.
    CALL_ARGS = {"type": "tempest",
                 "name": "my-verifier",
                 "source": "https://git.openstack.org/openstack/tempest"}
    COMMAND = ("create-verifier --type %(type)s --name %(name)s "
               "--source %(source)s")
|
||||
|
||||
|
||||
class ShowVerifier(Step):
    """Show information about the created verifier."""

    DEPENDS_ON = CreateVerifier
    COMMAND = "show-verifier"
|
||||
|
||||
|
||||
class ListVerifiers(Step):
    """List all installed verifiers."""

    DEPENDS_ON = CreateVerifier
    COMMAND = "list-verifiers"
|
||||
|
||||
|
||||
class UpdateVerifier(Step):
    """Switch the verifier to the penultimate version."""

    COMMAND = "update-verifier --version %(version)s --update-venv"
    DEPENDS_ON = CreateVerifier

    def setUp(self):
        """Obtain penultimate verifier commit for downgrading to it"""
        # Only one verifier is created by this job, so take the first.
        verifier_id = self.rapi.verifier.list()[0]["uuid"]
        verifications_dir = os.path.join(
            os.path.expanduser("~"),
            ".rally/verification/verifier-%s/repo" % verifier_id)
        # Get the penultimate verifier commit ID
        # NOTE(review): `git log -n 1` returns the *latest* commit of the
        # checked-out ref, not the penultimate one — confirm whether
        # "--skip", "1" (or HEAD~1) was intended here.
        p_commit_id = subprocess.check_output(
            ["git", "log", "-n", "1", "--pretty=format:%H"],
            cwd=verifications_dir).strip()
        # Instance-level assignment shadows the class attribute, so other
        # steps' CALL_ARGS dicts are left untouched.
        self.CALL_ARGS = {"version": p_commit_id}
|
||||
|
||||
|
||||
class ConfigureVerifier(Step):
    """Generate and show the verifier config file."""

    DEPENDS_ON = CreateVerifier
    COMMAND = "configure-verifier --show"
|
||||
|
||||
|
||||
class ExtendVerifier(Step):
    """Extend verifier with keystone integration tests."""

    DEPENDS_ON = CreateVerifier
    CALL_ARGS = {"source": "https://git.openstack.org/openstack/keystone"}
    COMMAND = "add-verifier-ext --source %(source)s"
|
||||
|
||||
|
||||
class ListVerifierExtensions(Step):
    """List all extensions of verifier."""

    DEPENDS_ON = ExtendVerifier
    COMMAND = "list-verifier-exts"
|
||||
|
||||
|
||||
class ListVerifierTests(Step):
    """List all tests of specific verifier."""

    DEPENDS_ON = CreateVerifier
    COMMAND = "list-verifier-tests"
|
||||
|
||||
|
||||
class RunVerification(Step):
    """Run a verification."""

    DEPENDS_ON = ConfigureVerifier
    COMMAND = ("start --pattern set=%(set)s --skip-list %(skip_tests)s "
               "--xfail-list %(xfail_tests)s --tag %(tag)s %(set)s-set "
               "--detailed")
    # Tests excluded from the run (test name -> reason).
    SKIP_TESTS = {
        "tempest.api.compute.flavors.test_flavors.FlavorsV2TestJSON."
        "test_get_flavor[id-1f12046b-753d-40d2-abb6-d8eb8b30cb2f,smoke]":
            "This test was skipped intentionally"}
    # Tests expected to fail (test name -> reason).
    XFAIL_TESTS = {
        "tempest.api.compute.servers.test_server_actions."
        "ServerActionsTestJSON.test_get_vnc_console"
        "[id-c6bc11bf-592e-4015-9319-1c98dc64daf5]":
            "This test fails because 'novnc' console type is unavailable"}

    def setUp(self):
        """Prepare CALL_ARGS: tags, test set, skip/xfail list files."""
        # Rebind an instance-level copy instead of mutating in place:
        # without this, the dict inherited from Step.CALL_ARGS (shared by
        # every subclass that does not define its own) would be polluted
        # with this step's keys.
        self.CALL_ARGS = dict(self.CALL_ARGS)
        self.CALL_ARGS["tag"] = "tag-1 tag-2"
        self.CALL_ARGS["set"] = "full" if self.args.mode == "full" else "smoke"
        # Start a verification, show results and generate reports
        skip_tests = json.dumps(self.SKIP_TESTS)
        xfail_tests = json.dumps(self.XFAIL_TESTS)
        self.CALL_ARGS["skip_tests"] = self._write_file(
            self._generate_path("skip-list.json"), skip_tests)
        self.CALL_ARGS["xfail_tests"] = self._write_file(
            self._generate_path("xfail-list.json"), xfail_tests)

    def run(self):
        """Execute the verification; zero successful tests counts as fail."""
        super(RunVerification, self).run()
        # Even when the CLI exits 0, a run where nothing succeeded should
        # be reported as a failure.
        if "Success: 0" in self.result["output"]:
            self.result["status"] = Status.FAILURE
|
||||
|
||||
|
||||
class ReRunVerification(RunVerification):
    """Re-Run previous verification."""

    COMMAND = "rerun --tag one-more-attempt"

    # NOTE: execution and the "Success: 0" failure check are inherited from
    # RunVerification.run(). The previous run() override bypassed
    # RunVerification.run via super(RunVerification, self).run() and then
    # duplicated the identical status check, so it was redundant.
|
||||
|
||||
|
||||
class ShowVerification(Step):
    """Show results of verification."""

    DEPENDS_ON = RunVerification
    COMMAND = "show"
|
||||
|
||||
|
||||
class ShowSecondVerification(ShowVerification):
    """Show results of verification."""

    # Same command as the parent, but gated on the re-run step.
    DEPENDS_ON = ReRunVerification
|
||||
|
||||
|
||||
class ShowDetailedVerification(Step):
    """Show detailed results of verification."""

    DEPENDS_ON = RunVerification
    COMMAND = "show --detailed"
|
||||
|
||||
|
||||
class ShowDetailedSecondVerification(ShowDetailedVerification):
    """Show detailed results of verification."""

    # Same command as the parent, but gated on the re-run step.
    DEPENDS_ON = ReRunVerification
|
||||
|
||||
|
||||
class ReportVerificationMixin(Step):
    """Mixin for obtaining reports of verifications."""

    COMMAND = "report --uuid %(uuids)s --type %(type)s --to %(out)s"

    HTML_TEMPLATE = ("<span class=\"%(status)s\">[%(status)s]</span>\n"
                     "<a href=\"%(out)s\">%(doc)s</a> "
                     "[<a href=\"%(output_file)s\">Output from CLI</a>]\n"
                     "<code>$ %(cmd)s</code>")

    def setUp(self):
        """Compute the report destination and collect verification UUIDs."""
        # Interpolate the command with placeholder values first, purely to
        # derive a stable report file name from the command's shape.
        self.CALL_ARGS["out"] = "<path>"
        self.CALL_ARGS["uuids"] = "<uuid-1> <uuid-2>"
        cmd = self.COMMAND % self.CALL_ARGS
        report = "%s.%s" % (cmd.replace("/", "_").replace(" ", "_"),
                            self.CALL_ARGS["type"])
        # NOTE: the two leftover debug print() calls that echoed the report
        # name and the interpolated command were removed.
        self.CALL_ARGS["out"] = self._generate_path(report)
        # Report over every verification executed so far.
        self.CALL_ARGS["uuids"] = " ".join(
            [v["uuid"] for v in self.rapi.verification.list()])

    def run(self):
        """Generate the report and keep a gzipped copy next to it."""
        super(ReportVerificationMixin, self).run()
        creport = "%s.gz" % self.CALL_ARGS["out"]
        with open(self.CALL_ARGS["out"], "rb") as f_in:
            with gzip.open(creport, "wb") as f_out:
                f_out.writelines(f_in)
        self.result["out"] = creport
|
||||
|
||||
|
||||
class HtmlVerificationReport(ReportVerificationMixin):
    """Generate HTML report for verification(s)."""

    DEPENDS_ON = RunVerification
    CALL_ARGS = {"type": "html"}
|
||||
|
||||
|
||||
class JsonVerificationReport(ReportVerificationMixin):
    """Generate JSON report for verification(s)."""

    DEPENDS_ON = RunVerification
    CALL_ARGS = {"type": "json"}
|
||||
|
||||
|
||||
class JunitVerificationReport(ReportVerificationMixin):
    """Generate JUNIT report for verification(s)."""

    DEPENDS_ON = RunVerification
    CALL_ARGS = {"type": "junit-xml"}
|
||||
|
||||
|
||||
class ListVerifications(Step):
    """List all verifications."""

    DEPENDS_ON = CreateVerifier
    COMMAND = "list"
|
||||
|
||||
|
||||
class DeleteVerifierExtension(Step):
    """Delete keystone extension."""

    DEPENDS_ON = ExtendVerifier
    CALL_ARGS = {"name": "keystone_tests"}
    COMMAND = "delete-verifier-ext --name %(name)s"
|
||||
|
||||
|
||||
class DeleteVerifier(Step):
    """Delete Tempest verifier and all verifications."""

    DEPENDS_ON = CreateVerifier
    # The verifier name chosen at creation time serves as its id.
    CALL_ARGS = {"id": CreateVerifier.CALL_ARGS["name"]}
    COMMAND = "delete-verifier --id %(id)s --force"
|
||||
|
||||
|
||||
def run(args):
    """Execute the whole verification scenario step by step.

    :param args: parsed CLI namespace (mode, compare, ...)
    :returns: the executed Step instances, in execution order
    """
    steps = [SetUpStep,
             ListPlugins,
             CreateVerifier,
             ShowVerifier,
             ListVerifiers,
             UpdateVerifier,
             ConfigureVerifier,
             ExtendVerifier,
             ListVerifierExtensions,
             ListVerifierTests,
             RunVerification,
             ShowVerification,
             ShowDetailedVerification,
             HtmlVerificationReport,
             JsonVerificationReport,
             JunitVerificationReport,
             ListVerifications,
             DeleteVerifierExtension,
             DeleteVerifier]

    if args.compare:
        # need to launch one more verification
        insert_at = steps.index(ShowDetailedVerification) + 1
        steps[insert_at:insert_at] = [ReRunVerification,
                                      ShowSecondVerification,
                                      ShowDetailedSecondVerification]

    results = collections.OrderedDict()
    rapi = api.API()
    for step_cls in steps:
        step = step_cls(args, rapi=rapi)
        # A step whose dependency errored or was skipped is not run,
        # but is still recorded so later dependents can see its status.
        if step.check(results):
            step.run()
        results[step_cls] = step

    return results.values()
|
||||
|
||||
|
||||
def main():
|
||||
@ -150,7 +487,7 @@ def main():
|
||||
help="Mode of job. The 'full' mode corresponds to the "
|
||||
"full set of verifier tests. The 'light' mode "
|
||||
"corresponds to the smoke set of verifier tests.",
|
||||
choices=MODES.keys())
|
||||
choices=["light", "full"])
|
||||
parser.add_argument("--compare", action="store_true",
|
||||
help="Start the second verification to generate a "
|
||||
"trends report for two verifications.")
|
||||
@ -162,140 +499,17 @@ def main():
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if not os.path.exists("%s/extra" % BASE_DIR):
|
||||
os.makedirs("%s/extra" % BASE_DIR)
|
||||
steps = run(args)
|
||||
results = [step.to_html() for step in steps]
|
||||
|
||||
# Choose and check the deployment
|
||||
call_rally("deployment use --deployment %s" % DEPLOYMENT_NAME)
|
||||
call_rally("deployment check")
|
||||
template = utils.get_template("ci/index_verify.html")
|
||||
with open(os.path.join(Step.BASE_DIR, "extra/index.html"), "w") as f:
|
||||
f.write(template.render(steps=results))
|
||||
|
||||
config = json.loads(
|
||||
subprocess.check_output(["rally", "deployment", "config"]))
|
||||
config = config["creds"]["openstack"]
|
||||
config.update(config.pop("admin"))
|
||||
clients = credential.OpenStackCredential(**config).clients()
|
||||
|
||||
if args.ctx_create_resources:
|
||||
# If the 'ctx-create-resources' arg is provided, delete images and
|
||||
# flavors, and also create a shared network to make Tempest context
|
||||
# create needed resources.
|
||||
LOG.info("The 'ctx-create-resources' arg is provided. Deleting "
|
||||
"images and flavors, and also creating a shared network "
|
||||
"to make Tempest context create needed resources.")
|
||||
|
||||
LOG.info("Deleting images.")
|
||||
for image in clients.glance().images.list():
|
||||
clients.glance().images.delete(image.id)
|
||||
|
||||
LOG.info("Deleting flavors.")
|
||||
for flavor in clients.nova().flavors.list():
|
||||
clients.nova().flavors.delete(flavor.id)
|
||||
|
||||
LOG.info("Creating a shared network.")
|
||||
net_body = {
|
||||
"network": {
|
||||
"name": "shared-net-%s" % str(uuid.uuid4()),
|
||||
"tenant_id": clients.keystone.auth_ref.project_id,
|
||||
"shared": True
|
||||
}
|
||||
}
|
||||
clients.neutron().create_network(net_body)
|
||||
else:
|
||||
# Otherwise, just in case create only flavors with the following
|
||||
# properties: RAM = 64MB and 128MB, VCPUs = 1, disk = 0GB to make
|
||||
# Tempest context discover them.
|
||||
LOG.info("The 'ctx-create-resources' arg is not provided. "
|
||||
"Creating flavors to make Tempest context discover them.")
|
||||
for flv_ram in [64, 128]:
|
||||
params = {
|
||||
"name": "flavor-%s" % str(uuid.uuid4()),
|
||||
"ram": flv_ram,
|
||||
"vcpus": 1,
|
||||
"disk": 0
|
||||
}
|
||||
LOG.info(
|
||||
"Creating flavor '%s' with the following properties: RAM "
|
||||
"= %dMB, VCPUs = 1, disk = 0GB" % (params["name"], flv_ram))
|
||||
clients.nova().flavors.create(**params)
|
||||
|
||||
render_vars = dict(verifications=[])
|
||||
|
||||
# List plugins for verifiers management
|
||||
render_vars["list_plugins"] = call_rally("verify list-plugins")
|
||||
|
||||
# Create a verifier
|
||||
render_vars["create_verifier"] = call_rally(
|
||||
"verify create-verifier --type %s --name my-verifier --source %s"
|
||||
% (VERIFIER_TYPE, VERIFIER_SOURCE))
|
||||
|
||||
# Show the verifier
|
||||
render_vars["show_verifier"] = call_rally("verify show-verifier")
|
||||
|
||||
# List verifiers
|
||||
render_vars["list_verifiers"] = call_rally("verify list-verifiers")
|
||||
|
||||
# Get verifier ID
|
||||
verifier_id = envutils.get_global(envutils.ENV_VERIFIER)
|
||||
# Get the penultimate verifier commit ID
|
||||
repo_dir = os.path.join(
|
||||
os.path.expanduser("~"),
|
||||
".rally/verification/verifier-%s/repo" % verifier_id)
|
||||
p_commit_id = subprocess.check_output(
|
||||
["git", "log", "-n", "1", "--pretty=format:%H"], cwd=repo_dir).strip()
|
||||
# Switch the verifier to the penultimate version
|
||||
render_vars["update_verifier"] = call_rally(
|
||||
"verify update-verifier --version %s --update-venv" % p_commit_id)
|
||||
|
||||
# Generate and show the verifier config file
|
||||
render_vars["configure_verifier"] = call_rally(
|
||||
"verify configure-verifier --show")
|
||||
|
||||
# Add a verifier extension
|
||||
render_vars["add_verifier_ext"] = call_rally(
|
||||
"verify add-verifier-ext --source %s" % VERIFIER_EXT_REPO)
|
||||
|
||||
# List verifier extensions
|
||||
render_vars["list_verifier_exts"] = call_rally("verify list-verifier-exts")
|
||||
|
||||
# List verifier tests
|
||||
render_vars["list_verifier_tests"] = call_rally(
|
||||
"verify list-verifier-tests %s" % MODES[args.mode])
|
||||
|
||||
# Start a verification, show results and generate reports
|
||||
skip_list_path = write_file("skip-list.yaml", SKIP_TESTS)
|
||||
xfail_list_path = write_file("xfail-list.yaml", XFAIL_TESTS)
|
||||
run_args = ("%s --skip-list %s --xfail-list %s --tag first-run %s-set "
|
||||
"--detailed" % (MODES[args.mode], skip_list_path,
|
||||
xfail_list_path, args.mode))
|
||||
render_vars["verifications"].append(start_verification(run_args))
|
||||
|
||||
if args.compare:
|
||||
# Start another verification, show results and generate reports
|
||||
with gzip.open(render_vars["list_verifier_tests"]["stdout_file"]) as f:
|
||||
tests = [t for t in f.read().split("\n") if TEST_NAME_RE.match(t)]
|
||||
load_list_path = write_file("load-list.txt", "\n".join(tests))
|
||||
run_args = "--load-list %s --tag second-run %s-set --detailed" % (
|
||||
load_list_path, args.mode)
|
||||
render_vars["verifications"].append(start_verification(run_args))
|
||||
|
||||
# Generate trends reports for two verifications
|
||||
render_vars["compare"] = generate_trends_reports(
|
||||
render_vars["verifications"][-2]["uuid"],
|
||||
render_vars["verifications"][-1]["uuid"])
|
||||
|
||||
# List verifications
|
||||
render_vars["list"] = call_rally("verify list")
|
||||
|
||||
# Delete the verifier extension
|
||||
render_vars["delete_verifier_ext"] = call_rally(
|
||||
"verify delete-verifier-ext --name %s" % VERIFIER_EXT_NAME)
|
||||
# Delete the verifier and all verifications
|
||||
render_vars["delete_verifier"] = call_rally(
|
||||
"verify delete-verifier --id %s --force" % verifier_id)
|
||||
|
||||
render_page(**render_vars)
|
||||
|
||||
return _return_status
|
||||
if len([None for step in steps if step.result["status"] in (
|
||||
Status.PASS, Status.FAILURE)]) == len(steps):
|
||||
return 0
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
|
Loading…
x
Reference in New Issue
Block a user