Handle log message interpolation by the logger in common/
According to the OpenStack logging guidelines [1], log messages should be interpolated by the logger itself, by passing the substitution values to the logging call, rather than pre-formatted with the % operator.

[1]: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-log-messages

Change-Id: Ie7291889337a7ffe7877910d3c3a24fd670aba94
parent 27fd7b1b12
commit 9c126664b2
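For reference, the guideline amounts to passing the substitution values to the logging call instead of pre-formatting the string with %, so the message is only rendered when the record is actually emitted. Below is a minimal sketch of the pattern using the stdlib logging module; Rally itself logs through rally.common.logging / oslo.log, and the variable names are placeholders, not code from this change:

import logging

logging.basicConfig(level=logging.DEBUG)
LOG = logging.getLogger(__name__)

flavor_id = "42"        # placeholder value, for illustration only
num, threads = 10, 4    # placeholder values, for illustration only

# Eager interpolation: the string is built even when DEBUG is disabled.
LOG.debug("Flavor with id '%s'" % flavor_id)

# Lazy interpolation: the logger formats the message only if it is emitted.
LOG.debug("Flavor with id '%s'", flavor_id)

# With several placeholders, pass each value as a separate argument ...
LOG.debug("Creating %d containers using %d threads.", num, threads)

# ... or pass a single mapping when the format string uses named placeholders.
LOG.debug("Creating %(num)d containers using %(threads)d threads.",
          {"num": num, "threads": threads})

Besides skipping the formatting work for suppressed levels, the lazy form hands the raw format string to the logging machinery, which is what the oslo.i18n guideline relies on.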
@@ -84,7 +84,7 @@ implement the Context API: the *setup()* method that creates a flavor and the
 ram=self.config.get("ram", 1),
 vcpus=self.config.get("vcpus", 1),
 disk=self.config.get("disk", 1)).to_dict()
-LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
+LOG.debug("Flavor with id '%s'", self.context["flavor"]["id"])
 except Exception as e:
 msg = "Can't create flavor: %s" % e.message
 if logging.is_debug():
@@ -97,7 +97,7 @@ implement the Context API: the *setup()* method that creates a flavor and the
 try:
 nova = osclients.Clients(self.context["admin"]["credential"]).nova()
 nova.flavors.delete(self.context["flavor"]["id"])
-LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
+LOG.debug("Flavor '%s' deleted", self.context["flavor"]["id"])
 except Exception as e:
 msg = "Can't delete flavor: %s" % e.message
 if logging.is_debug():
@@ -79,7 +79,7 @@ class SeekAndDestroy(object):
 }

 LOG.debug(
-"Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)" %
+"Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)",
 msg_kw)

 try:
@@ -280,7 +280,7 @@ def cleanup(names=None, admin_required=None, admin=None, users=None,
 rutils.RandomNameGeneratorMixin):
 resource_classes.append(superclass)
 for manager in find_resource_managers(names, admin_required):
-LOG.debug("Cleaning up %(service)s %(resource)s objects" %
+LOG.debug("Cleaning up %(service)s %(resource)s objects",
 {"service": manager._service,
 "resource": manager._resource})
 SeekAndDestroy(manager, admin, users,
@@ -445,8 +445,8 @@ class NeutronPort(NeutronMixin):
 except neutron_exceptions.PortNotFoundClient:
 # Port can be already auto-deleted, skip silently
 LOG.debug("Port %s was not deleted. Skip silently because "
-"port can be already auto-deleted."
-% self.id())
+"port can be already auto-deleted.",
+self.id())


 @base.resource("neutron", "subnet", order=next(_neutron_order),
@@ -46,7 +46,7 @@ class VolumeTypeGenerator(context.Context):
 admin_clients, name_generator=self.generate_random_name)
 self.context["volume_types"] = []
 for vtype_name in self.config:
-LOG.debug("Creating Cinder volume type %s" % vtype_name)
+LOG.debug("Creating Cinder volume type %s", vtype_name)
 vtype = cinder_service.create_volume_type(vtype_name)
 self.context["volume_types"].append({"id": vtype.id,
 "name": vtype_name})
@@ -73,8 +73,8 @@ class EC2ServerGenerator(context.Context):

 for user, tenant_id in rutils.iterate_per_tenants(
 self.context["users"]):
-LOG.debug("Booting servers for tenant %s "
-% (user["tenant_id"]))
+LOG.debug("Booting servers for tenant %s ",
+(user["tenant_id"]))
 ec2_scenario = ec2_utils.EC2Scenario({
 "user": user,
 "task": self.context["task"],
@@ -86,7 +86,7 @@ class RoleGenerator(context.Context):
 role = self._get_role_object(context_role)
 roles_dict[role.id] = role.name
 LOG.debug("Adding role %(role_name)s having ID %(role_id)s "
-"to all users using %(threads)s threads" %
+"to all users using %(threads)s threads",
 {"role_name": role.name,
 "role_id": role.id,
 "threads": threads})
@@ -104,7 +104,7 @@ class RoleGenerator(context.Context):

 def publish(queue):
 for role_id in self.context["roles"]:
-LOG.debug("Removing role %s from all users" % role_id)
+LOG.debug("Removing role %s from all users", role_id)
 for user in self.context["users"]:
 args = (role_id, user["id"], user["tenant_id"])
 queue.append(args)
@@ -136,7 +136,7 @@ class UserGenerator(context.Context):
 use_sg, msg = network.wrap(clients, self).supports_extension(
 "security-group")
 if not use_sg:
-LOG.debug("Security group context is disabled: %s" % msg)
+LOG.debug("Security group context is disabled: %s", msg)
 return

 for user, tenant_id in rutils.iterate_per_tenants(
@@ -261,7 +261,7 @@ class UserGenerator(context.Context):
 """Create tenants and users, using the broker pattern."""
 threads = self.config["resource_management_workers"]

-LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" %
+LOG.debug("Creating %(tenants)d tenants using %(threads)s threads",
 {"tenants": self.config["tenants"], "threads": threads})
 self.context["tenants"] = self._create_tenants()
@@ -271,7 +271,7 @@ class UserGenerator(context.Context):
 msg=_("Failed to create the requested number of tenants."))

 users_num = self.config["users_per_tenant"] * self.config["tenants"]
-LOG.debug("Creating %(users)d users using %(threads)s threads" %
+LOG.debug("Creating %(users)d users using %(threads)s threads",
 {"users": users_num, "threads": threads})
 self.context["users"] = self._create_users()
 for user in self.context["users"]:
@@ -97,7 +97,7 @@ class FlavorsGenerator(context.Context):
 flavor.set_keys(extra_specs)

 self.context["flavors"][flavor_config["name"]] = flavor.to_dict()
-LOG.debug("Created flavor with id '%s'" % flavor.id)
+LOG.debug("Created flavor with id '%s'", flavor.id)

 @logging.log_task_wrapper(LOG.info, _("Exit context: `flavors`"))
 def cleanup(self):
@@ -104,8 +104,8 @@ class ServerGenerator(context.Context):

 for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants(
 self.context["users"])):
-LOG.debug("Booting servers for user tenant %s "
-% (user["tenant_id"]))
+LOG.debug("Booting servers for user tenant %s ",
+(user["tenant_id"]))
 tmp_context = {"user": user,
 "tenant": self.context["tenants"][tenant_id],
 "task": self.context["task"],
@@ -115,10 +115,10 @@ class ServerGenerator(context.Context):

 LOG.debug("Calling _boot_servers with image_id=%(image_id)s "
 "flavor_id=%(flavor_id)s "
-"servers_per_tenant=%(servers_per_tenant)s"
-% {"image_id": image_id,
+"servers_per_tenant=%(servers_per_tenant)s",
+{"image_id": image_id,
 "flavor_id": flavor_id,
 "servers_per_tenant": servers_per_tenant})

 servers = nova_scenario._boot_servers(image_id, flavor_id,
 requests=servers_per_tenant,
@@ -127,8 +127,8 @@ class ServerGenerator(context.Context):

 current_servers = [server.id for server in servers]

-LOG.debug("Adding booted servers %s to context"
-% current_servers)
+LOG.debug("Adding booted servers %s to context",
+current_servers)

 self.context["tenants"][tenant_id][
 "servers"] = current_servers
@@ -66,8 +66,8 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context):

 containers_per_tenant = self.config["containers_per_tenant"]
 containers_num = len(self.context["tenants"]) * containers_per_tenant
-LOG.debug("Creating %d containers using %d threads." % (containers_num,
+LOG.debug("Creating %d containers using %d threads.", (containers_num,
 threads))
 containers_count = len(self._create_containers(self.context,
 containers_per_tenant,
 threads))
@@ -80,8 +80,8 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context):

 objects_per_container = self.config["objects_per_container"]
 objects_num = containers_num * objects_per_container
-LOG.debug("Creating %d objects using %d threads." % (objects_num,
+LOG.debug("Creating %d objects using %d threads.", (objects_num,
 threads))
 objects_count = len(self._create_objects(self.context,
 objects_per_container,
 self.config["object_size"],
@@ -292,7 +292,7 @@ class HeatScenario(scenario.OpenStackScenario):
 """
 num_instances = self._count_instances(stack)
 expected_instances = num_instances + delta
-LOG.debug("Scaling stack %s from %s to %s instances with %s" %
+LOG.debug("Scaling stack %s from %s to %s instances with %s",
 (stack.id, num_instances, expected_instances, output_key))
 with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
 self._stack_webhook(stack, output_key)
@@ -95,7 +95,7 @@ class CreateAndDeleteCluster(utils.SaharaScenario):

 image_id = self.context["tenant"]["sahara"]["image"]

-LOG.debug("Using Image: %s" % image_id)
+LOG.debug("Using Image: %s", image_id)

 cluster = self._launch_cluster(
 flavor_id=flavor,
@@ -194,7 +194,7 @@ class CreateScaleDeleteCluster(utils.SaharaScenario):

 image_id = self.context["tenant"]["sahara"]["image"]

-LOG.debug("Using Image: %s" % image_id)
+LOG.debug("Using Image: %s", image_id)

 cluster = self._launch_cluster(
 flavor_id=flavor,
@@ -95,7 +95,7 @@ class CreateLaunchJobSequence(utils.SaharaScenario):
 launch_job = CreateLaunchJob(self.context)

 for idx, job in enumerate(jobs):
-LOG.debug("Launching Job. Sequence #%d" % idx)
+LOG.debug("Launching Job. Sequence #%d", idx)
 launch_job.run(job["job_type"], job["configs"], idx)


@@ -130,7 +130,7 @@ class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,):
 # correct 'count' values.
 cluster = self.clients("sahara").clusters.get(cluster_id)

-LOG.debug("Scaling cluster %s with delta %d" %
+LOG.debug("Scaling cluster %s with delta %d",
 (cluster.name, delta))
 if delta == 0:
 # Zero scaling makes no sense.
@@ -167,7 +167,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 floating_ip_pool)

 if floating_ip_pool_value:
-LOG.debug("Using floating ip pool %s." % floating_ip_pool_value)
+LOG.debug("Using floating ip pool %s.", floating_ip_pool_value)
 # If the pool is set by any means assign it to all node groups.
 # If the proxy node feature is enabled, Master Node Group and
 # Proxy Workers should have a floating ip pool set up
@@ -233,7 +233,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 replication_value = min(workers_count, 3)
 # 3 is a default Hadoop replication
 conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version]
-LOG.debug("Using replication factor: %s" % replication_value)
+LOG.debug("Using replication factor: %s", replication_value)
 replication_config = {
 conf["target"]: {
 conf["config_name"]: replication_value
@@ -394,7 +394,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 )

 if wait_active:
-LOG.debug("Starting cluster `%s`" % name)
+LOG.debug("Starting cluster `%s`", name)
 self._wait_active(cluster_object)

 return self.clients("sahara").clusters.get(cluster_object.id)
@@ -454,7 +454,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 :param cluster: cluster to delete
 """

-LOG.debug("Deleting cluster `%s`" % cluster.name)
+LOG.debug("Deleting cluster `%s`", cluster.name)
 self.clients("sahara").clusters.delete(cluster.id)

 utils.wait_for(
@@ -464,7 +464,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 is_ready=self._is_cluster_deleted)

 def _is_cluster_deleted(self, cluster):
-LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`" %
+LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`",
 (cluster.name, cluster.status))
 try:
 self.clients("sahara").clusters.get(cluster.id)
@@ -534,7 +534,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 status = self.clients("sahara").job_executions.get(je_id).info[
 "status"].lower()

-LOG.debug("Checking for Job Execution %s to complete. Status: %s" %
+LOG.debug("Checking for Job Execution %s to complete. Status: %s",
 (je_id, status))
 if status in ("success", "succeeded"):
 return True
@@ -574,8 +574,8 @@ class SaharaScenario(scenario.OpenStackScenario):
 # Taking net id from context.
 net = self.context["tenant"]["networks"][0]
 neutron_net_id = net["id"]
-LOG.debug("Using neutron network %s." % neutron_net_id)
-LOG.debug("Using neutron router %s." % net["router_id"])
+LOG.debug("Using neutron network %s.", neutron_net_id)
+LOG.debug("Using neutron router %s.", net["router_id"])

 return neutron_net_id

@@ -63,8 +63,8 @@ class Host(object):
 stdout=subprocess.PIPE,
 stderr=subprocess.PIPE)
 proc.wait()
-LOG.debug("Host %s is ICMP %s"
-% (server.ip.format(), proc.returncode and "down" or "up"))
+LOG.debug("Host %s is ICMP %s",
+(server.ip.format(), proc.returncode and "down" or "up"))
 if proc.returncode == 0:
 server.status = cls.ICMP_UP_STATUS
 else:
@@ -108,7 +108,7 @@ class TempestConfigfileManager(object):
 uri = parse.urljoin(uri_v3, "/v2.0")
 else:
 # Does Keystone released new version of API ?!
-LOG.debug("Discovered keystone versions: %s" % versions)
+LOG.debug("Discovered keystone versions: %s", versions)
 raise exceptions.RallyException("Failed to discover keystone "
 "auth urls.")

@@ -130,7 +130,7 @@ class TempestContext(context.VerifierContext):

 for role in roles:
 if role not in existing_roles:
-LOG.debug("Creating role '%s'." % role)
+LOG.debug("Creating role '%s'.", role)
 self._created_roles.append(keystoneclient.roles.create(role))

 def _configure_option(self, section, option, value=None,
@@ -138,12 +138,12 @@ class TempestContext(context.VerifierContext):
 option_value = self.conf.get(section, option)
 if not option_value:
 LOG.debug("Option '%s' from '%s' section "
-"is not configured." % (option, section))
+"is not configured.", (option, section))
 if helper_method:
 res = helper_method(*args, **kwargs)
 if res:
 value = res["name"] if "network" in option else res.id
-LOG.debug("Setting value '%s' to option '%s'." % (value, option))
+LOG.debug("Setting value '%s' to option '%s'.", (value, option))
 self.conf.set(section, option, value)
 LOG.debug("Option '{opt}' is configured. "
 "{opt} = {value}".format(opt=option, value=value))
@@ -155,7 +155,7 @@ class TempestContext(context.VerifierContext):
 def _discover_image(self):
 LOG.debug("Trying to discover a public image with name matching "
 "regular expression '%s'. Note that case insensitive "
-"matching is performed." % conf.CONF.tempest.img_name_regex)
+"matching is performed.", conf.CONF.tempest.img_name_regex)
 image_service = image.Image(self.clients)
 images = image_service.list_images(status="active",
 visibility="public")
@@ -163,22 +163,22 @@ class TempestContext(context.VerifierContext):
 if image_obj.name and re.match(conf.CONF.tempest.img_name_regex,
 image_obj.name, re.IGNORECASE):
 LOG.debug("The following public "
-"image discovered: '%s'." % image_obj.name)
+"image discovered: '%s'.", image_obj.name)
 return image_obj

 LOG.debug("There is no public image with name matching regular "
-"expression '%s'." % conf.CONF.tempest.img_name_regex)
+"expression '%s'.", conf.CONF.tempest.img_name_regex)

 def _download_image_from_source(self, target_path, image=None):
 if image:
 LOG.debug("Downloading image '%s' "
-"from Glance to %s." % (image.name, target_path))
+"from Glance to %s.", (image.name, target_path))
 with open(target_path, "wb") as image_file:
 for chunk in self.clients.glance().images.data(image.id):
 image_file.write(chunk)
 else:
 LOG.debug("Downloading image from %s "
-"to %s." % (conf.CONF.tempest.img_url, target_path))
+"to %s.", (conf.CONF.tempest.img_url, target_path))
 try:
 response = requests.get(conf.CONF.tempest.img_url, stream=True)
 except requests.ConnectionError as err:
|
|||||||
def _download_image(self):
|
def _download_image(self):
|
||||||
image_path = os.path.join(self.data_dir, self.image_name)
|
image_path = os.path.join(self.data_dir, self.image_name)
|
||||||
if os.path.isfile(image_path):
|
if os.path.isfile(image_path):
|
||||||
LOG.debug("Image is already downloaded to %s." % image_path)
|
LOG.debug("Image is already downloaded to %s.", image_path)
|
||||||
return
|
return
|
||||||
|
|
||||||
if conf.CONF.tempest.img_name_regex:
|
if conf.CONF.tempest.img_name_regex:
|
||||||
@ -221,7 +221,7 @@ class TempestContext(context.VerifierContext):
|
|||||||
image_obj = self._discover_image()
|
image_obj = self._discover_image()
|
||||||
if image_obj:
|
if image_obj:
|
||||||
LOG.debug("Using image '%s' (ID = %s) "
|
LOG.debug("Using image '%s' (ID = %s) "
|
||||||
"for the tests." % (image_obj.name, image_obj.id))
|
"for the tests.", (image_obj.name, image_obj.id))
|
||||||
return image_obj
|
return image_obj
|
||||||
|
|
||||||
params = {
|
params = {
|
||||||
@@ -235,7 +235,7 @@ class TempestContext(context.VerifierContext):
 image_service = image.Image(self.clients)
 image_obj = image_service.create_image(**params)
 LOG.debug("Image '%s' (ID = %s) has been "
-"successfully created!" % (image_obj.name, image_obj.id))
+"successfully created!", (image_obj.name, image_obj.id))
 self._created_images.append(image_obj)

 return image_obj
@@ -244,7 +244,7 @@ class TempestContext(context.VerifierContext):
 novaclient = self.clients.nova()

 LOG.debug("Trying to discover a flavor with the following "
-"properties: RAM = %dMB, VCPUs = 1, disk = 0GB." % flv_ram)
+"properties: RAM = %dMB, VCPUs = 1, disk = 0GB.", flv_ram)
 for flavor in novaclient.flavors.list():
 if (flavor.ram == flv_ram and
 flavor.vcpus == 1 and flavor.disk == 0):
@@ -262,10 +262,10 @@ class TempestContext(context.VerifierContext):
 "disk": 0
 }
 LOG.debug("Creating flavor '%s' with the following properties: RAM "
-"= %dMB, VCPUs = 1, disk = 0GB." % (params["name"], flv_ram))
+"= %dMB, VCPUs = 1, disk = 0GB.", (params["name"], flv_ram))
 flavor = novaclient.flavors.create(**params)
 LOG.debug("Flavor '%s' (ID = %s) has been "
-"successfully created!" % (flavor.name, flavor.id))
+"successfully created!", (flavor.name, flavor.id))
 self._created_flavors.append(flavor)

 return flavor
@@ -285,14 +285,14 @@ class TempestContext(context.VerifierContext):
 def _cleanup_tempest_roles(self):
 keystoneclient = self.clients.keystone()
 for role in self._created_roles:
-LOG.debug("Deleting role '%s'." % role.name)
+LOG.debug("Deleting role '%s'.", role.name)
 keystoneclient.roles.delete(role.id)
-LOG.debug("Role '%s' has been deleted." % role.name)
+LOG.debug("Role '%s' has been deleted.", role.name)

 def _cleanup_images(self):
 image_service = image.Image(self.clients)
 for image_obj in self._created_images:
-LOG.debug("Deleting image '%s'." % image_obj.name)
+LOG.debug("Deleting image '%s'.", image_obj.name)
 self.clients.glance().images.delete(image_obj.id)
 task_utils.wait_for_status(
 image_obj, ["deleted", "pending_delete"],
@@ -301,15 +301,15 @@ class TempestContext(context.VerifierContext):
 timeout=conf.CONF.benchmark.glance_image_delete_timeout,
 check_interval=conf.CONF.benchmark.
 glance_image_delete_poll_interval)
-LOG.debug("Image '%s' has been deleted." % image_obj.name)
+LOG.debug("Image '%s' has been deleted.", image_obj.name)
 self._remove_opt_value_from_config("compute", image_obj.id)

 def _cleanup_flavors(self):
 novaclient = self.clients.nova()
 for flavor in self._created_flavors:
-LOG.debug("Deleting flavor '%s'." % flavor.name)
+LOG.debug("Deleting flavor '%s'.", flavor.name)
 novaclient.flavors.delete(flavor.id)
-LOG.debug("Flavor '%s' has been deleted." % flavor.name)
+LOG.debug("Flavor '%s' has been deleted.", flavor.name)
 self._remove_opt_value_from_config("compute", flavor.id)
 self._remove_opt_value_from_config("orchestration", flavor.id)

@@ -325,6 +325,6 @@ class TempestContext(context.VerifierContext):
 for option, value in self.conf.items(section):
 if opt_value == value:
 LOG.debug("Removing value '%s' of option '%s' "
-"from Tempest config file." % (opt_value, option))
+"from Tempest config file.", (opt_value, option))
 self.conf.set(section, option, "")
-LOG.debug("Value '%s' has been removed." % opt_value)
+LOG.debug("Value '%s' has been removed.", opt_value)
@@ -44,7 +44,7 @@ def generate_cidr(start_cidr="10.2.0.0/24"):
 :returns: next available CIDR str
 """
 cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr)))
-LOG.debug("CIDR generated: %s" % cidr)
+LOG.debug("CIDR generated: %s", cidr)
 return cidr


@@ -243,7 +243,7 @@ def _read_requirements():
 """Read all rally requirements."""
 LOG.info("Reading rally requirements...")
 for file_name in RALLY_REQUIREMENTS_FILES:
-LOG.debug("Try to read '%s'." % file_name)
+LOG.debug("Try to read '%s'.", file_name)
 with open(file_name) as f:
 data = f.read()
 LOG.info("Parsing requirements from %s." % file_name)
@@ -263,7 +263,7 @@ def _sync():
 LOG.info("Obtaining global-requirements...")
 for i in range(0, len(GLOBAL_REQUIREMENTS_LOCATIONS)):
 url = GLOBAL_REQUIREMENTS_LOCATIONS[i] + GLOBAL_REQUIREMENTS_FILENAME
-LOG.debug("Try to obtain global-requirements from %s" % url)
+LOG.debug("Try to obtain global-requirements from %s", url)
 try:
 raw_gr = requests.get(url).text
 except requests.ConnectionError as e:
@@ -309,7 +309,7 @@ def format_requirements():
 def add_uppers():
 """Obtains latest version of packages and put them to requirements."""
 for filename, requirements in _sync():
-LOG.info("Obtaining latest versions of packages for %s." % filename)
+LOG.info("Obtaining latest versions of packages for %s.", filename)
 for req in requirements:
 if isinstance(req, Requirement):
 if isinstance(req.version, dict) and not req.version["max"]: