From 9c126664b2ef9e7c36c79eee0df24af647f4ab1a Mon Sep 17 00:00:00 2001
From: "bhavani.cr"
Date: Thu, 13 Jul 2017 17:26:17 +0530
Subject: [PATCH] Handle log message interpolation by the logger in
 plugins/openstack/

According to OpenStack Guideline[1], logged string messages should be
interpolated by the logger.

[1]: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-log-messages

Change-Id: Ie7291889337a7ffe7877910d3c3a24fd670aba94
---
 .../plugins/implementation/context_plugin.rst |  4 +-
 rally/plugins/openstack/cleanup/manager.py    |  4 +-
 rally/plugins/openstack/cleanup/resources.py  |  4 +-
 .../openstack/context/cinder/volume_types.py  |  2 +-
 .../plugins/openstack/context/ec2/servers.py  |  4 +-
 .../openstack/context/keystone/roles.py       |  4 +-
 .../openstack/context/keystone/users.py       |  6 +--
 .../plugins/openstack/context/nova/flavors.py |  2 +-
 .../plugins/openstack/context/nova/servers.py | 16 +++----
 .../openstack/context/swift/objects.py        |  8 ++--
 .../plugins/openstack/scenarios/heat/utils.py |  2 +-
 .../openstack/scenarios/sahara/clusters.py    |  4 +-
 .../openstack/scenarios/sahara/jobs.py        |  4 +-
 .../openstack/scenarios/sahara/utils.py       | 16 +++----
 rally/plugins/openstack/scenarios/vm/utils.py |  4 +-
 .../openstack/verification/tempest/config.py  |  2 +-
 .../openstack/verification/tempest/context.py | 44 +++++++++----------
 rally/plugins/openstack/wrappers/network.py   |  2 +-
 tests/ci/sync_requirements.py                 |  6 +--
 19 files changed, 69 insertions(+), 69 deletions(-)

diff --git a/doc/source/plugins/implementation/context_plugin.rst b/doc/source/plugins/implementation/context_plugin.rst
index 166b0dfa..13f8c829 100644
--- a/doc/source/plugins/implementation/context_plugin.rst
+++ b/doc/source/plugins/implementation/context_plugin.rst
@@ -84,7 +84,7 @@ implement the Context API: the *setup()* method that creates a flavor and the
                 ram=self.config.get("ram", 1),
                 vcpus=self.config.get("vcpus", 1),
                 disk=self.config.get("disk", 1)).to_dict()
-            LOG.debug("Flavor with id '%s'" % self.context["flavor"]["id"])
+            LOG.debug("Flavor with id '%s'", self.context["flavor"]["id"])
         except Exception as e:
             msg = "Can't create flavor: %s" % e.message
             if logging.is_debug():
@@ -97,7 +97,7 @@ implement the Context API: the *setup()* method that creates a flavor and the
         try:
             nova = osclients.Clients(self.context["admin"]["credential"]).nova()
             nova.flavors.delete(self.context["flavor"]["id"])
-            LOG.debug("Flavor '%s' deleted" % self.context["flavor"]["id"])
+            LOG.debug("Flavor '%s' deleted", self.context["flavor"]["id"])
         except Exception as e:
             msg = "Can't delete flavor: %s" % e.message
             if logging.is_debug():
diff --git a/rally/plugins/openstack/cleanup/manager.py b/rally/plugins/openstack/cleanup/manager.py
index 7c8b6679..ee090303 100644
--- a/rally/plugins/openstack/cleanup/manager.py
+++ b/rally/plugins/openstack/cleanup/manager.py
@@ -79,7 +79,7 @@ class SeekAndDestroy(object):
         }
 
         LOG.debug(
-            "Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)" %
+            "Deleting %(service)s %(resource)s object %(name)s (%(uuid)s)",
             msg_kw)
 
         try:
@@ -280,7 +280,7 @@ def cleanup(names=None, admin_required=None, admin=None, users=None,
                           rutils.RandomNameGeneratorMixin):
                 resource_classes.append(superclass)
     for manager in find_resource_managers(names, admin_required):
-        LOG.debug("Cleaning up %(service)s %(resource)s objects" %
+        LOG.debug("Cleaning up %(service)s %(resource)s objects",
                   {"service": manager._service,
                    "resource": manager._resource})
         SeekAndDestroy(manager, admin, users,
diff --git a/rally/plugins/openstack/cleanup/resources.py b/rally/plugins/openstack/cleanup/resources.py
index 0f8b832d..2f5dac0c 100644
--- a/rally/plugins/openstack/cleanup/resources.py
+++ b/rally/plugins/openstack/cleanup/resources.py
@@ -445,8 +445,8 @@ class NeutronPort(NeutronMixin):
         except neutron_exceptions.PortNotFoundClient:
             # Port can be already auto-deleted, skip silently
             LOG.debug("Port %s was not deleted. Skip silently because "
-                      "port can be already auto-deleted."
-                      % self.id())
+                      "port can be already auto-deleted.",
+                      self.id())
 
 
 @base.resource("neutron", "subnet", order=next(_neutron_order),
diff --git a/rally/plugins/openstack/context/cinder/volume_types.py b/rally/plugins/openstack/context/cinder/volume_types.py
index dd676978..3605f545 100644
--- a/rally/plugins/openstack/context/cinder/volume_types.py
+++ b/rally/plugins/openstack/context/cinder/volume_types.py
@@ -46,7 +46,7 @@ class VolumeTypeGenerator(context.Context):
             admin_clients, name_generator=self.generate_random_name)
         self.context["volume_types"] = []
         for vtype_name in self.config:
-            LOG.debug("Creating Cinder volume type %s" % vtype_name)
+            LOG.debug("Creating Cinder volume type %s", vtype_name)
             vtype = cinder_service.create_volume_type(vtype_name)
             self.context["volume_types"].append({"id": vtype.id,
                                                  "name": vtype_name})
diff --git a/rally/plugins/openstack/context/ec2/servers.py b/rally/plugins/openstack/context/ec2/servers.py
index a133c7a3..1596240e 100644
--- a/rally/plugins/openstack/context/ec2/servers.py
+++ b/rally/plugins/openstack/context/ec2/servers.py
@@ -73,8 +73,8 @@ class EC2ServerGenerator(context.Context):
 
         for user, tenant_id in rutils.iterate_per_tenants(
                 self.context["users"]):
-            LOG.debug("Booting servers for tenant %s "
-                      % (user["tenant_id"]))
+            LOG.debug("Booting servers for tenant %s ",
+                      user["tenant_id"])
             ec2_scenario = ec2_utils.EC2Scenario({
                 "user": user,
                 "task": self.context["task"],
diff --git a/rally/plugins/openstack/context/keystone/roles.py b/rally/plugins/openstack/context/keystone/roles.py
index 18c5a62e..f3bc0869 100644
--- a/rally/plugins/openstack/context/keystone/roles.py
+++ b/rally/plugins/openstack/context/keystone/roles.py
@@ -86,7 +86,7 @@ class RoleGenerator(context.Context):
             role = self._get_role_object(context_role)
             roles_dict[role.id] = role.name
             LOG.debug("Adding role %(role_name)s having ID %(role_id)s "
-                      "to all users using %(threads)s threads" %
+                      "to all users using %(threads)s threads",
                       {"role_name": role.name,
                        "role_id": role.id,
                        "threads": threads})
@@ -104,7 +104,7 @@ class RoleGenerator(context.Context):
 
         def publish(queue):
             for role_id in self.context["roles"]:
-                LOG.debug("Removing role %s from all users" % role_id)
+                LOG.debug("Removing role %s from all users", role_id)
                 for user in self.context["users"]:
                     args = (role_id, user["id"], user["tenant_id"])
                     queue.append(args)
diff --git a/rally/plugins/openstack/context/keystone/users.py b/rally/plugins/openstack/context/keystone/users.py
index def8989f..fece3c22 100644
--- a/rally/plugins/openstack/context/keystone/users.py
+++ b/rally/plugins/openstack/context/keystone/users.py
@@ -136,7 +136,7 @@ class UserGenerator(context.Context):
         use_sg, msg = network.wrap(clients, self).supports_extension(
             "security-group")
         if not use_sg:
-            LOG.debug("Security group context is disabled: %s" % msg)
+            LOG.debug("Security group context is disabled: %s", msg)
             return
 
         for user, tenant_id in rutils.iterate_per_tenants(
@@ -261,7 +261,7 @@ class UserGenerator(context.Context):
pattern.""" threads = self.config["resource_management_workers"] - LOG.debug("Creating %(tenants)d tenants using %(threads)s threads" % + LOG.debug("Creating %(tenants)d tenants using %(threads)s threads", {"tenants": self.config["tenants"], "threads": threads}) self.context["tenants"] = self._create_tenants() @@ -271,7 +271,7 @@ class UserGenerator(context.Context): msg=_("Failed to create the requested number of tenants.")) users_num = self.config["users_per_tenant"] * self.config["tenants"] - LOG.debug("Creating %(users)d users using %(threads)s threads" % + LOG.debug("Creating %(users)d users using %(threads)s threads", {"users": users_num, "threads": threads}) self.context["users"] = self._create_users() for user in self.context["users"]: diff --git a/rally/plugins/openstack/context/nova/flavors.py b/rally/plugins/openstack/context/nova/flavors.py index 7de90367..c65e21a5 100644 --- a/rally/plugins/openstack/context/nova/flavors.py +++ b/rally/plugins/openstack/context/nova/flavors.py @@ -97,7 +97,7 @@ class FlavorsGenerator(context.Context): flavor.set_keys(extra_specs) self.context["flavors"][flavor_config["name"]] = flavor.to_dict() - LOG.debug("Created flavor with id '%s'" % flavor.id) + LOG.debug("Created flavor with id '%s'", flavor.id) @logging.log_task_wrapper(LOG.info, _("Exit context: `flavors`")) def cleanup(self): diff --git a/rally/plugins/openstack/context/nova/servers.py b/rally/plugins/openstack/context/nova/servers.py index 234367e0..fd1c041c 100755 --- a/rally/plugins/openstack/context/nova/servers.py +++ b/rally/plugins/openstack/context/nova/servers.py @@ -104,8 +104,8 @@ class ServerGenerator(context.Context): for iter_, (user, tenant_id) in enumerate(rutils.iterate_per_tenants( self.context["users"])): - LOG.debug("Booting servers for user tenant %s " - % (user["tenant_id"])) + LOG.debug("Booting servers for user tenant %s ", + (user["tenant_id"])) tmp_context = {"user": user, "tenant": self.context["tenants"][tenant_id], "task": self.context["task"], @@ -115,10 +115,10 @@ class ServerGenerator(context.Context): LOG.debug("Calling _boot_servers with image_id=%(image_id)s " "flavor_id=%(flavor_id)s " - "servers_per_tenant=%(servers_per_tenant)s" - % {"image_id": image_id, - "flavor_id": flavor_id, - "servers_per_tenant": servers_per_tenant}) + "servers_per_tenant=%(servers_per_tenant)s", + {"image_id": image_id, + "flavor_id": flavor_id, + "servers_per_tenant": servers_per_tenant}) servers = nova_scenario._boot_servers(image_id, flavor_id, requests=servers_per_tenant, @@ -127,8 +127,8 @@ class ServerGenerator(context.Context): current_servers = [server.id for server in servers] - LOG.debug("Adding booted servers %s to context" - % current_servers) + LOG.debug("Adding booted servers %s to context", + current_servers) self.context["tenants"][tenant_id][ "servers"] = current_servers diff --git a/rally/plugins/openstack/context/swift/objects.py b/rally/plugins/openstack/context/swift/objects.py index ac42b099..6cf83fd9 100644 --- a/rally/plugins/openstack/context/swift/objects.py +++ b/rally/plugins/openstack/context/swift/objects.py @@ -66,8 +66,8 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context): containers_per_tenant = self.config["containers_per_tenant"] containers_num = len(self.context["tenants"]) * containers_per_tenant - LOG.debug("Creating %d containers using %d threads." 
-        LOG.debug("Creating %d containers using %d threads." % (containers_num,
-                                                                threads))
+        LOG.debug("Creating %d containers using %d threads.",
+                  containers_num, threads)
         containers_count = len(self._create_containers(self.context,
                                                        containers_per_tenant,
                                                        threads))
@@ -80,8 +80,8 @@ class SwiftObjectGenerator(swift_utils.SwiftObjectMixin, context.Context):
 
         objects_per_container = self.config["objects_per_container"]
         objects_num = containers_num * objects_per_container
-        LOG.debug("Creating %d objects using %d threads." % (objects_num,
-                                                             threads))
+        LOG.debug("Creating %d objects using %d threads.",
+                  objects_num, threads)
         objects_count = len(self._create_objects(self.context,
                                                  objects_per_container,
                                                  self.config["object_size"],
diff --git a/rally/plugins/openstack/scenarios/heat/utils.py b/rally/plugins/openstack/scenarios/heat/utils.py
index c6c4ce75..dd57f335 100644
--- a/rally/plugins/openstack/scenarios/heat/utils.py
+++ b/rally/plugins/openstack/scenarios/heat/utils.py
@@ -292,7 +292,7 @@ class HeatScenario(scenario.OpenStackScenario):
         """
         num_instances = self._count_instances(stack)
         expected_instances = num_instances + delta
-        LOG.debug("Scaling stack %s from %s to %s instances with %s" %
-                  (stack.id, num_instances, expected_instances, output_key))
+        LOG.debug("Scaling stack %s from %s to %s instances with %s",
+                  stack.id, num_instances, expected_instances, output_key)
         with atomic.ActionTimer(self, "heat.scale_with_%s" % output_key):
             self._stack_webhook(stack, output_key)
diff --git a/rally/plugins/openstack/scenarios/sahara/clusters.py b/rally/plugins/openstack/scenarios/sahara/clusters.py
index beaf9fa3..6902d18e 100644
--- a/rally/plugins/openstack/scenarios/sahara/clusters.py
+++ b/rally/plugins/openstack/scenarios/sahara/clusters.py
@@ -95,7 +95,7 @@ class CreateAndDeleteCluster(utils.SaharaScenario):
 
         image_id = self.context["tenant"]["sahara"]["image"]
 
-        LOG.debug("Using Image: %s" % image_id)
+        LOG.debug("Using Image: %s", image_id)
 
         cluster = self._launch_cluster(
             flavor_id=flavor,
@@ -194,7 +194,7 @@ class CreateScaleDeleteCluster(utils.SaharaScenario):
 
         image_id = self.context["tenant"]["sahara"]["image"]
 
-        LOG.debug("Using Image: %s" % image_id)
+        LOG.debug("Using Image: %s", image_id)
 
         cluster = self._launch_cluster(
             flavor_id=flavor,
diff --git a/rally/plugins/openstack/scenarios/sahara/jobs.py b/rally/plugins/openstack/scenarios/sahara/jobs.py
index 9619c4d1..ea64869b 100644
--- a/rally/plugins/openstack/scenarios/sahara/jobs.py
+++ b/rally/plugins/openstack/scenarios/sahara/jobs.py
@@ -95,7 +95,7 @@ class CreateLaunchJobSequence(utils.SaharaScenario):
         launch_job = CreateLaunchJob(self.context)
 
         for idx, job in enumerate(jobs):
-            LOG.debug("Launching Job. Sequence #%d" % idx)
+            LOG.debug("Launching Job. Sequence #%d", idx)
             launch_job.run(job["job_type"],
                            job["configs"],
                            idx)
@@ -130,7 +130,7 @@ class CreateLaunchJobSequenceWithScaling(utils.SaharaScenario,):
             # correct 'count' values.
             cluster = self.clients("sahara").clusters.get(cluster_id)
 
-            LOG.debug("Scaling cluster %s with delta %d" %
-                      (cluster.name, delta))
+            LOG.debug("Scaling cluster %s with delta %d",
+                      cluster.name, delta)
             if delta == 0:
                 # Zero scaling makes no sense.
diff --git a/rally/plugins/openstack/scenarios/sahara/utils.py b/rally/plugins/openstack/scenarios/sahara/utils.py
index e3577340..ea1073b3 100644
--- a/rally/plugins/openstack/scenarios/sahara/utils.py
+++ b/rally/plugins/openstack/scenarios/sahara/utils.py
@@ -167,7 +167,7 @@ class SaharaScenario(scenario.OpenStackScenario):
             floating_ip_pool)
         if floating_ip_pool_value:
-            LOG.debug("Using floating ip pool %s."
-                      % floating_ip_pool_value)
+            LOG.debug("Using floating ip pool %s.",
+                      floating_ip_pool_value)
             # If the pool is set by any means assign it to all node groups.
             # If the proxy node feature is enabled, Master Node Group and
             # Proxy Workers should have a floating ip pool set up
@@ -233,7 +233,7 @@ class SaharaScenario(scenario.OpenStackScenario):
             replication_value = min(workers_count, 3)
             # 3 is a default Hadoop replication
             conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version]
-            LOG.debug("Using replication factor: %s" % replication_value)
+            LOG.debug("Using replication factor: %s", replication_value)
             replication_config = {
                 conf["target"]: {
                     conf["config_name"]: replication_value
@@ -394,7 +394,7 @@ class SaharaScenario(scenario.OpenStackScenario):
         )
 
         if wait_active:
-            LOG.debug("Starting cluster `%s`" % name)
+            LOG.debug("Starting cluster `%s`", name)
             self._wait_active(cluster_object)
 
         return self.clients("sahara").clusters.get(cluster_object.id)
@@ -454,7 +454,7 @@ class SaharaScenario(scenario.OpenStackScenario):
 
         :param cluster: cluster to delete
         """
-        LOG.debug("Deleting cluster `%s`" % cluster.name)
+        LOG.debug("Deleting cluster `%s`", cluster.name)
         self.clients("sahara").clusters.delete(cluster.id)
 
         utils.wait_for(
@@ -464,7 +464,7 @@ class SaharaScenario(scenario.OpenStackScenario):
             is_ready=self._is_cluster_deleted)
 
     def _is_cluster_deleted(self, cluster):
-        LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`" %
-                  (cluster.name, cluster.status))
+        LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`",
+                  cluster.name, cluster.status)
         try:
             self.clients("sahara").clusters.get(cluster.id)
@@ -534,7 +534,7 @@ class SaharaScenario(scenario.OpenStackScenario):
         status = self.clients("sahara").job_executions.get(je_id).info[
             "status"].lower()
 
-        LOG.debug("Checking for Job Execution %s to complete. Status: %s" %
-                  (je_id, status))
+        LOG.debug("Checking for Job Execution %s to complete. Status: %s",
+                  je_id, status)
         if status in ("success", "succeeded"):
             return True
@@ -574,8 +574,8 @@ class SaharaScenario(scenario.OpenStackScenario):
             # Taking net id from context.
             net = self.context["tenant"]["networks"][0]
             neutron_net_id = net["id"]
-            LOG.debug("Using neutron network %s." % neutron_net_id)
-            LOG.debug("Using neutron router %s." % net["router_id"])
+            LOG.debug("Using neutron network %s.", neutron_net_id)
+            LOG.debug("Using neutron router %s.", net["router_id"])
 
         return neutron_net_id
 
diff --git a/rally/plugins/openstack/scenarios/vm/utils.py b/rally/plugins/openstack/scenarios/vm/utils.py
index 7895a6b8..0b8baf66 100644
--- a/rally/plugins/openstack/scenarios/vm/utils.py
+++ b/rally/plugins/openstack/scenarios/vm/utils.py
@@ -63,8 +63,8 @@ class Host(object):
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
         proc.wait()
-        LOG.debug("Host %s is ICMP %s"
-                  % (server.ip.format(), proc.returncode and "down" or "up"))
+        LOG.debug("Host %s is ICMP %s",
+                  server.ip.format(), proc.returncode and "down" or "up")
         if proc.returncode == 0:
             server.status = cls.ICMP_UP_STATUS
         else:
diff --git a/rally/plugins/openstack/verification/tempest/config.py b/rally/plugins/openstack/verification/tempest/config.py
index 0cbb5072..83df12f5 100644
--- a/rally/plugins/openstack/verification/tempest/config.py
+++ b/rally/plugins/openstack/verification/tempest/config.py
@@ -108,7 +108,7 @@ class TempestConfigfileManager(object):
             uri = parse.urljoin(uri_v3, "/v2.0")
         else:
             # Does Keystone released new version of API ?!
- LOG.debug("Discovered keystone versions: %s" % versions) + LOG.debug("Discovered keystone versions: %s", versions) raise exceptions.RallyException("Failed to discover keystone " "auth urls.") diff --git a/rally/plugins/openstack/verification/tempest/context.py b/rally/plugins/openstack/verification/tempest/context.py index a6649748..fba6513b 100644 --- a/rally/plugins/openstack/verification/tempest/context.py +++ b/rally/plugins/openstack/verification/tempest/context.py @@ -130,7 +130,7 @@ class TempestContext(context.VerifierContext): for role in roles: if role not in existing_roles: - LOG.debug("Creating role '%s'." % role) + LOG.debug("Creating role '%s'.", role) self._created_roles.append(keystoneclient.roles.create(role)) def _configure_option(self, section, option, value=None, @@ -138,12 +138,12 @@ class TempestContext(context.VerifierContext): option_value = self.conf.get(section, option) if not option_value: LOG.debug("Option '%s' from '%s' section " - "is not configured." % (option, section)) + "is not configured.", (option, section)) if helper_method: res = helper_method(*args, **kwargs) if res: value = res["name"] if "network" in option else res.id - LOG.debug("Setting value '%s' to option '%s'." % (value, option)) + LOG.debug("Setting value '%s' to option '%s'.", (value, option)) self.conf.set(section, option, value) LOG.debug("Option '{opt}' is configured. " "{opt} = {value}".format(opt=option, value=value)) @@ -155,7 +155,7 @@ class TempestContext(context.VerifierContext): def _discover_image(self): LOG.debug("Trying to discover a public image with name matching " "regular expression '%s'. Note that case insensitive " - "matching is performed." % conf.CONF.tempest.img_name_regex) + "matching is performed.", conf.CONF.tempest.img_name_regex) image_service = image.Image(self.clients) images = image_service.list_images(status="active", visibility="public") @@ -163,22 +163,22 @@ class TempestContext(context.VerifierContext): if image_obj.name and re.match(conf.CONF.tempest.img_name_regex, image_obj.name, re.IGNORECASE): LOG.debug("The following public " - "image discovered: '%s'." % image_obj.name) + "image discovered: '%s'.", image_obj.name) return image_obj LOG.debug("There is no public image with name matching regular " - "expression '%s'." % conf.CONF.tempest.img_name_regex) + "expression '%s'.", conf.CONF.tempest.img_name_regex) def _download_image_from_source(self, target_path, image=None): if image: LOG.debug("Downloading image '%s' " - "from Glance to %s." % (image.name, target_path)) + "from Glance to %s.", (image.name, target_path)) with open(target_path, "wb") as image_file: for chunk in self.clients.glance().images.data(image.id): image_file.write(chunk) else: LOG.debug("Downloading image from %s " - "to %s." % (conf.CONF.tempest.img_url, target_path)) + "to %s.", (conf.CONF.tempest.img_url, target_path)) try: response = requests.get(conf.CONF.tempest.img_url, stream=True) except requests.ConnectionError as err: @@ -206,7 +206,7 @@ class TempestContext(context.VerifierContext): def _download_image(self): image_path = os.path.join(self.data_dir, self.image_name) if os.path.isfile(image_path): - LOG.debug("Image is already downloaded to %s." % image_path) + LOG.debug("Image is already downloaded to %s.", image_path) return if conf.CONF.tempest.img_name_regex: @@ -221,7 +221,7 @@ class TempestContext(context.VerifierContext): image_obj = self._discover_image() if image_obj: LOG.debug("Using image '%s' (ID = %s) " - "for the tests." 
-                          "for the tests." % (image_obj.name, image_obj.id))
+                          "for the tests.", image_obj.name, image_obj.id)
                 return image_obj
 
         params = {
@@ -235,7 +235,7 @@ class TempestContext(context.VerifierContext):
 
         image_service = image.Image(self.clients)
         image_obj = image_service.create_image(**params)
         LOG.debug("Image '%s' (ID = %s) has been "
-                  "successfully created!" % (image_obj.name, image_obj.id))
+                  "successfully created!", image_obj.name, image_obj.id)
         self._created_images.append(image_obj)
         return image_obj
@@ -244,7 +244,7 @@ class TempestContext(context.VerifierContext):
         novaclient = self.clients.nova()
 
         LOG.debug("Trying to discover a flavor with the following "
-                  "properties: RAM = %dMB, VCPUs = 1, disk = 0GB." % flv_ram)
+                  "properties: RAM = %dMB, VCPUs = 1, disk = 0GB.", flv_ram)
         for flavor in novaclient.flavors.list():
             if (flavor.ram == flv_ram and
                     flavor.vcpus == 1 and flavor.disk == 0):
@@ -262,10 +262,10 @@ class TempestContext(context.VerifierContext):
             "disk": 0
         }
         LOG.debug("Creating flavor '%s' with the following properties: RAM "
-                  "= %dMB, VCPUs = 1, disk = 0GB." % (params["name"], flv_ram))
+                  "= %dMB, VCPUs = 1, disk = 0GB.", params["name"], flv_ram)
         flavor = novaclient.flavors.create(**params)
         LOG.debug("Flavor '%s' (ID = %s) has been "
-                  "successfully created!" % (flavor.name, flavor.id))
+                  "successfully created!", flavor.name, flavor.id)
         self._created_flavors.append(flavor)
         return flavor
 
@@ -285,14 +285,14 @@ class TempestContext(context.VerifierContext):
     def _cleanup_tempest_roles(self):
         keystoneclient = self.clients.keystone()
         for role in self._created_roles:
-            LOG.debug("Deleting role '%s'." % role.name)
+            LOG.debug("Deleting role '%s'.", role.name)
             keystoneclient.roles.delete(role.id)
-            LOG.debug("Role '%s' has been deleted." % role.name)
+            LOG.debug("Role '%s' has been deleted.", role.name)
 
     def _cleanup_images(self):
         image_service = image.Image(self.clients)
         for image_obj in self._created_images:
-            LOG.debug("Deleting image '%s'." % image_obj.name)
+            LOG.debug("Deleting image '%s'.", image_obj.name)
             self.clients.glance().images.delete(image_obj.id)
             task_utils.wait_for_status(
                 image_obj, ["deleted", "pending_delete"],
@@ -301,15 +301,15 @@ class TempestContext(context.VerifierContext):
                 timeout=conf.CONF.benchmark.glance_image_delete_timeout,
                 check_interval=conf.CONF.benchmark.
                 glance_image_delete_poll_interval)
-            LOG.debug("Image '%s' has been deleted." % image_obj.name)
+            LOG.debug("Image '%s' has been deleted.", image_obj.name)
             self._remove_opt_value_from_config("compute", image_obj.id)
 
     def _cleanup_flavors(self):
         novaclient = self.clients.nova()
         for flavor in self._created_flavors:
-            LOG.debug("Deleting flavor '%s'." % flavor.name)
+            LOG.debug("Deleting flavor '%s'.", flavor.name)
             novaclient.flavors.delete(flavor.id)
-            LOG.debug("Flavor '%s' has been deleted." % flavor.name)
+            LOG.debug("Flavor '%s' has been deleted.", flavor.name)
             self._remove_opt_value_from_config("compute", flavor.id)
             self._remove_opt_value_from_config("orchestration", flavor.id)
 
@@ -325,6 +325,6 @@ class TempestContext(context.VerifierContext):
             for option, value in self.conf.items(section):
                 if opt_value == value:
                     LOG.debug("Removing value '%s' of option '%s' "
-                              "from Tempest config file." % (opt_value, option))
+                              "from Tempest config file.", opt_value, option)
                     self.conf.set(section, option, "")
-                    LOG.debug("Value '%s' has been removed." % opt_value)
+                    LOG.debug("Value '%s' has been removed.", opt_value)
diff --git a/rally/plugins/openstack/wrappers/network.py b/rally/plugins/openstack/wrappers/network.py
index e69d32d7..cdf0c8ff 100644
--- a/rally/plugins/openstack/wrappers/network.py
+++ b/rally/plugins/openstack/wrappers/network.py
@@ -44,7 +44,7 @@ def generate_cidr(start_cidr="10.2.0.0/24"):
     :returns: next available CIDR str
     """
     cidr = str(netaddr.IPNetwork(start_cidr).next(next(cidr_incr)))
-    LOG.debug("CIDR generated: %s" % cidr)
+    LOG.debug("CIDR generated: %s", cidr)
     return cidr
 
 
diff --git a/tests/ci/sync_requirements.py b/tests/ci/sync_requirements.py
index 641cebc9..ed0fecab 100644
--- a/tests/ci/sync_requirements.py
+++ b/tests/ci/sync_requirements.py
@@ -243,7 +243,7 @@ def _read_requirements():
     """Read all rally requirements."""
     LOG.info("Reading rally requirements...")
    for file_name in RALLY_REQUIREMENTS_FILES:
-        LOG.debug("Try to read '%s'." % file_name)
+        LOG.debug("Try to read '%s'.", file_name)
         with open(file_name) as f:
             data = f.read()
         LOG.info("Parsing requirements from %s." % file_name)
@@ -263,7 +263,7 @@ def _sync():
     LOG.info("Obtaining global-requirements...")
     for i in range(0, len(GLOBAL_REQUIREMENTS_LOCATIONS)):
         url = GLOBAL_REQUIREMENTS_LOCATIONS[i] + GLOBAL_REQUIREMENTS_FILENAME
-        LOG.debug("Try to obtain global-requirements from %s" % url)
+        LOG.debug("Try to obtain global-requirements from %s", url)
         try:
             raw_gr = requests.get(url).text
         except requests.ConnectionError as e:
@@ -309,7 +309,7 @@ def format_requirements():
 def add_uppers():
     """Obtains latest version of packages and put them to requirements."""
     for filename, requirements in _sync():
-        LOG.info("Obtaining latest versions of packages for %s." % filename)
+        LOG.info("Obtaining latest versions of packages for %s.", filename)
         for req in requirements:
             if isinstance(req, Requirement):
                 if isinstance(req.version, dict) and not req.version["max"]:
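
Reviewer note on the guideline being applied: deferring interpolation hands
the format arguments to the logging call instead of merging them into the
string with "%" at call time, so the message is only built when the record is
actually emitted. A minimal standalone sketch of both forms and the one
pitfall to watch for in review, using the stdlib logging module (which
oslo.log wraps; the values here are illustrative, not from the patch):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    cidr = "10.2.0.0/24"
    # Eager: the string is formatted even when DEBUG is disabled.
    LOG.debug("CIDR generated: %s" % cidr)
    # Lazy: the logger formats only if the record is actually handled.
    LOG.debug("CIDR generated: %s", cidr)

    # With several placeholders, the arguments must be passed separately.
    LOG.debug("Host %s is ICMP %s", "10.0.0.1", "up")
    # Wrapping them in a tuple supplies ONE argument for two %s slots, so
    # formatting fails at emit time ("not enough arguments for format
    # string") and the message is lost; several hunks above pass the
    # arguments separately for exactly this reason.
    LOG.debug("Host %s is ICMP %s", ("10.0.0.1", "up"))

    # A single dict argument is special-cased for %(name)s-style messages,
    # which is why the dict-based calls in this patch stay as one argument.
    LOG.debug("Cleaning up %(service)s %(resource)s objects",
              {"service": "nova", "resource": "server"})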