Remove redundant str() in logging and other calls

Also fix the use of % in exception messages

Depends-On: I6b857347879ced1054f8cdb75f10a287b219ccb0
Change-Id: Idefc70c3bed8ed2ce1da0c6df1ad5e4d0a51fc20
Eyal 2019-02-26 15:15:17 +02:00
parent ad7c9d38ad
commit 45b54f5a9e
42 changed files with 83 additions and 87 deletions
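The hunks below all apply a few recurring patterns. As a minimal sketch of the intent (using the stdlib logging module rather than oslo.log; the function names and values here are illustrative, not taken from the Vitrage tree):

import logging

LOG = logging.getLogger(__name__)


def delete_template(uuid):
    # %s already converts its argument with str(), and the logger only
    # interpolates when the record is emitted, so wrapping arguments in
    # str() is redundant.
    LOG.info('delete template. uuid: %s', uuid)


def init_notifier(setup_func):
    try:
        setup_func()
    except Exception:
        # LOG.exception() records the message together with the full
        # traceback, so interpolating str(e) into the message adds nothing.
        LOG.exception('Failed to initialize oslo notifier')


def check_template_type(template_type):
    if template_type not in ('standard', 'definition', 'equivalence'):
        # Exception constructors do not apply logger-style %-formatting to
        # extra arguments, so the message is built eagerly with %.
        raise ValueError('Unknown template type %s' % template_type)

The logger defers %-interpolation (and the implicit str()) until a record is actually emitted, whereas an exception constructor just stores whatever arguments it is given, which is why the exception messages below switch to eager % formatting.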

View File

@ -79,7 +79,7 @@ class ResourcesController(RootRestController):
pecan.request.enforcer, {})
LOG.info('get resources with type: %s, all_tenants: %s, query: %s',
resource_type, all_tenants, str(query))
resource_type, all_tenants, query)
resources = pecan.request.client.call(
pecan.request.context,

View File

@ -67,7 +67,7 @@ class TemplateController(RootRestController):
@pecan.expose('json')
def delete(self, **kwargs):
uuid = kwargs['uuid']
LOG.info("delete template. uuid: %s", str(uuid))
LOG.info("delete template. uuid: %s", uuid)
enforce("template delete",
pecan.request.headers,

View File

@ -85,8 +85,7 @@ class WebhookController(RootRestController):
@pecan.expose('json')
def post(self, **kwargs):
LOG.info("Add webhook with following props: %s" % str(
kwargs))
LOG.info("Add webhook with following props: %s" % kwargs)
enforce('webhook add', pecan.request.headers,
pecan.request.enforcer, {})
try:

View File

@ -69,5 +69,5 @@ class EventApis(object):
driver='messagingv2',
publisher_id=self.publisher,
topics=['vitrage_notifications'])
except Exception as e:
LOG.info('Failed to initialize oslo notifier %s', str(e))
except Exception:
LOG.exception('Failed to initialize oslo notifier')

View File

@ -35,7 +35,7 @@ class RcaApis(base.EntityGraphApisBase):
@base.lock_graph
def get_rca(self, ctx, root, all_tenants):
LOG.debug("RcaApis get_rca - root: %s, all_tenants=%s",
str(root), all_tenants)
root, all_tenants)
project_id = ctx.get(TenantProps.TENANT, None)
is_admin_project = ctx.get(TenantProps.IS_ADMIN, False)

View File

@ -41,9 +41,9 @@ class ResourceApis(base.EntityGraphApisBase):
LOG.debug(
'ResourceApis get_resources - resource_type: %s, all_tenants: %s,'
' query: %s',
str(resource_type),
resource_type,
all_tenants,
str(query))
query)
query = self._get_query(ctx, resource_type, all_tenants, query)
resources = self.entity_graph.get_vertices(query_dict=query)
@ -57,10 +57,10 @@ class ResourceApis(base.EntityGraphApisBase):
LOG.debug(
'ResourceApis count_resources - type: %s, all_tenants: %s,'
' query: %s, group_by: %s',
str(resource_type),
resource_type,
all_tenants,
str(query),
str(group_by))
query,
group_by)
query = self._get_query(ctx, resource_type, all_tenants, query)
if group_by is None:

View File

@ -35,7 +35,7 @@ class TemplateApis(object):
def validate_template(self, ctx, templates, template_type, params=None):
LOG.debug("TemplateApis validate_template type: %s content: %s",
str(template_type), str(templates))
template_type, templates)
files_content = [t[1] for t in templates]
paths = [t[0] for t in templates]
@ -78,7 +78,7 @@ class TemplateApis(object):
if type(uuids) != list:
uuids = [uuids]
LOG.info("Deleting templates %s ", str(uuids))
LOG.info("Deleting templates %s ", uuids)
templates = [t for _id in uuids for t in db.templates.query(uuid=_id)
if t.status != TStatus.DELETED]
if not templates:

View File

@ -41,7 +41,7 @@ class TopologyApis(base.EntityGraphApisBase):
@base.lock_graph
def get_topology(self, ctx, graph_type, depth, query, root, all_tenants):
LOG.debug("TopologyApis get_topology - root: %s, all_tenants=%s",
str(root), all_tenants)
root, all_tenants)
project_id = ctx.get(TenantProps.TENANT, None)
is_admin_project = ctx.get(TenantProps.IS_ADMIN, False)

View File

@ -40,8 +40,7 @@ class WebhookApis(object):
def delete_webhook(self, ctx, id):
LOG.info("Delete webhook with id: %s",
str(id))
LOG.info("Delete webhook with id: %s", id)
deleted_rows_count = self.db_conn.webhooks.delete(id)
@ -72,7 +71,7 @@ class WebhookApis(object):
self.db_conn.webhooks.create(db_row)
return db_row_to_dict(db_row)
except Exception as e:
LOG.exception("Failed to add webhook to DB: %s", str(e))
LOG.exception("Failed to add webhook to DB")
return {"ERROR": str(e)}
def get_webhook(self, ctx, id):
@ -92,7 +91,7 @@ class WebhookApis(object):
id)
return None
except Exception as e:
LOG.exception("Failed to get webhook: %s", str(e))
LOG.exception("Failed to get webhook")
return {"ERROR": str(e)}
def _webhook_to_db_row(self, url, headers, regex_filter, ctx):

View File

@ -76,8 +76,8 @@ def get_portion(lst, num_of_portions, portion_index):
if num_of_portions < 1 or portion_index < 0 or \
portion_index >= num_of_portions:
raise Exception('Cannot get_portion %s %s',
str(num_of_portions),
str(portion_index))
num_of_portions,
portion_index)
list_copy = copy.copy(lst)
random.Random(0.5).shuffle(list_copy)
@ -107,7 +107,7 @@ def md5(obj):
return hashlib.md5(obj).hexdigest()
else:
return hashlib.md5(obj.encode('utf-8')).hexdigest()
raise Exception('Unknown object for md5 %s', str(obj))
raise Exception('Unknown object for md5 %s' % obj)
def fmt(docstr):

View File

@ -72,7 +72,7 @@ class CollectdDriver(AlarmDriverBase):
CProps.RESOURCE_NAME: element_config['name']
}
LOG.debug('collectd mappings: %s', str(mappings))
LOG.debug('collectd mappings: %s', mappings)
return mappings
except Exception:

View File

@ -95,7 +95,7 @@ class DoctorDriver(AlarmDriverBase):
"""
LOG.debug('Going to enrich event: %s', str(event))
LOG.debug('Going to enrich event: %s', event)
event[DSProps.EVENT_TYPE] = event[EventProps.TYPE]
@ -109,7 +109,7 @@ class DoctorDriver(AlarmDriverBase):
self._filter_get_erroneous,
event[EventProps.TIME])
LOG.debug('Enriched event: %s', str(event))
LOG.debug('Enriched event: %s', event)
if event:
return DoctorDriver.make_pickleable([event], DOCTOR_DATASOURCE,

View File

@ -51,11 +51,11 @@ class NagiosParser(object):
service = self._parse_service_row(service_row)
if service:
LOG.debug('Appending service: %s', str(service))
LOG.debug('Appending service: %s', service)
services.append(service)
else:
LOG.debug('service is None for service_row: %s',
str(service_row))
service_row)
LOG.debug('Done parsing Nagios status')

View File

@ -117,7 +117,7 @@ class InstanceTransformer(ResourceTransformerBase):
return [host_neighbor]
def _create_entity_key(self, event):
LOG.debug('Creating key for instance event: %s', str(event))
LOG.debug('Creating key for instance event: %s', event)
instance_id = self._get_field_extractor(event).entity_id(event)
key_fields = self._key_values(NOVA_INSTANCE_DATASOURCE, instance_id)

View File

@ -257,7 +257,7 @@ class PrometheusDriver(AlarmDriverBase):
"""
LOG.debug('Going to enrich event: %s', str(event))
LOG.debug('Going to enrich event: %s', event)
alarms = []
details = event.get(EProps.DETAILS)
@ -265,7 +265,7 @@ class PrometheusDriver(AlarmDriverBase):
alarms = self._enrich_alerts(details.get(PProps.ALERTS, []),
event_type)
LOG.debug('Enriched event. Created alert events: %s', str(alarms))
LOG.debug('Enriched event. Created alert events: %s', alarms)
return self.make_pickleable(alarms, PROMETHEUS_DATASOURCE,
DatasourceAction.UPDATE)

View File

@ -95,6 +95,6 @@ class PrometheusTransformer(AlarmTransformerBase):
@staticmethod
def get_enrich_query(event):
LOG.debug('event for enrich query: %s', str(event))
LOG.debug('event for enrich query: %s', event)
entity_unique_props = event.get(PDProps.ENTITY_UNIQUE_PROPS)
return entity_unique_props

View File

@ -107,7 +107,7 @@ class VitrageGraphInit(object):
for v in self.graph.get_vertices():
if not v.get(VProps.VITRAGE_CACHED_ID):
LOG.warning("Missing vitrage_cached_id in the vertex. "
"Vertex is not added to the ID cache %s", str(v))
"Vertex is not added to the ID cache %s", v)
else:
TransformerBase.key_to_uuid_cache[v[VProps.VITRAGE_CACHED_ID]]\
= v.vertex_id
@ -137,7 +137,7 @@ class EventsCoordination(object):
try:
return do_work_func(event)
except Exception:
LOG.exception('Got Exception for event %s', str(event))
LOG.exception('Got Exception for event %s' % event)
self._do_work_func = do_work

View File

@ -53,8 +53,8 @@ class GraphNotifier(object):
notifier_plugins = conf.notifiers
if notifier_topic and notifier_plugins:
topics.append(notifier_topic)
except Exception as e:
LOG.info('Graph Notifier - missing configuration %s' % str(e))
except Exception:
LOG.exception('Graph Notifier - missing configuration')
try:
machine_learning_topic = \
@ -62,8 +62,8 @@ class GraphNotifier(object):
machine_learning_plugins = conf.machine_learning.plugins
if machine_learning_topic and machine_learning_plugins:
topics.append(machine_learning_topic)
except Exception as e:
LOG.info('Machine Learning - missing configuration %s' % str(e))
except Exception:
LOG.info('Machine Learning - missing configuration')
return topics
@ -81,7 +81,7 @@ class GraphNotifier(object):
curr.properties[VProps.RESOURCE] = graph.get_vertex(
curr.get(VProps.VITRAGE_RESOURCE_ID))
LOG.debug('notification_types : %s', str(notification_types))
LOG.debug('notification_types : %s', notification_types)
LOG.debug('notification properties : %s', curr.properties)
for notification_type in notification_types:
@ -142,7 +142,7 @@ class PersistNotifier(object):
curr.properties[EProps.SOURCE_ID] = curr.source_id
curr.properties[EProps.TARGET_ID] = curr.target_id
LOG.debug('persist_notification_types : %s', str(notification_types))
LOG.debug('persist_notification_types : %s', notification_types)
LOG.debug('persist_notification properties : %s', curr.properties)
for notification_type in notification_types:

View File

@ -56,7 +56,7 @@ class Processor(processor.ProcessorBase):
entity = self.transformer_manager.transform(event)
if entity.action not in self.actions:
LOG.warning('Deprecated or unknown entity %s ignored', str(entity))
LOG.warning('Deprecated or unknown entity %s ignored', entity)
return
self._calculate_vitrage_aggregated_values(entity.vertex, entity.action)

View File

@ -67,7 +67,7 @@ def delete_placeholder_vertex(g, vertex):
"""Checks if it is a placeholder vertex, and if so deletes it """
LOG.debug('Asked to delete a placeholder vertex: %s with %d neighbors',
str(vertex), len(g.get_edges(vertex.vertex_id)))
vertex, len(g.get_edges(vertex.vertex_id)))
if not vertex[VProps.VITRAGE_IS_PLACEHOLDER]:
return
@ -90,7 +90,7 @@ def get_vertex_types(vertex):
vitrage_category = vertex.get(VProps.VITRAGE_CATEGORY)
vitrage_type = vertex.get(VProps.VITRAGE_TYPE)
if not vitrage_category:
LOG.warning('no vitrage_category in vertex: %s', str(vertex))
LOG.warning('no vitrage_category in vertex: %s', vertex)
return vitrage_category, vitrage_type

View File

@ -137,7 +137,7 @@ class ActionExecutor(object):
LOG.debug('Notifying external engine %s. Properties: %s',
execution_engine,
str(payload))
payload)
self.notifier.notify(execution_engine, payload)
@staticmethod

View File

@ -46,8 +46,8 @@ class EvaluatorNotifier(object):
publisher_id='vitrage.evaluator',
topics=[topic_prefix + '.' + notifier])
except Exception as e:
LOG.info('Evaluator Notifier - missing configuration %s' % str(e))
except Exception:
LOG.exception('Evaluator Notifier - missing configuration')
@property
def enabled(self):
@ -63,7 +63,7 @@ class EvaluatorNotifier(object):
LOG.debug('execution_engine: %s, properties: %s',
execution_engine,
str(properties))
properties)
try:
if execution_engine in self.oslo_notifiers:

View File

@ -93,7 +93,7 @@ class ScenarioEvaluator(object):
self.process_event(vertex, None, True)
LOG.info(
'Run %s Evaluator on %s items - took %s',
action_mode, str(len(vertices)), str(time.time() - start_time))
action_mode, len(vertices), (time.time() - start_time))
def process_event(self, before, current, is_vertex, *args, **kwargs):
"""Notification of a change in the entity graph.
@ -112,8 +112,8 @@ class ScenarioEvaluator(object):
LOG.debug('Process event - starting')
LOG.debug("Element before event: %s, Current element: %s",
str(before),
str(current))
before,
current)
before_scenarios = self._get_element_scenarios(before, is_vertex)
current_scenarios = self._get_element_scenarios(current, is_vertex)
@ -122,8 +122,8 @@ class ScenarioEvaluator(object):
if len(before_scenarios) + len(current_scenarios):
LOG.debug("Number of relevant scenarios found: undo = %s, do = %s",
str(len(before_scenarios)),
str(len(current_scenarios)))
len(before_scenarios),
len(current_scenarios))
actions = self._process_and_get_actions(before,
before_scenarios,
@ -136,7 +136,7 @@ class ScenarioEvaluator(object):
actions_to_preform = self._analyze_and_filter_actions(actions)
except Exception:
LOG.exception("Evaluator error, will not execute actions %s",
str(actions))
actions)
self._action_executor.execute(actions_to_preform)
LOG.debug('Process event - completed')
@ -168,7 +168,7 @@ class ScenarioEvaluator(object):
def _process_and_get_actions(self, element, triggered_scenarios, mode):
actions = []
for triggered_scenario in triggered_scenarios:
LOG.debug("Processing: %s", str(triggered_scenario))
LOG.debug("Processing: %s", triggered_scenario)
scenario_element = triggered_scenario[0]
scenario = triggered_scenario[1]
actions.extend(self._process_scenario(element,

View File

@ -179,8 +179,8 @@ class ScenarioRepository(object):
target_set = frozenset(edge_desc.target.properties.items())
except Exception as e:
LOG.error('frozenset for edge failed - Source:%s Target:%s',
str(edge_desc.source),
str(edge_desc.target))
edge_desc.source,
edge_desc.target)
raise e
return EdgeKeyScenario(edge_desc.edge.label, source_set, target_set)

View File

@ -89,7 +89,7 @@ def _validate_template(db, template, template_type, params=None):
elif template_type == TType.EQUIVALENCE:
result = base.Result("", True, "", "No Validation")
else:
raise VitrageError("Unknown template type %s", template_type)
raise VitrageError("Unknown template type %s" % template_type)
return result

View File

@ -70,11 +70,11 @@ def get_attr(match, *args):
if attr is None:
LOG.warning('Attribute %s not found for vertex %s',
attr_name, str(vertex))
attr_name, vertex)
LOG.debug('Function get_attr called with template_id %s and attr_name %s.'
'Matched vertex properties: %s. Returned attribute value: %s',
template_id, attr_name, str(entity_props), attr)
template_id, attr_name, entity_props, attr)
return attr

View File

@ -45,7 +45,7 @@ class ScenarioLoader(object):
def build_scenarios(self, scenarios_defs):
scenarios = []
for counter, scenario_def in enumerate(scenarios_defs):
scenario_id = "%s-scenario%s" % (self.name, str(counter))
scenario_id = "%s-scenario%s" % (self.name, counter)
scenario_dict = scenario_def[TFields.SCENARIO]
condition = parse_condition(scenario_dict[TFields.CONDITION])
self.valid_target = calculate_action_target(
@ -101,7 +101,7 @@ class ScenarioLoader(object):
actions = []
for counter, action_def in enumerate(actions_def):
action_id = '%s-action%s' % (scenario_id, str(counter))
action_id = '%s-action%s' % (scenario_id, counter)
action_type = action_def[TFields.ACTION][TFields.ACTION_TYPE]
action_loader = self._template_schema.loaders.get(action_type)

View File

@ -158,7 +158,7 @@ class DefinitionsValidator(object):
re.compile(value)
except Exception:
LOG.error('%s %s status code: %s' % (status_msgs[47],
str(key), 47))
key, 47))
return get_content_fault_result(47)
return get_content_correct_result()

View File

@ -57,7 +57,7 @@ class NXAlgorithm(GraphAlgorithm):
if match_func and not match_func(root_data):
LOG.info('graph_query_vertices: root %s does not match filter %s',
str(root_id), str(query_dict))
root_id, query_dict)
return graph
n_result = []

View File

@ -73,7 +73,7 @@ def subgraph_matching(base_graph, subgraph, matches, validate=False):
validate)
if not initial_sg:
LOG.warning('subgraph_matching:Initial sub-graph creation failed')
LOG.warning('subgraph_matching: Known matches: %s', str(matches))
LOG.warning('subgraph_matching: Known matches: %s', matches)
return final_subgraphs
queue = [initial_sg]
@ -201,7 +201,7 @@ def _get_edges_to_mapped_vertices(graph, vertex_id):
for e in graph.get_edges(vertex_id):
t_neighbor = graph.get_vertex(e.other_vertex(vertex_id))
if not t_neighbor:
raise VitrageAlgorithmError('Cant get vertex for edge' + str(e))
raise VitrageAlgorithmError('Cant get vertex for edge %s' % e)
if t_neighbor and t_neighbor.get(MAPPED_V_ID):
subgraph_edges_to_mapped_vertices.append(e)
return set(subgraph_edges_to_mapped_vertices)
@ -222,7 +222,7 @@ def _graph_contains_subgraph_edges(graph, subgraph, subgraph_edges):
graph_v_id_source = subgraph.get_vertex(e.source_id).get(MAPPED_V_ID)
graph_v_id_target = subgraph.get_vertex(e.target_id).get(MAPPED_V_ID)
if not graph_v_id_source or not graph_v_id_target:
raise VitrageAlgorithmError('Cant get vertex for edge' + str(e))
raise VitrageAlgorithmError('Cant get vertex for edge %s' % e)
found_graph_edge = graph.get_edge(graph_v_id_source,
graph_v_id_target,
e.label)

View File

@ -70,7 +70,7 @@ class Vertex(PropertiesElement):
def __repr__(self):
return '{vertex_id : %s, properties : %s}' % \
(str(self.vertex_id), str(self.properties))
(self.vertex_id, self.properties)
def __eq__(self, other):
"""Compare two vertices
@ -132,8 +132,7 @@ class Edge(PropertiesElement):
self.label = label
def __hash__(self):
return hash('%s%s%s' % (str(self.source_id), str(self.target_id),
str(self.label)))
return hash('%s%s%s' % (self.source_id, self.target_id, self.label))
def __repr__(self):
return '{source_id : %s, target_id : %s, ' \

View File

@ -115,7 +115,7 @@ class NXGraph(Graph):
properties = self._g.node.get(v_id, None)
if properties is not None:
return vertex_copy(v_id, properties)
LOG.debug("get_vertex item not found. v_id=%s", str(v_id))
LOG.debug("get_vertex item not found. v_id=%s", v_id)
return None
def get_edge(self, source_id, target_id, label):
@ -123,7 +123,7 @@ class NXGraph(Graph):
properties = self._g.adj[source_id][target_id][label]
except KeyError:
LOG.debug("get_edge item not found. source_id=%s, target_id=%s, "
"label=%s", str(source_id), str(target_id), str(label))
"label=%s", source_id, target_id, label)
return None
if properties is not None:
return edge_copy(source_id, target_id, label, properties)

View File

@ -29,8 +29,8 @@ class AlarmDataAccumulator(object):
def append_active(self, alarm_id, timestamp):
if alarm_id in self.active_start_times:
LOG.debug("Active alarm {} was started twice. Second time at {}".
format(alarm_id, str(timestamp)))
LOG.debug("Active alarm %s was started twice. Second time at %s",
alarm_id, timestamp)
return
self.active_start_times[alarm_id] = timestamp

View File

@ -63,7 +63,7 @@ class AlarmDataProcessor(MachineLearningBase):
# flush all data once num_of_events_to_flush is achieved
if self.event_counter == self.num_of_events_to_flush:
LOG.debug("Persisting: {}".format(str(data)))
LOG.debug("Persisting: %s", data)
self.data_manager.flush_accumulations()
APersistor.save_accumulated_data(self.data_manager)
self.correlation_manager.output_correlations(self.data_manager)

View File

@ -76,6 +76,5 @@ class MistralNotifier(NotifierBase):
else:
LOG.error('Failed to execute Mistral action')
except Exception as e:
LOG.warning('Failed to execute Mistral action. Exception: %s',
str(e))
except Exception:
LOG.exception('Failed to execute Mistral action.')

View File

@ -57,10 +57,10 @@ class NovaNotifier(NotifierBase):
def _mark_host_down(self, host_id, is_down):
try:
LOG.info('Nova services.force_down - host id: %s, is_down: %s',
str(host_id), str(is_down))
host_id, is_down)
response = self.client.services.force_down(
host_id, 'nova-compute', is_down)
LOG.info('RESPONSE %s', str(response.to_dict()))
LOG.info('RESPONSE %s', response.to_dict())
except Exception:
LOG.exception('Failed to services.force_down.')
@ -70,6 +70,6 @@ class NovaNotifier(NotifierBase):
LOG.info('Nova servers.reset_state - server: %s, state: %s',
server_id, state)
response = self.client.servers.reset_state(server_id, state)
LOG.info('RESPONSE %s', str(response))
LOG.info('RESPONSE %s', response)
except Exception:
LOG.exception('Failed to execute servers.reset_state.')

View File

@ -117,4 +117,4 @@ def register_opts(conf, package_name, paths):
except ImportError:
pass
LOG.error("Failed to import config options for %s. Not found in %s",
package_name, str(paths))
package_name, paths)

View File

@ -123,8 +123,8 @@ class SnmpParsingService(coord.Service):
driver='messagingv2',
publisher_id=self.publisher,
topics=['vitrage_notifications'])
except Exception as e:
LOG.warning('Failed to initialize oslo notifier %s', str(e))
except Exception:
LOG.exception('Failed to initialize oslo notifier')
def _send_snmp_to_queue(self, snmp_trap):
if str == type(snmp_trap):

View File

@ -39,8 +39,8 @@ def get_connection_from_config(conf):
{'name': connection_scheme, 'namespace': _NAMESPACE})
mgr = driver.DriverManager(_NAMESPACE, connection_scheme)
except Exception as e:
LOG.exception('Failed to get scheme %s. Exception: %s ', str(url), e)
except Exception:
LOG.exception('Failed to get scheme %s.' % url)
return None
@tenacity.retry(

View File

@ -253,7 +253,7 @@ class HistoryFacadeConnection(object):
"'filter_by' and 'filter_vals' differs")
for d in sort_dirs:
if d not in (ASC, DESC):
raise VitrageInputError("Unknown sort direction %s", str(d))
raise VitrageInputError("Unknown sort direction %s" % d)
@staticmethod
def _add_time_frame_to_query(query, start, end):

View File

@ -60,7 +60,7 @@ class TestGraphPersistor(TestFunctionalBase, TestConfiguration):
graph_persistor.persist_event(
pre_item, current_item, is_vertex, graph, self.event_id)
except Exception as e:
self.fail_msg = 'persist_event failed with exception ' + str(e)
self.fail_msg = 'persist_event failed with exception %s' % e
self.event_id = self.event_id + 1
# Subscribe graph changes to callback, so events are written to db

View File

@ -165,7 +165,7 @@ class GraphGenerator(object):
props = self._load_resource_file(filename, 'vertices')
if props.get(VProps.ID):
props[VProps.ID] = self.generate_mock_uuid()
props[VProps.NAME] = "%s-%s" % (props[VProps.VITRAGE_TYPE], str(index))
props[VProps.NAME] = "%s-%s" % (props[VProps.VITRAGE_TYPE], index)
props[VProps.VITRAGE_ID] = self.generate_mock_uuid()
return Vertex(props[VProps.VITRAGE_ID], props)