debug level logs should not be translated

According to the OpenStack translation policy at
https://wiki.openstack.org/wiki/LoggingStandards, debug messages should
not be translated. As noted in several of garyk's changes in Nova, this
is meant to help prioritize log translation.

Change-Id: I59486b1110f08510d83a4aec2a1666805c59d1cd
Closes-Bug: #1318333
parent 1dfe518654
commit 4cd3478b4b
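For context, the convention the policy describes looks roughly like the
sketch below. This is an illustration only, not code from this commit:
the stubbed `_` and the example values are assumptions (Swift modules
import a real gettext wrapper as `_`).

import logging


def _(msg):
    # Stand-in for the project's gettext wrapper, stubbed so the
    # sketch runs on its own.
    return msg


logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

db_path = '/srv/node/sda1/accounts/1/abc.db'  # hypothetical value

# Debug output is operator-facing only, so the message stays
# untranslated. Passing the argument to the logger (rather than
# pre-formatting with %) also skips the formatting work when debug
# logging is disabled.
logger.debug('Audit passed for %s', db_path)

# Messages at info level and above may reach end users, so they are
# wrapped in _() and extracted into the translation catalogs.
logger.info(_('Pass beginning; %s possible containers'), 42)

Note that the hunks below only strip the _() wrapper; each call keeps
its existing argument style, whether lazy (separate logger arguments)
or pre-formatted with %.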
@@ -116,7 +116,7 @@ class AccountAuditor(Daemon):
                 broker.get_info()
                 self.logger.increment('passes')
                 self.account_passes += 1
-                self.logger.debug(_('Audit passed for %s') % broker)
+                self.logger.debug('Audit passed for %s' % broker)
         except (Exception, Timeout):
             self.logger.increment('failures')
             self.account_failures += 1
@@ -101,7 +101,7 @@ class AccountReaper(Daemon):
         This repeatedly calls :func:`reap_once` no quicker than the
         configuration interval.
         """
-        self.logger.debug(_('Daemon started.'))
+        self.logger.debug('Daemon started.')
         sleep(random.random() * self.interval)
         while True:
             begin = time()
@@ -117,7 +117,7 @@ class AccountReaper(Daemon):
         repeatedly by :func:`run_forever`. This will call :func:`reap_device`
         once for each device on the server.
         """
-        self.logger.debug(_('Begin devices pass: %s'), self.devices)
+        self.logger.debug('Begin devices pass: %s', self.devices)
         begin = time()
         try:
             for device in os.listdir(self.devices):
@@ -121,8 +121,8 @@ class BufferedHTTPConnection(HTTPConnection):
 
     def getresponse(self):
         response = HTTPConnection.getresponse(self)
-        logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s "
-                        "%(host)s:%(port)s %(path)s)"),
+        logging.debug("HTTP PERF: %(time).5f seconds to %(method)s "
+                      "%(host)s:%(port)s %(path)s)",
                       {'time': time.time() - self._connected_time,
                        'method': self._method, 'host': self.host,
                        'port': self.port, 'path': self._path})
@@ -276,7 +276,7 @@ class Replicator(Daemon):
         """
         self.stats['diff'] += 1
         self.logger.increment('diffs')
-        self.logger.debug(_('Syncing chunks with %s'), http.host)
+        self.logger.debug('Syncing chunks with %s', http.host)
         sync_table = broker.get_syncs()
         objects = broker.get_items_since(point, self.per_diff)
         diffs = 0
@@ -294,9 +294,9 @@ class Replicator(Daemon):
             point = objects[-1]['ROWID']
             objects = broker.get_items_since(point, self.per_diff)
         if objects:
-            self.logger.debug(_(
+            self.logger.debug(
                 'Synchronization for %s has fallen more than '
-                '%s rows behind; moving on and will try again next pass.'),
+                '%s rows behind; moving on and will try again next pass.',
                 broker, self.max_diffs * self.per_diff)
             self.stats['diff_capped'] += 1
             self.logger.increment('diff_caps')
@@ -407,7 +407,7 @@ class Replicator(Daemon):
         :param node_id: node id of the node to be replicated to
         """
         start_time = now = time.time()
-        self.logger.debug(_('Replicating db %s'), object_file)
+        self.logger.debug('Replicating db %s', object_file)
         self.stats['attempted'] += 1
         self.logger.increment('attempts')
         shouldbehere = True
@@ -611,15 +611,15 @@ class ReplicatorRpc(object):
                 raise
         timespan = time.time() - timemark
         if timespan > DEBUG_TIMINGS_THRESHOLD:
-            self.logger.debug(_('replicator-rpc-sync time for info: %.02fs') %
+            self.logger.debug('replicator-rpc-sync time for info: %.02fs' %
                               timespan)
         if metadata:
             timemark = time.time()
             broker.update_metadata(simplejson.loads(metadata))
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'update_metadata: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'update_metadata: %.02fs' % timespan)
         if info['put_timestamp'] != put_timestamp or \
                 info['created_at'] != created_at or \
                 info['delete_timestamp'] != delete_timestamp:
@@ -628,14 +628,14 @@ class ReplicatorRpc(object):
                 created_at, put_timestamp, delete_timestamp)
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'merge_timestamps: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'merge_timestamps: %.02fs' % timespan)
         timemark = time.time()
         info['point'] = broker.get_sync(id_)
         timespan = time.time() - timemark
         if timespan > DEBUG_TIMINGS_THRESHOLD:
-            self.logger.debug(_('replicator-rpc-sync time for get_sync: '
-                                '%.02fs') % timespan)
+            self.logger.debug('replicator-rpc-sync time for get_sync: '
+                              '%.02fs' % timespan)
         if hash_ == info['hash'] and info['point'] < remote_sync:
             timemark = time.time()
             broker.merge_syncs([{'remote_id': id_,
@@ -643,8 +643,8 @@ class ReplicatorRpc(object):
             info['point'] = remote_sync
             timespan = time.time() - timemark
             if timespan > DEBUG_TIMINGS_THRESHOLD:
-                self.logger.debug(_('replicator-rpc-sync time for '
-                                    'merge_syncs: %.02fs') % timespan)
+                self.logger.debug('replicator-rpc-sync time for '
+                                  'merge_syncs: %.02fs' % timespan)
         return Response(simplejson.dumps(info))
 
     def merge_syncs(self, broker, args):
@@ -116,7 +116,7 @@ class ContainerAuditor(Daemon):
                 broker.get_info()
                 self.logger.increment('passes')
                 self.container_passes += 1
-                self.logger.debug(_('Audit passed for %s'), broker)
+                self.logger.debug('Audit passed for %s', broker)
         except (Exception, Timeout):
             self.logger.increment('failures')
             self.container_failures += 1
@@ -100,7 +100,7 @@ class ObjectExpirer(Daemon):
         self.report_first_time = self.report_last_time = time()
         self.report_objects = 0
         try:
-            self.logger.debug(_('Run begin'))
+            self.logger.debug('Run begin')
             containers, objects = \
                 self.swift.get_account_info(self.expiring_objects_account)
             self.logger.info(_('Pass beginning; %s possible containers; %s '
@@ -138,7 +138,7 @@ class ObjectExpirer(Daemon):
                     self.logger.exception(
                         _('Exception while deleting container %s %s') %
                         (container, str(err)))
-            self.logger.debug(_('Run end'))
+            self.logger.debug('Run end')
             self.report(final=True)
         except (Exception, Timeout):
             self.logger.exception(_('Unhandled exception'))
@@ -526,6 +526,6 @@ class ObjectReplicator(Daemon):
             dump_recon_cache({'object_replication_time': total,
                               'object_replication_last': time.time()},
                              self.rcache, self.logger)
-            self.logger.debug(_('Replication sleeping for %s seconds.'),
+            self.logger.debug('Replication sleeping for %s seconds.',
                               self.run_pause)
             sleep(self.run_pause)
@@ -206,14 +206,14 @@ class ObjectUpdater(Daemon):
         if success:
             self.successes += 1
             self.logger.increment('successes')
-            self.logger.debug(_('Update sent for %(obj)s %(path)s'),
+            self.logger.debug('Update sent for %(obj)s %(path)s',
                               {'obj': obj, 'path': update_path})
             self.logger.increment("unlinks")
             os.unlink(update_path)
         else:
             self.failures += 1
             self.logger.increment('failures')
-            self.logger.debug(_('Update failed for %(obj)s %(path)s'),
+            self.logger.debug('Update failed for %(obj)s %(path)s',
                               {'obj': obj, 'path': update_path})
         if new_successes:
             update['successes'] = successes