From 442e3c8a1a92eb3da140fc39f78b4ff86c50d11a Mon Sep 17 00:00:00 2001
From: Michael Barton
Date: Thu, 2 Dec 2010 13:37:49 +0000
Subject: [PATCH 1/8] use logging formatter to standardize transaction id logging

---
 swift/account/server.py   |  1 +
 swift/common/utils.py     | 48 +++++++++++++++++++++++++++------------
 swift/container/server.py |  1 +
 swift/obj/server.py       |  1 +
 swift/proxy/server.py     |  1 +
 5 files changed, 37 insertions(+), 15 deletions(-)

diff --git a/swift/account/server.py b/swift/account/server.py
index 67a67c4854..5bd7bba517 100644
--- a/swift/account/server.py
+++ b/swift/account/server.py
@@ -297,6 +297,7 @@ class AccountController(object):
     def __call__(self, env, start_response):
         start_time = time.time()
         req = Request(env)
+        self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
         if not check_utf8(req.path_info):
             res = HTTPPreconditionFailed(body='Invalid UTF8')
         else:
diff --git a/swift/common/utils.py b/swift/common/utils.py
index 1c48c61339..d4dc078916 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -284,23 +284,24 @@ class LoggerFileObject(object):
         return self


-class NamedLogger(object):
+class LogAdapter(object):
     """Cheesy version of the LoggerAdapter available in Python 3"""

-    def __init__(self, logger, server):
+    def __init__(self, logger):
         self.logger = logger
-        self.server = server
-        for proxied_method in ('debug', 'info', 'log', 'warn', 'warning',
-                               'error', 'critical'):
-            setattr(self, proxied_method,
-                    self._proxy(getattr(logger, proxied_method)))
+        self._txn_id = threading.local()
+        for proxied_method in ('debug', 'log', 'warn', 'warning', 'error',
+                               'critical', 'info'):
+            setattr(self, proxied_method, getattr(logger, proxied_method))

-    def _proxy(self, logger_meth):
+    @property
+    def txn_id(self):
+        if hasattr(self._txn_id, 'value'):
+            return self._txn_id.value

-        def _inner_proxy(msg, *args, **kwargs):
-            msg = '%s %s' % (self.server, msg)
-            logger_meth(msg, *args, **kwargs)
-        return _inner_proxy
+    @txn_id.setter
+    def txn_id(self, value):
+        self._txn_id.value = value

     def getEffectiveLevel(self):
         return self.logger.getEffectiveLevel()
@@ -330,7 +331,21 @@
                     emsg += ' %s' % exc.msg
         else:
             call = self.logger.exception
-        call('%s %s: %s' % (self.server, msg, emsg), *args)
+        call('%s: %s' % (msg, emsg), *args)
+
+
+class NamedFormatter(logging.Formatter):
+    def __init__(self, server, logger):
+        logging.Formatter.__init__(self)
+        self.server = server
+        self.logger = logger
+
+    def format(self, record):
+        msg = logging.Formatter.format(self, record)
+        if record.levelno != logging.INFO and self.logger.txn_id:
+            return '%s %s (txn: %s)' % (self.server, msg, self.logger.txn_id)
+        else:
+            return '%s %s' % (self.server, msg)


 def get_logger(conf, name=None, log_to_console=False):
@@ -359,7 +374,8 @@
         root_logger.addHandler(get_logger.console)
     if conf is None:
         root_logger.setLevel(logging.INFO)
-        return NamedLogger(root_logger, name)
+        adapted_logger = LogAdapter(root_logger)
+        return adapted_logger
     if name is None:
         name = conf.get('log_name', 'swift')
     get_logger.handler = SysLogHandler(address='/dev/log',
@@ -369,7 +385,9 @@
     root_logger.addHandler(get_logger.handler)
     root_logger.setLevel(
         getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
-    return NamedLogger(root_logger, name)
+    adapted_logger = LogAdapter(root_logger)
+    get_logger.handler.setFormatter(NamedFormatter(name, adapted_logger))
+    return adapted_logger


 def drop_privileges(user):
diff --git a/swift/container/server.py b/swift/container/server.py
index 45ed00fd4d..82e222435a 100644
--- a/swift/container/server.py
+++ b/swift/container/server.py
@@ -384,6 +384,7 @@ class ContainerController(object):
     def __call__(self, env, start_response):
         start_time = time.time()
         req = Request(env)
+        self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
         if not check_utf8(req.path_info):
             res = HTTPPreconditionFailed(body='Invalid UTF8')
         else:
diff --git a/swift/obj/server.py b/swift/obj/server.py
index 632a0c04cc..cdddf72edf 100644
--- a/swift/obj/server.py
+++ b/swift/obj/server.py
@@ -555,6 +555,7 @@ class ObjectController(object):
         """WSGI Application entry point for the Swift Object Server."""
         start_time = time.time()
         req = Request(env)
+        self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
         if not check_utf8(req.path_info):
             res = HTTPPreconditionFailed(body='Invalid UTF8')
         else:
diff --git a/swift/proxy/server.py b/swift/proxy/server.py
index e48052a398..b9429ef875 100644
--- a/swift/proxy/server.py
+++ b/swift/proxy/server.py
@@ -1410,6 +1410,7 @@ class BaseApplication(object):

             controller = controller(self, **path_parts)
             controller.trans_id = req.headers.get('x-cf-trans-id', '-')
+            self.logger.txn_id = req.headers.get('x-cf-trans-id', None)
             try:
                 handler = getattr(controller, req.method)
                 if not getattr(handler, 'publicly_accessible'):

From d7dd3ec0659aaee87f480bcbe5d3d5cdb33d8c5b Mon Sep 17 00:00:00 2001
From: Michael Barton
Date: Mon, 20 Dec 2010 21:47:50 +0000
Subject: [PATCH 2/8] gettext updates

---
 swift/account/auditor.py                |  27 +++---
 swift/account/reaper.py                 |  32 ++++----
 swift/account/server.py                 |  10 +--
 swift/common/bench.py                   |  11 +--
 swift/common/bufferedhttp.py            |  14 ++--
 swift/common/db.py                      |  11 +--
 swift/common/db_replicator.py           |  54 ++++++------
 swift/common/memcached.py               |  10 ++-
 swift/common/middleware/catch_errors.py |   4 +-
 swift/common/middleware/ratelimit.py    |  16 ++--
 swift/common/utils.py                   |  24 +++---
 swift/container/auditor.py              |  27 +++---
 swift/container/server.py               |  29 ++++---
 swift/container/updater.py              |  39 +++++----
 swift/obj/auditor.py                    |  27 +++---
 swift/obj/replicator.py                 | 104 +++++++++++++-----------
 swift/obj/server.py                     |  21 ++---
 swift/obj/updater.py                    |  41 ++++++----
 swift/proxy/server.py                   |  62 +++++++-------
 test/unit/auth/test_server.py           |   8 +-
 test/unit/common/test_daemon.py         |   2 +-
 test/unit/common/test_utils.py          |  18 ++--
 22 files changed, 306 insertions(+), 285 deletions(-)

diff --git a/swift/account/auditor.py b/swift/account/auditor.py
index fc14ec520c..01afb7d469 100644
--- a/swift/account/auditor.py
+++ b/swift/account/auditor.py
@@ -16,6 +16,7 @@
 import os
 import time
 from random import random
+from gettext import gettext as _

 from swift.account import server as account_server
 from swift.common.db import AccountBroker
@@ -49,11 +50,11 @@
             for path, device, partition in all_locs:
                 self.account_audit(path)
                 if time.time() - reported >= 3600:  # once an hour
-                    self.logger.info(
-                        'Since %s: Account audits: %s passed audit, '
-                        '%s failed audit' % (time.ctime(reported),
-                                             self.account_passes,
-                                             self.account_failures))
+                    self.logger.info(_('Since %(time)s: Account audits: '
+                        '%(passed)s passed audit, %(failed)s failed audit'),
+                        {'time': time.ctime(reported),
+                         'passed': self.account_passes,
+                         'failed': self.account_failures})
                     reported = time.time()
                     self.account_passes = 0
                     self.account_failures = 0
@@ -72,17 +73,17 @@
         for path, device, partition in
all_locs: self.account_audit(path) if time.time() - reported >= 3600: # once an hour - self.logger.info( - 'Since %s: Account audits: %s passed audit, ' - '%s failed audit' % (time.ctime(reported), - self.account_passes, - self.account_failures)) + self.logger.info(_('Since %(time)s: Account audits: ' + '%(passed)s passed audit, %(failed)s failed audit'), + {'time': time.ctime(reported), + 'passed': self.account_passes, + 'failed': self.account_failures}) reported = time.time() self.account_passes = 0 self.account_failures = 0 elapsed = time.time() - begin self.logger.info( - 'Account audit "once" mode completed: %.02fs' % elapsed) + 'Account audit "once" mode completed: %.02fs', elapsed) def account_audit(self, path): """ @@ -97,8 +98,8 @@ class AccountAuditor(Daemon): if not broker.is_deleted(): info = broker.get_info() self.account_passes += 1 - self.logger.debug('Audit passed for %s' % broker.db_file) + self.logger.debug(_('Audit passed for %s') % broker.db_file) except Exception: self.account_failures += 1 - self.logger.exception('ERROR Could not get account info %s' % + self.logger.exception(_('ERROR Could not get account info %s'), (broker.db_file)) diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 814f7551e5..0225209392 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -18,6 +18,7 @@ import random from logging import DEBUG from math import sqrt from time import time +from gettext import gettext as _ from eventlet import GreenPool, sleep @@ -77,7 +78,7 @@ class AccountReaper(Daemon): """ The account :class:`swift.common.ring.Ring` for the cluster. """ if not self.account_ring: self.logger.debug( - 'Loading account ring from %s' % self.account_ring_path) + _('Loading account ring from %s'), self.account_ring_path) self.account_ring = Ring(self.account_ring_path) return self.account_ring @@ -85,7 +86,7 @@ class AccountReaper(Daemon): """ The container :class:`swift.common.ring.Ring` for the cluster. """ if not self.container_ring: self.logger.debug( - 'Loading container ring from %s' % self.container_ring_path) + _('Loading container ring from %s'), self.container_ring_path) self.container_ring = Ring(self.container_ring_path) return self.container_ring @@ -93,7 +94,7 @@ class AccountReaper(Daemon): """ The object :class:`swift.common.ring.Ring` for the cluster. """ if not self.object_ring: self.logger.debug( - 'Loading object ring from %s' % self.object_ring_path) + _('Loading object ring from %s'), self.object_ring_path) self.object_ring = Ring(self.object_ring_path) return self.object_ring @@ -103,7 +104,7 @@ class AccountReaper(Daemon): This repeatedly calls :func:`reap_once` no quicker than the configuration interval. """ - self.logger.debug('Daemon started.') + self.logger.debug(_('Daemon started.')) sleep(random.random() * self.interval) while True: begin = time() @@ -119,17 +120,17 @@ class AccountReaper(Daemon): repeatedly by :func:`run_forever`. This will call :func:`reap_device` once for each device on the server. 
""" - self.logger.debug('Begin devices pass: %s' % self.devices) + self.logger.debug(_('Begin devices pass: %s'), self.devices) begin = time() for device in os.listdir(self.devices): if self.mount_check and \ not os.path.ismount(os.path.join(self.devices, device)): self.logger.debug( - 'Skipping %s as it is not mounted' % device) + _('Skipping %s as it is not mounted'), device) continue self.reap_device(device) elapsed = time() - begin - self.logger.info('Devices pass completed: %.02fs' % elapsed) + self.logger.info(_('Devices pass completed: %.02fs'), elapsed) def reap_device(self, device): """ @@ -212,7 +213,7 @@ class AccountReaper(Daemon): """ begin = time() account = broker.get_info()['account'] - self.logger.info('Beginning pass on account %s' % account) + self.logger.info(_('Beginning pass on account %s'), account) self.stats_return_codes = {} self.stats_containers_deleted = 0 self.stats_objects_deleted = 0 @@ -235,12 +236,12 @@ class AccountReaper(Daemon): self.container_pool.waitall() except Exception: self.logger.exception( - 'Exception with containers for account %s' % account) + _('Exception with containers for account %s'), account) marker = containers[-1][0] log = 'Completed pass on account %s' % account except Exception: self.logger.exception( - 'Exception with account %s' % account) + _('Exception with account %s'), account) log = 'Incomplete pass on account %s' % account if self.stats_containers_deleted: log += ', %s containers deleted' % self.stats_containers_deleted @@ -317,7 +318,7 @@ class AccountReaper(Daemon): except ClientException, err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( - 'Exception with %(ip)s:%(port)s/%(device)s' % node) + _('Exception with %(ip)s:%(port)s/%(device)s'), node) self.stats_return_codes[err.http_status / 100] = \ self.stats_return_codes.get(err.http_status / 100, 0) + 1 if not objects: @@ -330,8 +331,9 @@ class AccountReaper(Daemon): nodes, obj['name']) pool.waitall() except Exception: - self.logger.exception('Exception with objects for container ' - '%s for account %s' % (container, account)) + self.logger.exception(_('Exception with objects for container ' + '%(container)s for account %(account)s'), + {'container': container, 'account': account}) marker = objects[-1]['name'] successes = 0 failures = 0 @@ -351,7 +353,7 @@ class AccountReaper(Daemon): except ClientException, err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( - 'Exception with %(ip)s:%(port)s/%(device)s' % node) + _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.stats_return_codes[err.http_status / 100] = \ self.stats_return_codes.get(err.http_status / 100, 0) + 1 @@ -402,7 +404,7 @@ class AccountReaper(Daemon): except ClientException, err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( - 'Exception with %(ip)s:%(port)s/%(device)s' % node) + _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.stats_return_codes[err.http_status / 100] = \ self.stats_return_codes.get(err.http_status / 100, 0) + 1 diff --git a/swift/account/server.py b/swift/account/server.py index 5bd7bba517..67572165f5 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -18,15 +18,15 @@ from __future__ import with_statement import os import time import traceback - from urllib import unquote +from xml.sax import saxutils +from gettext import gettext as _ from webob import Request, Response from webob.exc import HTTPAccepted, HTTPBadRequest, \ HTTPCreated, HTTPForbidden, 
HTTPInternalServerError, \ HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, HTTPPreconditionFailed import simplejson -from xml.sax import saxutils from swift.common.db import AccountBroker from swift.common.utils import get_logger, get_param, hash_path, \ @@ -307,10 +307,8 @@ class AccountController(object): else: res = HTTPMethodNotAllowed() except: - self.logger.exception('ERROR __call__ error with %s %s ' - 'transaction %s' % (env.get('REQUEST_METHOD', '-'), - env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID', - '-'))) + self.logger.exception(_('ERROR __call__ error with %(method)s' + ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) trans_time = '%.4f' % (time.time() - start_time) additional_info = '' diff --git a/swift/common/bench.py b/swift/common/bench.py index b50df4dcb8..1e525c2e7d 100644 --- a/swift/common/bench.py +++ b/swift/common/bench.py @@ -18,6 +18,7 @@ import time import random from urlparse import urlparse from contextlib import contextmanager +from gettext import gettext as _ import eventlet.pools from eventlet.green.httplib import CannotSendRequest @@ -82,10 +83,10 @@ class Bench(object): def _log_status(self, title): total = time.time() - self.beginbeat - self.logger.info('%s %s [%s failures], %.01f/s' % ( - self.complete, title, self.failures, - (float(self.complete) / total), - )) + self.logger.info(_('%(complete)s %(title)s [%(fail)s failures], ' + '%(rate).01f/s'), + {'title': title, 'complete': self.complete, 'fail': self.failures, + 'rate': (float(self.complete) / total)}) @contextmanager def connection(self): @@ -94,7 +95,7 @@ class Bench(object): try: yield hc except CannotSendRequest: - self.logger.info("CannotSendRequest. Skipping...") + self.logger.info(_("CannotSendRequest. Skipping...")) try: hc.close() except: diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index 6b308e5b01..536793fc87 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -29,6 +29,7 @@ BufferedHTTPResponse. 
from urllib import quote import logging import time +from gettext import gettext as _ from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \ HTTPResponse, HTTPSConnection, _UNKNOWN @@ -82,15 +83,9 @@ class BufferedHTTPConnection(HTTPConnection): def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): self._method = method self._path = url - self._txn_id = '-' return HTTPConnection.putrequest(self, method, url, skip_host, skip_accept_encoding) - def putheader(self, header, value): - if header.lower() == 'x-cf-trans-id': - self._txn_id = value - return HTTPConnection.putheader(self, header, value) - def getexpect(self): response = BufferedHTTPResponse(self.sock, strict=self.strict, method=self._method) @@ -99,9 +94,10 @@ class BufferedHTTPConnection(HTTPConnection): def getresponse(self): response = HTTPConnection.getresponse(self) - logging.debug("HTTP PERF: %.5f seconds to %s %s:%s %s (%s)" % - (time.time() - self._connected_time, self._method, self.host, - self.port, self._path, self._txn_id)) + logging.debug(_("HTTP PERF: %(time).5f seconds to %(method)s " + "%(host)s:%(port)s %(path)s)"), + {'time': time.time() - self._connected_time, 'method': self._method, + 'host': self.host, 'port': self.port, 'path': self._path}) return response diff --git a/swift/common/db.py b/swift/common/db.py index 41854407d6..6f7372c22c 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -27,6 +27,7 @@ import cPickle as pickle import errno from random import randint from tempfile import mkstemp +from gettext import gettext as _ from eventlet import sleep import simplejson as json @@ -295,7 +296,7 @@ class DatabaseBroker(object): self.conn = conn except: # pragma: no cover logging.exception( - 'Broker error trying to rollback locked connection') + _('Broker error trying to rollback locked connection')) conn.close() def newid(self, remote_id): @@ -750,8 +751,8 @@ class ContainerBroker(DatabaseBroker): 'deleted': deleted}) except: self.logger.exception( - 'Invalid pending entry %s: %s' - % (self.pending_file, entry)) + _('Invalid pending entry %(file)s: %(entry)s'), + {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: @@ -1217,8 +1218,8 @@ class AccountBroker(DatabaseBroker): 'deleted': deleted}) except: self.logger.exception( - 'Invalid pending entry %s: %s' - % (self.pending_file, entry)) + _('Invalid pending entry %(file)s: %(entry)s'), + {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 8519e7128f..89e0590f7e 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -20,6 +20,7 @@ import random import math import time import shutil +from gettext import gettext as _ from eventlet import GreenPool, sleep, Timeout from eventlet.green import subprocess @@ -81,7 +82,7 @@ class ReplConnection(BufferedHTTPConnection): return response except: self.logger.exception( - 'ERROR reading HTTP response from %s' % self.node) + _('ERROR reading HTTP response from %s'), self.node) return None @@ -120,12 +121,14 @@ class Replicator(Daemon): def _report_stats(self): """Report the current stats to the logs.""" self.logger.info( - 'Attempted to replicate %d dbs in %.5f seconds (%.5f/s)' - % (self.stats['attempted'], time.time() - self.stats['start'], - self.stats['attempted'] / - (time.time() - self.stats['start'] + 0.0000001))) - self.logger.info('Removed %(remove)d dbs' % self.stats) - 
self.logger.info('%(success)s successes, %(failure)s failures' + _('Attempted to replicate %(count)d dbs in %(time).5f seconds ' + '(%(rate).5f/s)'), + {'count': self.stats['attempted'], + 'time': time.time() - self.stats['start'], + 'rate': self.stats['attempted'] / + (time.time() - self.stats['start'] + 0.0000001)}) + self.logger.info(_('Removed %(remove)d dbs') % self.stats) + self.logger.info(_('%(success)s successes, %(failure)s failures') % self.stats) self.logger.info(' '.join(['%s:%s' % item for item in self.stats.items() if item[0] in @@ -150,8 +153,8 @@ class Replicator(Daemon): proc = subprocess.Popen(popen_args) proc.communicate() if proc.returncode != 0: - self.logger.error('ERROR rsync failed with %s: %s' % - (proc.returncode, popen_args)) + self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'), + {'code': proc.returncode, 'args': popen_args}) return proc.returncode == 0 def _rsync_db(self, broker, device, http, local_id, @@ -200,7 +203,7 @@ class Replicator(Daemon): :returns: boolean indicating completion and success """ self.stats['diff'] += 1 - self.logger.debug('Syncing chunks with %s', http.host) + self.logger.debug(_('Syncing chunks with %s'), http.host) sync_table = broker.get_syncs() objects = broker.get_items_since(point, self.per_diff) while len(objects): @@ -208,8 +211,9 @@ class Replicator(Daemon): response = http.replicate('merge_items', objects, local_id) if not response or response.status >= 300 or response.status < 200: if response: - self.logger.error('ERROR Bad response %s from %s' % - (response.status, http.host)) + self.logger.error(_('ERROR Bad response %(status)s from ' + '%(host)s'), + {'status': response.status, 'host': http.host}) return False point = objects[-1]['ROWID'] objects = broker.get_items_since(point, self.per_diff) @@ -272,7 +276,7 @@ class Replicator(Daemon): http = self._http_connect(node, partition, broker.db_file) if not http: self.logger.error( - 'ERROR Unable to connect to remote server: %s' % node) + _('ERROR Unable to connect to remote server: %s'), node) return False with Timeout(self.node_timeout): response = http.replicate('sync', info['max_row'], info['hash'], @@ -310,7 +314,7 @@ class Replicator(Daemon): :param object_file: DB file name to be replicated :param node_id: node id of the node to be replicated to """ - self.logger.debug('Replicating db %s' % object_file) + self.logger.debug(_('Replicating db %s'), object_file) self.stats['attempted'] += 1 try: broker = self.brokerclass(object_file, pending_timeout=30) @@ -319,10 +323,10 @@ class Replicator(Daemon): info = broker.get_replication_info() except Exception, e: if 'no such table' in str(e): - self.logger.error('Quarantining DB %s' % object_file) + self.logger.error(_('Quarantining DB %s'), object_file) quarantine_db(broker.db_file, broker.db_type) else: - self.logger.exception('ERROR reading db %s' % object_file) + self.logger.exception(_('ERROR reading db %s'), object_file) self.stats['failure'] += 1 return # The db is considered deleted if the delete_timestamp value is greater @@ -355,10 +359,10 @@ class Replicator(Daemon): success = self._repl_to_node(node, broker, partition, info) except DriveNotMounted: repl_nodes.append(more_nodes.next()) - self.logger.error('ERROR Remote drive not mounted %s' % node) + self.logger.error(_('ERROR Remote drive not mounted %s'), node) except: - self.logger.exception('ERROR syncing %s with node %s' % - (object_file, node)) + self.logger.exception(_('ERROR syncing %(file)s with node' + ' %(node)s'), {'file': object_file, 
'node': node}) self.stats['success' if success else 'failure'] += 1 responses.append(success) if not shouldbehere and all(responses): @@ -399,14 +403,14 @@ class Replicator(Daemon): dirs = [] ips = whataremyips() if not ips: - self.logger.error('ERROR Failed to get my own IPs?') + self.logger.error(_('ERROR Failed to get my own IPs?')) return for node in self.ring.devs: if node and node['ip'] in ips and node['port'] == self.port: if self.mount_check and not os.path.ismount( os.path.join(self.root, node['device'])): self.logger.warn( - 'Skipping %(device)s as it is not mounted' % node) + _('Skipping %(device)s as it is not mounted') % node) continue unlink_older_than( os.path.join(self.root, node['device'], 'tmp'), @@ -414,12 +418,12 @@ class Replicator(Daemon): datadir = os.path.join(self.root, node['device'], self.datadir) if os.path.isdir(datadir): dirs.append((datadir, node['id'])) - self.logger.info('Beginning replication run') + self.logger.info(_('Beginning replication run')) for part, object_file, node_id in self.roundrobin_datadirs(dirs): self.cpool.spawn_n( self._replicate_object, part, object_file, node_id) self.cpool.waitall() - self.logger.info('Replication run OVER') + self.logger.info(_('Replication run OVER')) self._report_stats() def run_forever(self): @@ -430,7 +434,7 @@ class Replicator(Daemon): try: self.run_once() except: - self.logger.exception('ERROR trying to replicate') + self.logger.exception(_('ERROR trying to replicate')) sleep(self.run_pause) @@ -473,7 +477,7 @@ class ReplicatorRpc(object): except Exception, e: if 'no such table' in str(e): # TODO(unknown): find a real logger - print "Quarantining DB %s" % broker.db_file + print _("Quarantining DB %s") % broker.db_file quarantine_db(broker.db_file, broker.db_type) return HTTPNotFound() raise diff --git a/swift/common/memcached.py b/swift/common/memcached.py index d40d86c21e..d41b25616f 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -26,7 +26,7 @@ import socket import time from bisect import bisect from hashlib import md5 - +from gettext import gettext as _ CONN_TIMEOUT = 0.3 IO_TIMEOUT = 2.0 @@ -67,9 +67,11 @@ class MemcacheRing(object): def _exception_occurred(self, server, e, action='talking'): if isinstance(e, socket.timeout): - logging.error("Timeout %s to memcached: %s" % (action, server)) + logging.error(_("Timeout %(action)s to memcached: %(server)s"), + {'action': action, 'server': server}) else: - logging.exception("Error %s to memcached: %s" % (action, server)) + logging.exception(_("Error %(action)s to memcached: %(server)s"), + {'action': action, 'server': server}) now = time.time() self._errors[server].append(time.time()) if len(self._errors[server]) > ERROR_LIMIT_COUNT: @@ -77,7 +79,7 @@ class MemcacheRing(object): if err > now - ERROR_LIMIT_TIME] if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._error_limited[server] = now + ERROR_LIMIT_DURATION - logging.error('Error limiting server %s' % server) + logging.error(_('Error limiting server %s'), server) def _get_conns(self, key): """ diff --git a/swift/common/middleware/catch_errors.py b/swift/common/middleware/catch_errors.py index e94133627e..0dcedd201a 100644 --- a/swift/common/middleware/catch_errors.py +++ b/swift/common/middleware/catch_errors.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+from gettext import gettext as _ + from webob import Request from webob.exc import HTTPServerError @@ -32,7 +34,7 @@ class CatchErrorMiddleware(object): try: return self.app(env, start_response) except Exception, err: - self.logger.exception('Error: %s' % err) + self.logger.exception(_('Error: %s'), err) resp = HTTPServerError(request=Request(env), body='An error occurred', content_type='text/plain') diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 82f3569067..1679c1548c 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -15,6 +15,7 @@ import time import eventlet from webob import Request, Response from webob.exc import HTTPNotFound +from gettext import gettext as _ from swift.common.utils import split_path, cache_from_env, get_logger from swift.proxy.server import get_container_memcache_key @@ -167,7 +168,7 @@ class RateLimitMiddleware(object): :param obj_name: object name from path ''' if account_name in self.ratelimit_blacklist: - self.logger.error('Returning 497 because of blacklisting') + self.logger.error(_('Returning 497 because of blacklisting')) return Response(status='497 Blacklisted', body='Your account has been blacklisted', request=req) if account_name in self.ratelimit_whitelist: @@ -181,14 +182,15 @@ class RateLimitMiddleware(object): need_to_sleep = self._get_sleep_time(key, max_rate) if self.log_sleep_time_seconds and \ need_to_sleep > self.log_sleep_time_seconds: - self.logger.info("Ratelimit sleep log: %s for %s/%s/%s" % ( - need_to_sleep, account_name, - container_name, obj_name)) + self.logger.info(_("Ratelimit sleep log: %(sleep)s for " + "%(account)s/%(container)s/%(object)s"), + {'sleep': need_to_sleep, 'account': account_name, + 'container': container_name, 'object': obj_name}) if need_to_sleep > 0: eventlet.sleep(need_to_sleep) except MaxSleepTimeHit, e: - self.logger.error('Returning 498 because of ops ' + \ - 'rate limiting (Max Sleep) %s' % e) + self.logger.error(_('Returning 498 because of ops rate ' + 'limiting (Max Sleep) %s') % str(e)) error_resp = Response(status='498 Rate Limited', body='Slow down', request=req) return error_resp @@ -207,7 +209,7 @@ class RateLimitMiddleware(object): self.memcache_client = cache_from_env(env) if not self.memcache_client: self.logger.warning( - 'Warning: Cannot ratelimit without a memcached client') + _('Warning: Cannot ratelimit without a memcached client')) return self.app(env, start_response) try: version, account, container, obj = split_path(req.path, 1, 4, True) diff --git a/swift/common/utils.py b/swift/common/utils.py index d4dc078916..5ddac9ce84 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -34,7 +34,7 @@ from ConfigParser import ConfigParser, NoSectionError, NoOptionError from optparse import OptionParser from tempfile import mkstemp import cPickle as pickle - +from gettext import gettext as _ import eventlet from eventlet import greenio, GreenPool, sleep, Timeout, listen @@ -85,8 +85,8 @@ def load_libc_function(func_name): libc = ctypes.CDLL(ctypes.util.find_library('c')) return getattr(libc, func_name) except AttributeError: - logging.warn("Unable to locate %s in libc. Leaving as a no-op." - % func_name) + logging.warn(_("Unable to locate %s in libc. 
Leaving as a no-op."), + func_name) def noop_libc_function(*args): return 0 @@ -252,12 +252,12 @@ class LoggerFileObject(object): value = value.strip() if value: if 'Connection reset by peer' in value: - self.logger.error('STDOUT: Connection reset by peer') + self.logger.error(_('STDOUT: Connection reset by peer')) else: - self.logger.error('STDOUT: %s' % value) + self.logger.error(_('STDOUT: %s'), value) def writelines(self, values): - self.logger.error('STDOUT: %s' % '#012'.join(values)) + self.logger.error(_('STDOUT: %s'), '#012'.join(values)) def close(self): pass @@ -462,12 +462,12 @@ def parse_options(usage="%prog CONFIG [options]", once=False, test_args=None): if not args: parser.print_usage() - print "Error: missing config file argument" + print _("Error: missing config file argument") sys.exit(1) config = os.path.abspath(args.pop(0)) if not os.path.exists(config): parser.print_usage() - print "Error: unable to locate %s" % config + print _("Error: unable to locate %s") % config sys.exit(1) extra_args = [] @@ -690,14 +690,14 @@ def readconf(conf, section_name=None, log_name=None, defaults=None): defaults = {} c = ConfigParser(defaults) if not c.read(conf): - print "Unable to read config file %s" % conf + print _("Unable to read config file %s") % conf sys.exit(1) if section_name: if c.has_section(section_name): conf = dict(c.items(section_name)) else: - print "Unable to find %s config section in %s" % (section_name, - conf) + print _("Unable to find %s config section in %s") % \ + (section_name, conf) sys.exit(1) if "log_name" not in conf: if log_name is not None: @@ -749,7 +749,7 @@ def audit_location_generator(devices, datadir, mount_check=True, logger=None): os.path.ismount(os.path.join(devices, device)): if logger: logger.debug( - 'Skipping %s as it is not mounted' % device) + _('Skipping %s as it is not mounted'), device) continue datadir = os.path.join(devices, device, datadir) if not os.path.exists(datadir): diff --git a/swift/container/auditor.py b/swift/container/auditor.py index 0d237a1c82..a6f25538f5 100644 --- a/swift/container/auditor.py +++ b/swift/container/auditor.py @@ -16,6 +16,7 @@ import os import time from random import random +from gettext import gettext as _ from swift.container import server as container_server from swift.common.db import ContainerBroker @@ -51,10 +52,11 @@ class ContainerAuditor(Daemon): self.container_audit(path) if time.time() - reported >= 3600: # once an hour self.logger.info( - 'Since %s: Container audits: %s passed audit, ' - '%s failed audit' % (time.ctime(reported), - self.container_passes, - self.container_failures)) + _('Since %(time)s: Container audits: %(pass)s passed ' + 'audit, %(fail)s failed audit'), + {'time': time.ctime(reported), + 'pass': self.container_passes, + 'fail': self.container_failures}) reported = time.time() self.container_passes = 0 self.container_failures = 0 @@ -64,7 +66,7 @@ class ContainerAuditor(Daemon): def run_once(self): """Run the container audit once.""" - self.logger.info('Begin container audit "once" mode') + self.logger.info(_('Begin container audit "once" mode')) begin = reported = time.time() all_locs = audit_location_generator(self.devices, container_server.DATADIR, @@ -74,16 +76,17 @@ class ContainerAuditor(Daemon): self.container_audit(path) if time.time() - reported >= 3600: # once an hour self.logger.info( - 'Since %s: Container audits: %s passed audit, ' - '%s failed audit' % (time.ctime(reported), - self.container_passes, - self.container_failures)) + _('Since %(time)s: Container audits: 
%(pass)s passed ' + 'audit, %(fail)s failed audit'), + {'time': time.ctime(reported), + 'pass': self.container_passes, + 'fail': self.container_failures}) reported = time.time() self.container_passes = 0 self.container_failures = 0 elapsed = time.time() - begin self.logger.info( - 'Container audit "once" mode completed: %.02fs' % elapsed) + _('Container audit "once" mode completed: %.02fs'), elapsed) def container_audit(self, path): """ @@ -98,8 +101,8 @@ class ContainerAuditor(Daemon): if not broker.is_deleted(): info = broker.get_info() self.container_passes += 1 - self.logger.debug('Audit passed for %s' % broker.db_file) + self.logger.debug(_('Audit passed for %s'), broker.db_file) except Exception: self.container_failures += 1 - self.logger.exception('ERROR Could not get container info %s' % + self.logger.exception(_('ERROR Could not get container info %s'), (broker.db_file)) diff --git a/swift/container/server.py b/swift/container/server.py index 82e222435a..4eb92f0dd9 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -21,6 +21,7 @@ import traceback from urllib import unquote from xml.sax import saxutils from datetime import datetime +from gettext import gettext as _ import simplejson from eventlet.timeout import Timeout @@ -111,18 +112,18 @@ class ContainerController(object): return HTTPNotFound(request=req) elif account_response.status < 200 or \ account_response.status > 299: - self.logger.error('ERROR Account update failed ' - 'with %s:%s/%s transaction %s (will retry ' - 'later): Response %s %s' % (account_ip, - account_port, account_device, - req.headers.get('x-cf-trans-id'), - account_response.status, - account_response.reason)) + self.logger.error(_('ERROR Account update failed ' + 'with %(ip)s:%(port)s/%(device)s (will retry ' + 'later): Response %(status)s %(reason)s'), + {'ip': account_ip, 'port': account_port, + 'device': account_device, + 'status': account_response.status, + 'reason': account_response.reason}) except: - self.logger.exception('ERROR account update failed with ' - '%s:%s/%s transaction %s (will retry later)' % - (account_ip, account_port, account_device, - req.headers.get('x-cf-trans-id', '-'))) + self.logger.exception(_('ERROR account update failed with ' + '%(ip)s:%(port)s/%(device)s (will retry later)'), + {'ip': account_ip, 'port': account_port, + 'device': account_device}) return None def DELETE(self, req): @@ -394,10 +395,8 @@ class ContainerController(object): else: res = HTTPMethodNotAllowed() except: - self.logger.exception('ERROR __call__ error with %s %s ' - 'transaction %s' % (env.get('REQUEST_METHOD', '-'), - env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID', - '-'))) + self.logger.exception(_('ERROR __call__ error with %(method)s' + ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) trans_time = '%.4f' % (time.time() - start_time) log_message = '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %s' % ( diff --git a/swift/container/updater.py b/swift/container/updater.py index 646815257b..36f567ddc7 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -19,6 +19,7 @@ import signal import sys import time from random import random, shuffle +from gettext import gettext as _ from eventlet import spawn, patcher, Timeout @@ -56,7 +57,7 @@ class ContainerUpdater(Daemon): """Get the account ring. 
Load it if it hasn't been yet.""" if not self.account_ring: self.logger.debug( - 'Loading account ring from %s' % self.account_ring_path) + _('Loading account ring from %s'), self.account_ring_path) self.account_ring = Ring(self.account_ring_path) return self.account_ring @@ -70,7 +71,7 @@ class ContainerUpdater(Daemon): for device in os.listdir(self.devices): dev_path = os.path.join(self.devices, device) if self.mount_check and not os.path.ismount(dev_path): - self.logger.warn('%s is not mounted' % device) + self.logger.warn(_('%s is not mounted'), device) continue con_path = os.path.join(dev_path, DATADIR) if not os.path.exists(con_path): @@ -86,7 +87,7 @@ class ContainerUpdater(Daemon): """ time.sleep(random() * self.interval) while True: - self.logger.info('Begin container update sweep') + self.logger.info(_('Begin container update sweep')) begin = time.time() pids = [] # read from account ring to ensure it's fresh @@ -107,15 +108,17 @@ class ContainerUpdater(Daemon): self.container_sweep(path) elapsed = time.time() - forkbegin self.logger.debug( - 'Container update sweep of %s completed: ' - '%.02fs, %s successes, %s failures, %s with no changes' - % (path, elapsed, self.successes, self.failures, - self.no_changes)) + _('Container update sweep of %(path)s completed: ' + '%(elapsed).02fs, %(success)s successes, %(fail)s ' + 'failures, %(no_change)s with no changes'), + {'path': path, 'elapsed': elapsed, + 'success': self.successes, 'fail': self.failures, + 'no_change': self.no_changes}) sys.exit() while pids: pids.remove(os.wait()[0]) elapsed = time.time() - begin - self.logger.info('Container update sweep completed: %.02fs' % + self.logger.info(_('Container update sweep completed: %.02fs'), elapsed) if elapsed < self.interval: time.sleep(self.interval - elapsed) @@ -133,9 +136,11 @@ class ContainerUpdater(Daemon): for path in self.get_paths(): self.container_sweep(path) elapsed = time.time() - begin - self.logger.info('Container update single threaded sweep completed: ' - '%.02fs, %s successes, %s failures, %s with no changes' % - (elapsed, self.successes, self.failures, self.no_changes)) + self.logger.info(_('Container update single threaded sweep completed: ' + '%(elapsed).02fs, %(success)s successes, %(fail)s failures, ' + '%(no_change)s with no changes'), + {'elapsed': elapsed, 'success': self.successes, + 'fail': self.failures, 'no_change': self.no_changes}) def container_sweep(self, path): """ @@ -181,14 +186,16 @@ class ContainerUpdater(Daemon): if successes > failures: self.successes += 1 self.logger.debug( - 'Update report sent for %s %s' % (container, dbfile)) + _('Update report sent for %(container)s %(dbfile)s'), + {'container': container, 'dbfile': dbfile}) broker.reported(info['put_timestamp'], info['delete_timestamp'], info['object_count'], info['bytes_used']) else: self.failures += 1 self.logger.debug( - 'Update report failed for %s %s' % (container, dbfile)) + _('Update report failed for %(container)s %(dbfile)s'), + {'container': container, 'dbfile': dbfile}) else: self.no_changes += 1 @@ -216,8 +223,8 @@ class ContainerUpdater(Daemon): 'X-Bytes-Used': bytes, 'X-Account-Override-Deleted': 'yes'}) except: - self.logger.exception('ERROR account update failed with ' - '%(ip)s:%(port)s/%(device)s (will retry later): ' % node) + self.logger.exception(_('ERROR account update failed with ' + '%(ip)s:%(port)s/%(device)s (will retry later): '), node) return 500 with Timeout(self.node_timeout): try: @@ -227,5 +234,5 @@ class ContainerUpdater(Daemon): except: if 
self.logger.getEffectiveLevel() <= logging.DEBUG: self.logger.exception( - 'Exception with %(ip)s:%(port)s/%(device)s' % node) + _('Exception with %(ip)s:%(port)s/%(device)s'), node) return 500 diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index cdcb0c3ecf..bdd47479b1 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -17,6 +17,7 @@ import os import time from hashlib import md5 from random import random +from gettext import gettext as _ from swift.obj import server as object_server from swift.obj.replicator import invalidate_hash @@ -52,10 +53,10 @@ class ObjectAuditor(Daemon): for path, device, partition in all_locs: self.object_audit(path, device, partition) if time.time() - reported >= 3600: # once an hour - self.logger.info( - 'Since %s: Locally: %d passed audit, %d quarantined, ' - '%d errors' % (time.ctime(reported), self.passes, - self.quarantines, self.errors)) + self.logger.info(_('Since %(time)s: Locally: %(pass)d ' + 'passed audit, %(quar)d quarantined, %(error)d errors'), + {'time': time.ctime(reported), 'pass': self.passes, + 'quar': self.quarantines, 'error': self.errors}) reported = time.time() self.passes = 0 self.quarantines = 0 @@ -66,7 +67,7 @@ class ObjectAuditor(Daemon): def run_once(self): """Run the object audit once.""" - self.logger.info('Begin object audit "once" mode') + self.logger.info(_('Begin object audit "once" mode')) begin = reported = time.time() all_locs = audit_location_generator(self.devices, object_server.DATADIR, @@ -75,17 +76,17 @@ class ObjectAuditor(Daemon): for path, device, partition in all_locs: self.object_audit(path, device, partition) if time.time() - reported >= 3600: # once an hour - self.logger.info( - 'Since %s: Locally: %d passed audit, %d quarantined, ' - '%d errors' % (time.ctime(reported), self.passes, - self.quarantines, self.errors)) + self.logger.info(_('Since %(time)s: Locally: %(pass)d ' + 'passed audit, %(quar)d quarantined, %(error)d errors'), + {'time': time.ctime(reported), 'pass': self.passes, + 'quar': self.quarantines, 'error': self.errors}) reported = time.time() self.passes = 0 self.quarantines = 0 self.errors = 0 elapsed = time.time() - begin self.logger.info( - 'Object audit "once" mode completed: %.02fs' % elapsed) + _('Object audit "once" mode completed: %.02fs'), elapsed) def object_audit(self, path, device, partition): """ @@ -124,8 +125,8 @@ class ObjectAuditor(Daemon): "%s" % (df.metadata['ETag'], etag)) except AuditException, err: self.quarantines += 1 - self.logger.error('ERROR Object %s failed audit and will be ' - 'quarantined: %s' % (path, err)) + self.logger.error(_('ERROR Object %(obj)s failed audit and will be ' + 'quarantined: %(err)s'), {'obj': path, 'err': err}) invalidate_hash(os.path.dirname(path)) renamer_path = os.path.dirname(path) renamer(renamer_path, os.path.join(self.devices, device, @@ -133,6 +134,6 @@ class ObjectAuditor(Daemon): return except Exception: self.errors += 1 - self.logger.exception('ERROR Trying to audit %s' % path) + self.logger.exception(_('ERROR Trying to audit %s'), path) return self.passes += 1 diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index 7559cd0933..bd2b09af25 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -22,6 +22,7 @@ import logging import hashlib import itertools import cPickle as pickle +from gettext import gettext as _ import eventlet from eventlet import GreenPool, tpool, Timeout, sleep, hubs @@ -243,26 +244,27 @@ class ObjectReplicator(Daemon): results = proc.stdout.read() ret_val = proc.wait() 
except Timeout: - self.logger.error("Killing long-running rsync: %s" % str(args)) + self.logger.error(_("Killing long-running rsync: %s"), str(args)) proc.kill() return 1 # failure response code total_time = time.time() - start_time - if results: - for result in results.split('\n'): - if result == '': - continue - if result.startswith('cd+'): - continue - self.logger.info(result) + for result in results.split('\n'): + if result == '': + continue + if result.startswith('cd+'): + continue + self.logger.info(result) + if ret_val: + self.logger.error(_('Bad rsync return code: %s -> %d'), + (str(args), ret_val)) + elif results: self.logger.info( - "Sync of %s at %s complete (%.03f) [%d]" % ( - args[-2], args[-1], total_time, ret_val)) + _("Successful rsync of %(src)s at %(dst)s (%(time).03f)"), + {'src': args[-2], 'dst': args[-1], 'time': total_time}) else: self.logger.debug( - "Sync of %s at %s complete (%.03f) [%d]" % ( - args[-2], args[-1], total_time, ret_val)) - if ret_val: - self.logger.error('Bad rsync return code: %d' % ret_val) + _("Successful rsync of %(src)s at %(dst)s (%(time).03f)"), + {'src': args[-2], 'dst': args[-1], 'time': total_time}) return ret_val def rsync(self, node, job, suffixes): @@ -346,10 +348,10 @@ class ObjectReplicator(Daemon): responses.append(success) if not suffixes or (len(responses) == \ self.object_ring.replica_count and all(responses)): - self.logger.info("Removing partition: %s" % job['path']) + self.logger.info(_("Removing partition: %s"), job['path']) tpool.execute(shutil.rmtree, job['path'], ignore_errors=True) except (Exception, Timeout): - self.logger.exception("Error syncing handoff partition") + self.logger.exception(_("Error syncing handoff partition")) finally: self.partition_times.append(time.time() - begin) @@ -379,13 +381,14 @@ class ObjectReplicator(Daemon): node['device'], job['partition'], 'REPLICATE', '', headers={'Content-Length': '0'}).getresponse() if resp.status == 507: - self.logger.error('%s/%s responded as unmounted' % - (node['ip'], node['device'])) + self.logger.error(_('%(ip)s/%(device)s responded' + ' as unmounted'), node) attempts_left += 1 continue if resp.status != 200: - self.logger.error("Invalid response %s from %s" % - (resp.status, node['ip'])) + self.logger.error(_("Invalid response %(resp)s " + "from %(ip)s"), + {'resp': resp.status, 'ip': node['ip']}) continue remote_hash = pickle.loads(resp.read()) del resp @@ -408,7 +411,7 @@ class ObjectReplicator(Daemon): logging.exception("Error syncing with node: %s" % node) self.suffix_count += len(local_hash) except (Exception, Timeout): - self.logger.exception("Error syncing partition") + self.logger.exception(_("Error syncing partition")) finally: self.partition_times.append(time.time() - begin) @@ -418,27 +421,30 @@ class ObjectReplicator(Daemon): """ if self.replication_count: rate = self.replication_count / (time.time() - self.start) - self.logger.info("%d/%d (%.2f%%) partitions replicated in %.2f " - "seconds (%.2f/sec, %s remaining)" - % (self.replication_count, self.job_count, - self.replication_count * 100.0 / self.job_count, - time.time() - self.start, rate, - '%d%s' % compute_eta(self.start, - self.replication_count, self.job_count))) + self.logger.info(_("%(replicated)d/%(total)d (%(percentage).2f%%)" + " partitions replicated in %(time).2fs (%(rate).2f/sec, " + "%(remaining)s remaining)"), + {'replicated': self.replication_count, 'total': self.job_count, + 'percentage': self.replication_count * 100.0 / self.job_count, + 'time': time.time() - self.start, 'rate': 
rate, + 'remaining': '%d%s' % compute_eta(self.start, + self.replication_count, self.job_count)}) if self.suffix_count: - self.logger.info("%d suffixes checked - %.2f%% hashed, " - "%.2f%% synced" % - (self.suffix_count, - (self.suffix_hash * 100.0) / self.suffix_count, - (self.suffix_sync * 100.0) / self.suffix_count)) + self.logger.info(_("%(checked)d suffixes checked - " + "%(hashed).2f%% hashed, %(synced).2f%% synced"), + {'checked': self.suffix_count, + 'hashed': (self.suffix_hash * 100.0) / self.suffix_count, + 'synced': (self.suffix_sync * 100.0) / self.suffix_count}) self.partition_times.sort() - self.logger.info("Partition times: max %.4fs, min %.4fs, " - "med %.4fs" - % (self.partition_times[-1], self.partition_times[0], - self.partition_times[len(self.partition_times) // 2])) + self.logger.info(_("Partition times: max %(max).4fs, " + "min %(min).4fs, med %(med).4fs"), + {'max': self.partition_times[-1], + 'min': self.partition_times[0], + 'med': self.partition_times[ + len(self.partition_times) // 2]}) else: - self.logger.info("Nothing replicated for %s seconds." - % (time.time() - self.start)) + self.logger.info(_("Nothing replicated for %s seconds."), + (time.time() - self.start)) def kill_coros(self): """Utility function that kills all coroutines currently running.""" @@ -466,7 +472,7 @@ class ObjectReplicator(Daemon): while True: eventlet.sleep(self.lockup_timeout) if self.replication_count == self.last_replication_count: - self.logger.error("Lockup detected.. killing live coros.") + self.logger.error(_("Lockup detected.. killing live coros.")) self.kill_coros() self.last_replication_count = self.replication_count @@ -483,7 +489,7 @@ class ObjectReplicator(Daemon): obj_path = join(dev_path, 'objects') tmp_path = join(dev_path, 'tmp') if self.mount_check and not os.path.ismount(dev_path): - self.logger.warn('%s is not mounted' % local_dev['device']) + self.logger.warn(_('%s is not mounted'), local_dev['device']) continue unlink_older_than(tmp_path, time.time() - self.reclaim_age) if not os.path.exists(obj_path): @@ -521,8 +527,8 @@ class ObjectReplicator(Daemon): jobs = self.collect_jobs() for job in jobs: if not self.check_ring(): - self.logger.info( - "Ring change detected. Aborting current replication pass.") + self.logger.info(_("Ring change detected. Aborting " + "current replication pass.")) return if job['delete']: self.run_pool.spawn(self.update_deleted, job) @@ -531,7 +537,7 @@ class ObjectReplicator(Daemon): with Timeout(self.lockup_timeout): self.run_pool.waitall() except (Exception, Timeout): - self.logger.exception("Exception in top-level replication loop") + self.logger.exception(_("Exception in top-level replication loop")) self.kill_coros() finally: stats.kill() @@ -540,23 +546,23 @@ class ObjectReplicator(Daemon): def run_once(self): start = time.time() - self.logger.info("Running object replicator in script mode.") + self.logger.info(_("Running object replicator in script mode.")) self.replicate() total = (time.time() - start) / 60 self.logger.info( - "Object replication complete. (%.02f minutes)" % total) + _("Object replication complete. (%.02f minutes)"), total) def run_forever(self): self.logger.info("Starting object replicator in daemon mode.") # Run the replicator continually while True: start = time.time() - self.logger.info("Starting object replication pass.") + self.logger.info(_("Starting object replication pass.")) # Run the replicator self.replicate() total = (time.time() - start) / 60 self.logger.info( - "Object replication complete. 
(%.02f minutes)" % total) - self.logger.debug('Replication sleeping for %s seconds.' % + _("Object replication complete. (%.02f minutes)"), total) + self.logger.debug(_('Replication sleeping for %s seconds.'), self.run_pause) sleep(self.run_pause) diff --git a/swift/obj/server.py b/swift/obj/server.py index cdddf72edf..f724c7100b 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -26,6 +26,7 @@ from hashlib import md5 from tempfile import mkstemp from urllib import unquote from contextlib import contextmanager +from gettext import gettext as _ from webob import Request, Response, UTC from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ @@ -292,13 +293,15 @@ class ObjectController(object): if 200 <= response.status < 300: return else: - self.logger.error('ERROR Container update failed (saving ' - 'for async update later): %d response from %s:%s/%s' % - (response.status, ip, port, contdevice)) + self.logger.error(_('ERROR Container update failed ' + '(saving for async update later): %(status)d ' + 'response from %(ip)s:%(port)s/%(dev)s'), + {'status': response.status, 'ip': ip, 'port': port, + 'dev': contdevice}) except: - self.logger.exception('ERROR container update failed with ' - '%s:%s/%s transaction %s (saving for async update later)' % - (ip, port, contdevice, headers_in.get('x-cf-trans-id', '-'))) + self.logger.exception(_('ERROR container update failed with ' + '%(ip)s:%(port)s/%(dev)s (saving for async update later)'), + {'ip': ip, 'port': port, 'dev': contdevice}) async_dir = os.path.join(self.devices, objdevice, ASYNCDIR) ohash = hash_path(account, container, obj) write_pickle( @@ -565,10 +568,8 @@ class ObjectController(object): else: res = HTTPMethodNotAllowed() except: - self.logger.exception('ERROR __call__ error with %s %s ' - 'transaction %s' % (env.get('REQUEST_METHOD', '-'), - env.get('PATH_INFO', '-'), env.get('HTTP_X_CF_TRANS_ID', - '-'))) + self.logger.exception(_('ERROR __call__ error with %(method)s' + ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) trans_time = time.time() - start_time if self.log_requests: diff --git a/swift/obj/updater.py b/swift/obj/updater.py index 3d6a15cc4f..eb4638ea16 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -19,6 +19,7 @@ import signal import sys import time from random import random +from gettext import gettext as _ from eventlet import patcher, Timeout @@ -54,7 +55,7 @@ class ObjectUpdater(Daemon): """Get the container ring. 
Load it, if it hasn't been yet.""" if not self.container_ring: self.logger.debug( - 'Loading container ring from %s' % self.container_ring_path) + _('Loading container ring from %s'), self.container_ring_path) self.container_ring = Ring(self.container_ring_path) return self.container_ring @@ -62,7 +63,7 @@ class ObjectUpdater(Daemon): """Run the updater continuously.""" time.sleep(random() * self.interval) while True: - self.logger.info('Begin object update sweep') + self.logger.info(_('Begin object update sweep')) begin = time.time() pids = [] # read from container ring to ensure it's fresh @@ -71,7 +72,7 @@ class ObjectUpdater(Daemon): if self.mount_check and not \ os.path.ismount(os.path.join(self.devices, device)): self.logger.warn( - 'Skipping %s as it is not mounted' % device) + _('Skipping %s as it is not mounted'), device) continue while len(pids) >= self.concurrency: pids.remove(os.wait()[0]) @@ -86,20 +87,23 @@ class ObjectUpdater(Daemon): forkbegin = time.time() self.object_sweep(os.path.join(self.devices, device)) elapsed = time.time() - forkbegin - self.logger.info('Object update sweep of %s completed: ' - '%.02fs, %s successes, %s failures' % - (device, elapsed, self.successes, self.failures)) + self.logger.info(_('Object update sweep of %(device)s' + ' completed: %(elapsed).02fs, %(success)s successes' + ', %(fail)s failures'), + {'device': device, 'elapsed': elapsed, + 'success': self.successes, 'fail': self.failures}) sys.exit() while pids: pids.remove(os.wait()[0]) elapsed = time.time() - begin - self.logger.info('Object update sweep completed: %.02fs' % elapsed) + self.logger.info(_('Object update sweep completed: %.02fs'), + elapsed) if elapsed < self.interval: time.sleep(self.interval - elapsed) def run_once(self): """Run the updater once""" - self.logger.info('Begin object update single threaded sweep') + self.logger.info(_('Begin object update single threaded sweep')) begin = time.time() self.successes = 0 self.failures = 0 @@ -107,13 +111,14 @@ class ObjectUpdater(Daemon): if self.mount_check and \ not os.path.ismount(os.path.join(self.devices, device)): self.logger.warn( - 'Skipping %s as it is not mounted' % device) + _('Skipping %s as it is not mounted'), device) continue self.object_sweep(os.path.join(self.devices, device)) elapsed = time.time() - begin - self.logger.info('Object update single threaded sweep completed: ' - '%.02fs, %s successes, %s failures' % - (elapsed, self.successes, self.failures)) + self.logger.info(_('Object update single threaded sweep completed: ' + '%(elapsed).02fs, %(success)s successes, %(fail)s failures'), + {'elapsed': elapsed, 'success': self.successes, + 'fail': self.failures}) def object_sweep(self, device): """ @@ -150,7 +155,7 @@ class ObjectUpdater(Daemon): update = pickle.load(open(update_path, 'rb')) except Exception: self.logger.exception( - 'ERROR Pickle problem, quarantining %s' % update_path) + _('ERROR Pickle problem, quarantining %s'), update_path) renamer(update_path, os.path.join(device, 'quarantined', 'objects', os.path.basename(update_path))) return @@ -170,11 +175,13 @@ class ObjectUpdater(Daemon): successes.append(node['id']) if success: self.successes += 1 - self.logger.debug('Update sent for %s %s' % (obj, update_path)) + self.logger.debug(_('Update sent for %(obj)s %(path)s'), + {'obj': obj, 'path': update_path}) os.unlink(update_path) else: self.failures += 1 - self.logger.debug('Update failed for %s %s' % (obj, update_path)) + self.logger.debug(_('Update failed for %(obj)s %(path)s'), + {'obj': obj, 'path': 
update_path}) update['successes'] = successes write_pickle(update, update_path, os.path.join(device, 'tmp')) @@ -197,6 +204,6 @@ class ObjectUpdater(Daemon): resp.read() return resp.status except: - self.logger.exception('ERROR with remote server ' - '%(ip)s:%(port)s/%(device)s' % node) + self.logger.exception(_('ERROR with remote server ' + '%(ip)s:%(port)s/%(device)s'), node) return 500 diff --git a/swift/proxy/server.py b/swift/proxy/server.py index aa532cdd85..c7f48de022 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -22,6 +22,7 @@ from ConfigParser import ConfigParser from urllib import unquote, quote import uuid import functools +from gettext import gettext as _ from eventlet.timeout import Timeout from webob.exc import HTTPBadRequest, HTTPMethodNotAllowed, \ @@ -120,8 +121,8 @@ class Controller(object): :param msg: error message """ self.error_increment(node) - self.app.logger.error( - '%s %s:%s' % (msg, node['ip'], node['port'])) + self.app.logger.error(_('%(msg)s %(ip)s:%(port)s'), + {'msg': msg, 'ip': node['ip'], 'port': node['port']}) def exception_occurred(self, node, typ, additional_info): """ @@ -132,9 +133,9 @@ class Controller(object): :param additional_info: additional information to log """ self.app.logger.exception( - 'ERROR with %s server %s:%s/%s transaction %s re: %s' % (typ, - node['ip'], node['port'], node['device'], self.trans_id, - additional_info)) + _('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s'), + {'type': typ, 'ip': node['ip'], 'port': node['port'], + 'device': node['device'], 'info': additional_info}) def error_limited(self, node): """ @@ -155,8 +156,7 @@ class Controller(object): limited = node['errors'] > self.app.error_suppression_limit if limited: self.app.logger.debug( - 'Node error limited %s:%s (%s)' % ( - node['ip'], node['port'], node['device'])) + _('Node error limited %(ip)s:%(port)s (%(device)s)'), node) return limited def error_limit(self, node): @@ -380,8 +380,8 @@ class Controller(object): if etag: resp.headers['etag'] = etag.strip('"') return resp - self.app.logger.error('%s returning 503 for %s, transaction %s' % - (server_type, statuses, self.trans_id)) + self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'), + {'type': server_type, 'statuses': statuses}) resp.status = '503 Internal Server Error' return resp @@ -454,9 +454,7 @@ class Controller(object): res.bytes_transferred += len(chunk) except GeneratorExit: res.client_disconnect = True - self.app.logger.info( - 'Client disconnected on read transaction %s' % - self.trans_id) + self.app.logger.info(_('Client disconnected on read')) except: self.exception_occurred(node, 'Object', 'Trying to read during GET of %s' % req.path) @@ -561,7 +559,7 @@ class ObjectController(Controller): error_response = check_metadata(req, 'object') if error_response: return error_response - container_partition, containers, _, req.acl = \ + container_partition, containers, _junk, req.acl = \ self.container_info(self.account_name, self.container_name) if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) @@ -603,7 +601,7 @@ class ObjectController(Controller): @delay_denial def PUT(self, req): """HTTP PUT request handler.""" - container_partition, containers, _, req.acl = \ + container_partition, containers, _junk, req.acl = \ self.container_info(self.account_name, self.container_name) if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) @@ -618,7 +616,7 @@ class ObjectController(Controller): 
req.headers['X-Timestamp'] = normalize_timestamp(time.time()) # Sometimes the 'content-type' header exists, but is set to None. if not req.headers.get('content-type'): - guessed_type, _ = mimetypes.guess_type(req.path_info) + guessed_type, _junk = mimetypes.guess_type(req.path_info) if not guessed_type: req.headers['Content-Type'] = 'application/octet-stream' else: @@ -698,9 +696,9 @@ class ObjectController(Controller): containers.insert(0, container) if len(conns) <= len(nodes) / 2: self.app.logger.error( - 'Object PUT returning 503, %s/%s required connections, ' - 'transaction %s' % - (len(conns), len(nodes) / 2 + 1, self.trans_id)) + _('Object PUT returning 503, %(conns)s/%(nodes)s ' + 'required connections'), + {'conns': len(conns), 'nodes': len(nodes) // 2 + 1}) return HTTPServiceUnavailable(request=req) try: req.bytes_transferred = 0 @@ -730,27 +728,26 @@ class ObjectController(Controller): conns.remove(conn) if len(conns) <= len(nodes) / 2: self.app.logger.error( - 'Object PUT exceptions during send, %s/%s ' - 'required connections, transaction %s' % - (len(conns), len(nodes) // 2 + 1, - self.trans_id)) + _('Object PUT exceptions during send, ' + '%(conns)s/%(nodes)s required connections'), + {'conns': len(conns), + 'nodes': len(nodes) // 2 + 1}) return HTTPServiceUnavailable(request=req) if req.headers.get('transfer-encoding') and chunk == '': break except ChunkReadTimeout, err: self.app.logger.info( - 'ERROR Client read timeout (%ss)' % err.seconds) + _('ERROR Client read timeout (%ss)'), err.seconds) return HTTPRequestTimeout(request=req) except: req.client_disconnect = True self.app.logger.exception( - 'ERROR Exception causing client disconnect') + _('ERROR Exception causing client disconnect')) return Response(status='499 Client Disconnect') if req.content_length and req.bytes_transferred < req.content_length: req.client_disconnect = True self.app.logger.info( - 'Client disconnected without sending enough data %s' % - self.trans_id) + _('Client disconnected without sending enough data')) return Response(status='499 Client Disconnect') statuses = [] reasons = [] @@ -774,7 +771,7 @@ class ObjectController(Controller): 'Trying to get final status of PUT to %s' % req.path) if len(etags) > 1: self.app.logger.error( - 'Object servers returned %s mismatched etags' % len(etags)) + _('Object servers returned %s mismatched etags'), len(etags)) return HTTPServerError(request=req) etag = len(etags) and etags.pop() or None while len(statuses) < len(nodes): @@ -798,7 +795,7 @@ class ObjectController(Controller): @delay_denial def DELETE(self, req): """HTTP DELETE request handler.""" - container_partition, containers, _, req.acl = \ + container_partition, containers, _junk, req.acl = \ self.container_info(self.account_name, self.container_name) if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) @@ -848,7 +845,7 @@ class ObjectController(Controller): if not dest.startswith('/'): dest = '/' + dest try: - _, dest_container, dest_object = dest.split('/', 2) + _junk, dest_container, dest_object = dest.split('/', 2) except ValueError: return HTTPPreconditionFailed(request=req, body='Destination header must be of the form ' @@ -1116,9 +1113,8 @@ class ContainerController(Controller): # If even one node doesn't do the delete, we can't be sure # what the outcome will be once everything is in sync; so # we 503. 
- self.app.logger.error('Returning 503 because not all ' - 'container nodes confirmed DELETE, transaction %s' % - self.trans_id) + self.app.logger.error(_('Returning 503 because not all ' + 'container nodes confirmed DELETE')) return HTTPServiceUnavailable(request=req) if resp.status_int == 202: # Indicates no server had the container return HTTPNotFound(request=req) @@ -1440,7 +1436,7 @@ class BaseApplication(object): return resp return handler(req) except Exception: - self.logger.exception('ERROR Unhandled exception in request') + self.logger.exception(_('ERROR Unhandled exception in request')) return HTTPServerError(request=req) diff --git a/test/unit/auth/test_server.py b/test/unit/auth/test_server.py index cb9070a22f..1d691454b3 100644 --- a/test/unit/auth/test_server.py +++ b/test/unit/auth/test_server.py @@ -462,7 +462,7 @@ class TestAuthServer(unittest.TestCase): auth_server.http_connect = fake_http_connect(201) url = self.controller.create_user('test', 'tester', 'testing') self.assertEquals(log.getvalue().rsplit(' ', 1)[0], - "auth SUCCESS create_user('test', 'tester', _, False, False) " + "SUCCESS create_user('test', 'tester', _, False, False) " "= %s" % repr(url)) log.truncate(0) def start_response(*args): @@ -491,7 +491,7 @@ class TestAuthServer(unittest.TestCase): logsegs[1] = '[01/Jan/2001:01:02:03 +0000]' logsegs[2:] = logsegs[2].split(' ') logsegs[-1] = '0.1234' - self.assertEquals(' '.join(logsegs), 'auth testhost - - ' + self.assertEquals(' '.join(logsegs), 'testhost - - ' '[01/Jan/2001:01:02:03 +0000] "GET /v1/test/auth?test=True ' 'HTTP/1.0" 204 - "-" "-" - - - - - - - - - "-" "None" "-" ' '0.1234') @@ -519,7 +519,7 @@ class TestAuthServer(unittest.TestCase): logsegs[1] = '[01/Jan/2001:01:02:03 +0000]' logsegs[2:] = logsegs[2].split(' ') logsegs[-1] = '0.1234' - self.assertEquals(' '.join(logsegs), 'auth None - - [01/Jan/2001:' + self.assertEquals(' '.join(logsegs), 'None - - [01/Jan/2001:' '01:02:03 +0000] "GET /v1/test/auth HTTP/1.0" 204 - "-" "-" - ' '- - - - - - - - "-" "None" "Content-Length: 0\n' 'X-Storage-User: tester\nX-Storage-Pass: testing" 0.1234') @@ -556,7 +556,7 @@ class TestAuthServer(unittest.TestCase): 'HTTP_X_STORAGE_PASS': 'testing'}, start_response) self.assert_(log.getvalue().startswith( - 'auth ERROR Unhandled exception in ReST request'), + 'ERROR Unhandled exception in ReST request'), log.getvalue()) log.truncate(0) finally: diff --git a/test/unit/common/test_daemon.py b/test/unit/common/test_daemon.py index c5b95a3013..aa85987d25 100644 --- a/test/unit/common/test_daemon.py +++ b/test/unit/common/test_daemon.py @@ -50,7 +50,7 @@ class TestDaemon(unittest.TestCase): def test_create(self): d = daemon.Daemon({}) self.assertEquals(d.conf, {}) - self.assert_(isinstance(d.logger, utils.NamedLogger)) + self.assert_(isinstance(d.logger, utils.LogAdapter)) def test_stubs(self): d = daemon.Daemon({}) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index c41d147f79..7abc857ce8 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -283,35 +283,27 @@ Error: unable to locate %s utils.sys.stdout = orig_stdout utils.sys.stderr = orig_stderr - def test_NamedLogger(self): - sio = StringIO() - logger = logging.getLogger() - logger.addHandler(logging.StreamHandler(sio)) - nl = utils.NamedLogger(logger, 'server') - nl.warn('test') - self.assertEquals(sio.getvalue(), 'server test\n') - def test_get_logger(self): sio = StringIO() logger = logging.getLogger() logger.addHandler(logging.StreamHandler(sio)) logger 
= utils.get_logger(None, 'server') logger.warn('test1') - self.assertEquals(sio.getvalue(), 'server test1\n') + self.assertEquals(sio.getvalue(), 'test1\n') logger.debug('test2') - self.assertEquals(sio.getvalue(), 'server test1\n') + self.assertEquals(sio.getvalue(), 'test1\n') logger = utils.get_logger({'log_level': 'DEBUG'}, 'server') logger.debug('test3') - self.assertEquals(sio.getvalue(), 'server test1\nserver test3\n') + self.assertEquals(sio.getvalue(), 'test1\ntest3\n') # Doesn't really test that the log facility is truly being used all the # way to syslog; but exercises the code. logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server') logger.warn('test4') self.assertEquals(sio.getvalue(), - 'server test1\nserver test3\nserver test4\n') + 'test1\nserver test3\nserver test4\n') logger.debug('test5') self.assertEquals(sio.getvalue(), - 'server test1\nserver test3\nserver test4\n') + 'test1\nserver test3\nserver test4\n') def test_storage_directory(self): self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'), From ef4e23ee435487cd7c94d9a7f092138cf05ad749 Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Mon, 20 Dec 2010 21:57:19 +0000 Subject: [PATCH 3/8] tests fixed --- test/unit/common/test_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py index 7abc857ce8..b888686660 100644 --- a/test/unit/common/test_utils.py +++ b/test/unit/common/test_utils.py @@ -300,10 +300,10 @@ Error: unable to locate %s logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server') logger.warn('test4') self.assertEquals(sio.getvalue(), - 'test1\nserver test3\nserver test4\n') + 'test1\ntest3\ntest4\n') logger.debug('test5') self.assertEquals(sio.getvalue(), - 'test1\nserver test3\nserver test4\n') + 'test1\ntest3\ntest4\n') def test_storage_directory(self): self.assertEquals(utils.storage_directory('objects', '1', 'ABCDEF'), From 0b3fac8992360fe203d8e88caa650aa4a285ddeb Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Mon, 20 Dec 2010 22:10:58 +0000 Subject: [PATCH 4/8] add more gettext calls --- swift/proxy/server.py | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/swift/proxy/server.py b/swift/proxy/server.py index 9f34824c08..af0b1a48cc 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -172,10 +172,11 @@ class SegmentedIterable(object): raise except Exception, err: if not getattr(err, 'swift_logged', False): - self.controller.app.logger.exception('ERROR: While processing ' - 'manifest /%s/%s/%s %s' % (self.controller.account_name, - self.controller.container_name, - self.controller.object_name, self.controller.trans_id)) + self.controller.app.logger.exception(_('ERROR: While ' + 'processing manifest /%(acc)s/%(cont)s/%(obj)s'), + {'acc': self.controller.account_name, + 'cont': self.controller.container_name, + 'obj': self.controller.object_name}) err.swift_logged = True self.response.status_int = 503 raise @@ -204,10 +205,11 @@ class SegmentedIterable(object): raise except Exception, err: if not getattr(err, 'swift_logged', False): - self.controller.app.logger.exception('ERROR: While processing ' - 'manifest /%s/%s/%s %s' % (self.controller.account_name, - self.controller.container_name, - self.controller.object_name, self.controller.trans_id)) + self.controller.app.logger.exception(_('ERROR: While ' + 'processing manifest /%(acc)s/%(cont)s/%(obj)s'), + {'acc': self.controller.account_name, + 'cont': 
self.controller.container_name, + 'obj': self.controller.object_name}) err.swift_logged = True self.response.status_int = 503 raise @@ -250,10 +252,11 @@ class SegmentedIterable(object): raise except Exception, err: if not getattr(err, 'swift_logged', False): - self.controller.app.logger.exception('ERROR: While processing ' - 'manifest /%s/%s/%s %s' % (self.controller.account_name, - self.controller.container_name, - self.controller.object_name, self.controller.trans_id)) + self.controller.app.logger.exception(_('ERROR: While ' + 'processing manifest /%(acc)s/%(cont)s/%(obj)s'), + {'acc': self.controller.account_name, + 'cont': self.controller.container_name, + 'obj': self.controller.object_name}) err.swift_logged = True self.response.status_int = 503 raise From 24590669d8e132c9b8fa4ac6f6bb14d7f93acc91 Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Wed, 22 Dec 2010 16:36:31 +0000 Subject: [PATCH 5/8] i18n finishup --- swift/account/auditor.py | 1 - swift/account/reaper.py | 1 - swift/account/server.py | 1 - swift/common/bench.py | 1 - swift/common/bufferedhttp.py | 1 - swift/common/daemon.py | 3 +++ swift/common/db.py | 1 - swift/common/db_replicator.py | 1 - swift/common/memcached.py | 1 - swift/common/middleware/catch_errors.py | 2 -- swift/common/middleware/ratelimit.py | 1 - swift/common/utils.py | 1 - swift/common/wsgi.py | 2 ++ swift/container/auditor.py | 1 - swift/container/server.py | 1 - swift/container/updater.py | 1 - swift/obj/auditor.py | 1 - swift/obj/replicator.py | 1 - swift/obj/server.py | 1 - swift/obj/updater.py | 1 - swift/proxy/server.py | 1 - test/__init__.py | 7 +++++++ test/unit/proxy/test_server.py | 20 ++++++++++---------- 23 files changed, 22 insertions(+), 30 deletions(-) diff --git a/swift/account/auditor.py b/swift/account/auditor.py index 01afb7d469..36e1e0a0d8 100644 --- a/swift/account/auditor.py +++ b/swift/account/auditor.py @@ -16,7 +16,6 @@ import os import time from random import random -from gettext import gettext as _ from swift.account import server as account_server from swift.common.db import AccountBroker diff --git a/swift/account/reaper.py b/swift/account/reaper.py index 0225209392..6d2112927f 100644 --- a/swift/account/reaper.py +++ b/swift/account/reaper.py @@ -18,7 +18,6 @@ import random from logging import DEBUG from math import sqrt from time import time -from gettext import gettext as _ from eventlet import GreenPool, sleep diff --git a/swift/account/server.py b/swift/account/server.py index 67572165f5..53d604ce93 100644 --- a/swift/account/server.py +++ b/swift/account/server.py @@ -20,7 +20,6 @@ import time import traceback from urllib import unquote from xml.sax import saxutils -from gettext import gettext as _ from webob import Request, Response from webob.exc import HTTPAccepted, HTTPBadRequest, \ diff --git a/swift/common/bench.py b/swift/common/bench.py index 1e525c2e7d..b698ff310b 100644 --- a/swift/common/bench.py +++ b/swift/common/bench.py @@ -18,7 +18,6 @@ import time import random from urlparse import urlparse from contextlib import contextmanager -from gettext import gettext as _ import eventlet.pools from eventlet.green.httplib import CannotSendRequest diff --git a/swift/common/bufferedhttp.py b/swift/common/bufferedhttp.py index 536793fc87..81c54d0722 100644 --- a/swift/common/bufferedhttp.py +++ b/swift/common/bufferedhttp.py @@ -29,7 +29,6 @@ BufferedHTTPResponse. 
from urllib import quote import logging import time -from gettext import gettext as _ from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \ HTTPResponse, HTTPSConnection, _UNKNOWN diff --git a/swift/common/daemon.py b/swift/common/daemon.py index d305c247f6..e5ed3f7caa 100644 --- a/swift/common/daemon.py +++ b/swift/common/daemon.py @@ -16,7 +16,9 @@ import os import sys import signal +import gettext from re import sub + from swift.common import utils @@ -40,6 +42,7 @@ class Daemon(object): utils.validate_configuration() utils.capture_stdio(self.logger, **kwargs) utils.drop_privileges(self.conf.get('user', 'swift')) + gettext.install('swift', unicode=1) def kill_children(*args): signal.signal(signal.SIGTERM, signal.SIG_IGN) diff --git a/swift/common/db.py b/swift/common/db.py index 6f7372c22c..7315159bb7 100644 --- a/swift/common/db.py +++ b/swift/common/db.py @@ -27,7 +27,6 @@ import cPickle as pickle import errno from random import randint from tempfile import mkstemp -from gettext import gettext as _ from eventlet import sleep import simplejson as json diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py index 89e0590f7e..0588b841a0 100644 --- a/swift/common/db_replicator.py +++ b/swift/common/db_replicator.py @@ -20,7 +20,6 @@ import random import math import time import shutil -from gettext import gettext as _ from eventlet import GreenPool, sleep, Timeout from eventlet.green import subprocess diff --git a/swift/common/memcached.py b/swift/common/memcached.py index d41b25616f..193456524a 100644 --- a/swift/common/memcached.py +++ b/swift/common/memcached.py @@ -26,7 +26,6 @@ import socket import time from bisect import bisect from hashlib import md5 -from gettext import gettext as _ CONN_TIMEOUT = 0.3 IO_TIMEOUT = 2.0 diff --git a/swift/common/middleware/catch_errors.py b/swift/common/middleware/catch_errors.py index 0dcedd201a..5fb8c33592 100644 --- a/swift/common/middleware/catch_errors.py +++ b/swift/common/middleware/catch_errors.py @@ -13,8 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from gettext import gettext as _ - from webob import Request from webob.exc import HTTPServerError diff --git a/swift/common/middleware/ratelimit.py b/swift/common/middleware/ratelimit.py index 1679c1548c..c0827da88b 100644 --- a/swift/common/middleware/ratelimit.py +++ b/swift/common/middleware/ratelimit.py @@ -15,7 +15,6 @@ import time import eventlet from webob import Request, Response from webob.exc import HTTPNotFound -from gettext import gettext as _ from swift.common.utils import split_path, cache_from_env, get_logger from swift.proxy.server import get_container_memcache_key diff --git a/swift/common/utils.py b/swift/common/utils.py index 5ddac9ce84..c837456591 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -34,7 +34,6 @@ from ConfigParser import ConfigParser, NoSectionError, NoOptionError from optparse import OptionParser from tempfile import mkstemp import cPickle as pickle -from gettext import gettext as _ import eventlet from eventlet import greenio, GreenPool, sleep, Timeout, listen diff --git a/swift/common/wsgi.py b/swift/common/wsgi.py index a93c21aa8a..079e4277e1 100644 --- a/swift/common/wsgi.py +++ b/swift/common/wsgi.py @@ -21,6 +21,7 @@ import signal import sys import time import mimetools +import gettext import eventlet from eventlet import greenio, GreenPool, sleep, wsgi, listen @@ -120,6 +121,7 @@ def run_wsgi(conf_file, app_section, *args, **kwargs): sock = get_socket(conf, default_port=kwargs.get('default_port', 8080)) # remaining tasks should not require elevated privileges drop_privileges(conf.get('user', 'swift')) + gettext.install('swift', unicode=1) # finally after binding to ports and privilege drop, run app __init__ code app = loadapp('config:%s' % conf_file, global_conf={'log_name': log_name}) diff --git a/swift/container/auditor.py b/swift/container/auditor.py index a6f25538f5..082e6e2b37 100644 --- a/swift/container/auditor.py +++ b/swift/container/auditor.py @@ -16,7 +16,6 @@ import os import time from random import random -from gettext import gettext as _ from swift.container import server as container_server from swift.common.db import ContainerBroker diff --git a/swift/container/server.py b/swift/container/server.py index 4eb92f0dd9..fc06194de6 100644 --- a/swift/container/server.py +++ b/swift/container/server.py @@ -21,7 +21,6 @@ import traceback from urllib import unquote from xml.sax import saxutils from datetime import datetime -from gettext import gettext as _ import simplejson from eventlet.timeout import Timeout diff --git a/swift/container/updater.py b/swift/container/updater.py index 36f567ddc7..9056de3202 100644 --- a/swift/container/updater.py +++ b/swift/container/updater.py @@ -19,7 +19,6 @@ import signal import sys import time from random import random, shuffle -from gettext import gettext as _ from eventlet import spawn, patcher, Timeout diff --git a/swift/obj/auditor.py b/swift/obj/auditor.py index bdd47479b1..1d445ec65f 100644 --- a/swift/obj/auditor.py +++ b/swift/obj/auditor.py @@ -17,7 +17,6 @@ import os import time from hashlib import md5 from random import random -from gettext import gettext as _ from swift.obj import server as object_server from swift.obj.replicator import invalidate_hash diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py index bd2b09af25..ed77bf5a10 100644 --- a/swift/obj/replicator.py +++ b/swift/obj/replicator.py @@ -22,7 +22,6 @@ import logging import hashlib import itertools import cPickle as pickle -from gettext import gettext as _ import eventlet from eventlet import 
GreenPool, tpool, Timeout, sleep, hubs diff --git a/swift/obj/server.py b/swift/obj/server.py index d47f2b0634..7c139d7775 100644 --- a/swift/obj/server.py +++ b/swift/obj/server.py @@ -26,7 +26,6 @@ from hashlib import md5 from tempfile import mkstemp from urllib import unquote from contextlib import contextmanager -from gettext import gettext as _ from webob import Request, Response, UTC from webob.exc import HTTPAccepted, HTTPBadRequest, HTTPCreated, \ diff --git a/swift/obj/updater.py b/swift/obj/updater.py index eb4638ea16..f958166679 100644 --- a/swift/obj/updater.py +++ b/swift/obj/updater.py @@ -19,7 +19,6 @@ import signal import sys import time from random import random -from gettext import gettext as _ from eventlet import patcher, Timeout diff --git a/swift/proxy/server.py b/swift/proxy/server.py index af0b1a48cc..9838f86802 100644 --- a/swift/proxy/server.py +++ b/swift/proxy/server.py @@ -28,7 +28,6 @@ from datetime import datetime from urllib import unquote, quote import uuid import functools -from gettext import gettext as _ from hashlib import md5 from eventlet import sleep diff --git a/test/__init__.py b/test/__init__.py index e69de29bb2..db145240dc 100644 --- a/test/__init__.py +++ b/test/__init__.py @@ -0,0 +1,7 @@ +# See http://code.google.com/p/python-nose/issues/detail?id=373 +# The code below enables nosetests to work with i18n _() blocks + +import __builtin__ + +setattr(__builtin__, '_', lambda x: x) + diff --git a/test/unit/proxy/test_server.py b/test/unit/proxy/test_server.py index b7d43c0fb2..4577cd4dac 100644 --- a/test/unit/proxy/test_server.py +++ b/test/unit/proxy/test_server.py @@ -3019,8 +3019,8 @@ class TestSegmentedIterable(unittest.TestCase): self.assertRaises(Exception, proxy_server.SegmentedIterable(self.controller, None, [None])._load_next_segment) - self.assertEquals(self.controller.exception_args[0], - 'ERROR: While processing manifest /a/c/o tx1') + self.assert_(self.controller.exception_args[0].startswith( + 'ERROR: While processing manifest')) def test_load_next_segment_with_no_segments(self): self.assertRaises(StopIteration, @@ -3079,8 +3079,8 @@ class TestSegmentedIterable(unittest.TestCase): self.assertRaises(Exception, proxy_server.SegmentedIterable(self.controller, 'lc', [{'name': 'o1'}])._load_next_segment) - self.assertEquals(self.controller.exception_args[0], - 'ERROR: While processing manifest /a/c/o tx1') + self.assert_(self.controller.exception_args[0].startswith( + 'ERROR: While processing manifest')) self.assertEquals(str(self.controller.exception_info[1]), 'Could not load object segment /a/lc/o1: 404') @@ -3088,8 +3088,8 @@ class TestSegmentedIterable(unittest.TestCase): # Iterator value isn't a dict self.assertRaises(Exception, ''.join, proxy_server.SegmentedIterable(self.controller, None, [None])) - self.assertEquals(self.controller.exception_args[0], - 'ERROR: While processing manifest /a/c/o tx1') + self.assert_(self.controller.exception_args[0].startswith( + 'ERROR: While processing manifest')) def test_iter_with_no_segments(self): segit = proxy_server.SegmentedIterable(self.controller, 'lc', []) @@ -3118,8 +3118,8 @@ class TestSegmentedIterable(unittest.TestCase): self.assertRaises(Exception, ''.join, proxy_server.SegmentedIterable(self.controller, 'lc', [{'name': 'o1'}])) - self.assertEquals(self.controller.exception_args[0], - 'ERROR: While processing manifest /a/c/o tx1') + self.assert_(self.controller.exception_args[0].startswith( + 'ERROR: While processing manifest')) 
self.assertEquals(str(self.controller.exception_info[1]), 'Could not load object segment /a/lc/o1: 404') @@ -3128,8 +3128,8 @@ class TestSegmentedIterable(unittest.TestCase): self.assertRaises(Exception, proxy_server.SegmentedIterable(self.controller, None, [None]).app_iter_range(None, None).next) - self.assertEquals(self.controller.exception_args[0], - 'ERROR: While processing manifest /a/c/o tx1') + self.assert_(self.controller.exception_args[0].startswith( + 'ERROR: While processing manifest')) def test_app_iter_range_with_no_segments(self): self.assertEquals(''.join(proxy_server.SegmentedIterable( From 8ad87f634c3403085964619d410eae60b3c64afe Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Wed, 22 Dec 2010 16:48:25 +0000 Subject: [PATCH 6/8] fix fancy txnid logging --- bin/st | 2 ++ swift/common/utils.py | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bin/st b/bin/st index b41aca67ec..f7009daf21 100755 --- a/bin/st +++ b/bin/st @@ -44,6 +44,8 @@ except: try: from swift.common.bufferedhttp \ import BufferedHTTPConnection as HTTPConnection + import __builtin__ # bufferedhttp uses automagic gettext + setattr(__builtin__, '_', lambda x: x) except: from httplib import HTTPConnection diff --git a/swift/common/utils.py b/swift/common/utils.py index c837456591..da71253e7b 100644 --- a/swift/common/utils.py +++ b/swift/common/utils.py @@ -341,7 +341,8 @@ class NamedFormatter(logging.Formatter): def format(self, record): msg = logging.Formatter.format(self, record) - if record.levelno != logging.INFO and self.logger.txn_id: + if self.logger.txn_id and (record.levelno != logging.INFO or + self.logger.txn_id not in msg): return '%s %s (txn: %s)' % (self.server, msg, self.logger.txn_id) else: return '%s %s' % (self.server, msg) From f432269013238b196681e44ed8d6726a71696795 Mon Sep 17 00:00:00 2001 From: Michael Barton Date: Wed, 22 Dec 2010 17:18:30 +0000 Subject: [PATCH 7/8] add gettext to all binaries --- bin/st | 4 ++-- bin/swift-account-audit | 2 ++ bin/swift-auth-add-user | 2 ++ bin/swift-auth-recreate-accounts | 2 ++ bin/swift-auth-update-reseller-prefixes | 2 ++ bin/swift-bench | 2 ++ bin/swift-drive-audit | 2 ++ bin/swift-get-nodes | 3 +++ bin/swift-log-uploader | 2 ++ bin/swift-object-info | 2 ++ bin/swift-ring-builder | 2 ++ bin/swift-stats-populate | 2 ++ bin/swift-stats-report | 2 ++ 13 files changed, 27 insertions(+), 2 deletions(-) diff --git a/bin/st b/bin/st index f7009daf21..79f331558f 100755 --- a/bin/st +++ b/bin/st @@ -44,8 +44,8 @@ except: try: from swift.common.bufferedhttp \ import BufferedHTTPConnection as HTTPConnection - import __builtin__ # bufferedhttp uses automagic gettext - setattr(__builtin__, '_', lambda x: x) + import gettext + gettext.install('swift', unicode=1) except: from httplib import HTTPConnection diff --git a/bin/swift-account-audit b/bin/swift-account-audit index fe611562d7..1f6aceb2c8 100755 --- a/bin/swift-account-audit +++ b/bin/swift-account-audit @@ -20,6 +20,7 @@ from urllib import quote from hashlib import md5 import getopt from itertools import chain +import gettext import simplejson from eventlet.greenpool import GreenPool @@ -324,6 +325,7 @@ class Auditor(object): if __name__ == '__main__': + gettext.install('swift', unicode=1) try: optlist, args = getopt.getopt(sys.argv[1:], 'c:r:e:d') except getopt.GetoptError, err: diff --git a/bin/swift-auth-add-user b/bin/swift-auth-add-user index d502dc83a8..2d9819dfc8 100755 --- a/bin/swift-auth-add-user +++ b/bin/swift-auth-add-user @@ -18,11 +18,13 @@ from ConfigParser import 
ConfigParser from optparse import OptionParser from os.path import basename from sys import argv, exit +import gettext from swift.common.bufferedhttp import http_connect_raw as http_connect if __name__ == '__main__': + gettext.install('swift', unicode=1) default_conf = '/etc/swift/auth-server.conf' parser = OptionParser( usage='Usage: %prog [options] ') diff --git a/bin/swift-auth-recreate-accounts b/bin/swift-auth-recreate-accounts index e17bf2da3b..a8ee20e0e8 100755 --- a/bin/swift-auth-recreate-accounts +++ b/bin/swift-auth-recreate-accounts @@ -17,10 +17,12 @@ from ConfigParser import ConfigParser from optparse import OptionParser from sys import argv, exit +import gettext from swift.common.bufferedhttp import http_connect_raw as http_connect if __name__ == '__main__': + gettext.install('swift', unicode=1) default_conf = '/etc/swift/auth-server.conf' parser = OptionParser(usage='Usage: %prog [options]') parser.add_option('-c', '--conf', dest='conf', default=default_conf, diff --git a/bin/swift-auth-update-reseller-prefixes b/bin/swift-auth-update-reseller-prefixes index 41a4bf6a76..cb09bd9872 100755 --- a/bin/swift-auth-update-reseller-prefixes +++ b/bin/swift-auth-update-reseller-prefixes @@ -16,11 +16,13 @@ from os.path import basename from sys import argv, exit +import gettext from swift.common.db import get_db_connection if __name__ == '__main__': + gettext.install('swift', unicode=1) app = basename(argv[0]) if len(argv) != 3: exit(''' diff --git a/bin/swift-bench b/bin/swift-bench index ab332482cd..2c3e08318b 100755 --- a/bin/swift-bench +++ b/bin/swift-bench @@ -20,6 +20,7 @@ import sys import signal import uuid from optparse import OptionParser +import gettext from swift.common.bench import BenchController from swift.common.utils import readconf, NamedLogger @@ -55,6 +56,7 @@ SAIO_DEFAULTS = { } if __name__ == '__main__': + gettext.install('swift', unicode=1) usage = "usage: %prog [OPTIONS] [CONF_FILE]" usage += """\n\nConf file with SAIO defaults: diff --git a/bin/swift-drive-audit b/bin/swift-drive-audit index cde28c1ed7..64c478e203 100755 --- a/bin/swift-drive-audit +++ b/bin/swift-drive-audit @@ -20,6 +20,7 @@ import re import subprocess import sys from ConfigParser import ConfigParser +import gettext from swift.common.utils import get_logger @@ -86,6 +87,7 @@ def comment_fstab(mount_point): os.rename('/etc/fstab.new', '/etc/fstab') if __name__ == '__main__': + gettext.install('swift', unicode=1) c = ConfigParser() try: conf_path = sys.argv[1] diff --git a/bin/swift-get-nodes b/bin/swift-get-nodes index f24dd48f96..69643f6a84 100755 --- a/bin/swift-get-nodes +++ b/bin/swift-get-nodes @@ -16,11 +16,14 @@ import sys import urllib +import gettext from swift.common.ring import Ring from swift.common.utils import hash_path +gettext.install('swift', unicode=1) + if len(sys.argv) < 3 or len(sys.argv) > 5: print 'Usage: %s [] []' \ % sys.argv[0] diff --git a/bin/swift-log-uploader b/bin/swift-log-uploader index e533cad824..972303f67b 100755 --- a/bin/swift-log-uploader +++ b/bin/swift-log-uploader @@ -15,12 +15,14 @@ # limitations under the License. 
import sys +import gettext from swift.stats.log_uploader import LogUploader from swift.common.utils import parse_options from swift.common import utils if __name__ == '__main__': + gettext.install('swift', unicode=1) conf_file, options = parse_options(usage="Usage: %prog CONFIG_FILE PLUGIN") try: plugin = options['extra_args'][0] diff --git a/bin/swift-object-info b/bin/swift-object-info index 57f2522071..268b991bee 100755 --- a/bin/swift-object-info +++ b/bin/swift-object-info @@ -18,12 +18,14 @@ import sys import cPickle as pickle from datetime import datetime from hashlib import md5 +import gettext from swift.common.ring import Ring from swift.obj.server import read_metadata from swift.common.utils import hash_path if __name__ == '__main__': + gettext.install('swift', unicode=1) if len(sys.argv) <= 1: print "Usage: %s OBJECT_FILE" % sys.argv[0] sys.exit(1) diff --git a/bin/swift-ring-builder b/bin/swift-ring-builder index 50353df256..1d53a30973 100755 --- a/bin/swift-ring-builder +++ b/bin/swift-ring-builder @@ -21,6 +21,7 @@ from os import mkdir from os.path import basename, dirname, exists, join as pathjoin from sys import argv, exit from time import time +import gettext from swift.common.ring import RingBuilder @@ -174,6 +175,7 @@ swift-ring-builder set_min_part_hours if __name__ == '__main__': + gettext.install('swift', unicode=1) if len(argv) < 2: print ''' swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s diff --git a/bin/swift-stats-populate b/bin/swift-stats-populate index 8ea210cb65..985fa50c82 100755 --- a/bin/swift-stats-populate +++ b/bin/swift-stats-populate @@ -21,6 +21,7 @@ from optparse import OptionParser from sys import exit, argv from time import time from uuid import uuid4 +import gettext from eventlet import GreenPool, patcher, sleep from eventlet.pools import Pool @@ -75,6 +76,7 @@ def report(success): if __name__ == '__main__': global begun, created, item_type, next_report, need_to_create, retries_done + gettext.install('swift', unicode=1) patcher.monkey_patch() parser = OptionParser() diff --git a/bin/swift-stats-report b/bin/swift-stats-report index 3f735877cf..158ae37c75 100755 --- a/bin/swift-stats-report +++ b/bin/swift-stats-report @@ -23,6 +23,7 @@ from optparse import OptionParser from sys import argv, exit, stderr from time import time from uuid import uuid4 +import gettext from eventlet import GreenPool, hubs, patcher, sleep, Timeout from eventlet.pools import Pool @@ -746,6 +747,7 @@ def object_delete_report(coropool, connpool, options): if __name__ == '__main__': + gettext.install('swift', unicode=1) patcher.monkey_patch() hubs.get_hub().debug_exceptions = False From af99fb17e05dde0d769dfda1c90fd7dda36fdd32 Mon Sep 17 00:00:00 2001 From: gholt Date: Wed, 22 Dec 2010 11:35:11 -0800 Subject: [PATCH 8/8] Fixed probe tests to not use relativity (on imports) --- test/probe/__init__.py | 0 test/probe/test_account_failures.py | 2 +- test/probe/test_container_failures.py | 2 +- test/probe/test_object_async_update.py | 2 +- test/probe/test_object_handoff.py | 2 +- test/probe/test_running_with_each_type_down.py | 2 +- 6 files changed, 5 insertions(+), 5 deletions(-) create mode 100644 test/probe/__init__.py diff --git a/test/probe/__init__.py b/test/probe/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/test/probe/test_account_failures.py b/test/probe/test_account_failures.py index 10eba803bc..5ad2f965cb 100755 --- a/test/probe/test_account_failures.py +++ b/test/probe/test_account_failures.py @@ -21,7 +21,7 @@ from subprocess 
import Popen from time import sleep from swift.common import client -from common import get_to_final_state, kill_pids, reset_environment +from test.probe.common import get_to_final_state, kill_pids, reset_environment class TestAccountFailures(unittest.TestCase): diff --git a/test/probe/test_container_failures.py b/test/probe/test_container_failures.py index 9c497ca88b..b24e1bc169 100755 --- a/test/probe/test_container_failures.py +++ b/test/probe/test_container_failures.py @@ -23,7 +23,7 @@ from uuid import uuid4 from swift.common import client -from common import get_to_final_state, kill_pids, reset_environment +from test.probe.common import get_to_final_state, kill_pids, reset_environment class TestContainerFailures(unittest.TestCase): diff --git a/test/probe/test_object_async_update.py b/test/probe/test_object_async_update.py index a5d5852c68..7db3a75fca 100755 --- a/test/probe/test_object_async_update.py +++ b/test/probe/test_object_async_update.py @@ -23,7 +23,7 @@ from uuid import uuid4 from swift.common import client, direct_client -from common import kill_pids, reset_environment +from test.probe.common import kill_pids, reset_environment class TestObjectAsyncUpdate(unittest.TestCase): diff --git a/test/probe/test_object_handoff.py b/test/probe/test_object_handoff.py index 006f0d3a1e..7086c11de8 100755 --- a/test/probe/test_object_handoff.py +++ b/test/probe/test_object_handoff.py @@ -23,7 +23,7 @@ from uuid import uuid4 from swift.common import client, direct_client -from common import kill_pids, reset_environment +from test.probe.common import kill_pids, reset_environment class TestObjectHandoff(unittest.TestCase): diff --git a/test/probe/test_running_with_each_type_down.py b/test/probe/test_running_with_each_type_down.py index 7f2352d6ce..46fe1c5851 100755 --- a/test/probe/test_running_with_each_type_down.py +++ b/test/probe/test_running_with_each_type_down.py @@ -22,7 +22,7 @@ from time import sleep from swift.common import client -from common import get_to_final_state, kill_pids, reset_environment +from test.probe.common import get_to_final_state, kill_pids, reset_environment class TestRunningWithEachTypeDown(unittest.TestCase):
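
The i18n changes in this series convert Swift's log calls from eagerly %-interpolated strings to gettext-wrapped templates with named placeholders, handing the argument mapping to the logger so interpolation is deferred until a record is actually emitted and translators can reorder fields. The short sketch below is illustrative only and not part of the patch series; the logger name and sample counter values are invented for the example, and it follows the Python 2 idioms of the code above.

import gettext
import logging

# Install _() into builtins the way the daemons and binaries do in these
# patches; with no 'swift' message catalog installed, _() simply returns
# its argument unchanged.
gettext.install('swift', unicode=1)

logging.basicConfig(level=logging.INFO, format='%(message)s')
logger = logging.getLogger('demo-updater')   # hypothetical logger name

successes, failures, elapsed = 12, 1, 3.5    # illustrative values only

# Old style: the message is fully interpolated before logging sees it.
logger.info('sweep completed: %.02fs, %s successes, %s failures'
            % (elapsed, successes, failures))

# Style adopted by these patches: a translatable template with named
# placeholders, plus a dict of arguments that the logging module
# substitutes only if the record is actually emitted.
logger.info(_('sweep completed: %(elapsed).02fs, %(success)s successes, '
              '%(fail)s failures'),
            {'elapsed': elapsed, 'success': successes, 'fail': failures})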