diff --git a/swift/account/reaper.py b/swift/account/reaper.py
index 5ac491cac9..696277ca2d 100644
--- a/swift/account/reaper.py
+++ b/swift/account/reaper.py
@@ -311,8 +311,8 @@ class AccountReaper(Daemon):
             delete_timestamp = Timestamp(info['delete_timestamp'])
             if self.stats_containers_remaining and \
                begin - float(delete_timestamp) >= self.reap_not_done_after:
-                self.logger.warn(_('Account %s has not been reaped since %s') %
-                                 (account, delete_timestamp.isoformat))
+                self.logger.warning(_('Account %s has not been reaped since %s') %
+                                    (account, delete_timestamp.isoformat))
         return True
 
     def reap_container(self, account, account_partition, account_nodes,
diff --git a/swift/common/db_replicator.py b/swift/common/db_replicator.py
index 616e742ca6..b67b71520e 100644
--- a/swift/common/db_replicator.py
+++ b/swift/common/db_replicator.py
@@ -632,7 +632,7 @@ class Replicator(Daemon):
                         [(failure_dev['replication_ip'],
                           failure_dev['device'])
                          for failure_dev in self.ring.devs if failure_dev])
-                    self.logger.warn(
+                    self.logger.warning(
                         _('Skipping %(device)s as it is not mounted') % node)
                     continue
                 unlink_older_than(
diff --git a/swift/common/memcached.py b/swift/common/memcached.py
index 65e0da8afe..bb359539ae 100644
--- a/swift/common/memcached.py
+++ b/swift/common/memcached.py
@@ -357,7 +357,6 @@ class MemcacheRing(object):
         :returns: result of decrementing
         :raises MemcacheConnectionError:
         """
-
         return self.incr(key, delta=-delta, time=time)
 
     def delete(self, key):
diff --git a/swift/common/middleware/keystoneauth.py b/swift/common/middleware/keystoneauth.py
index 6a0b91bbc4..a00701c39b 100644
--- a/swift/common/middleware/keystoneauth.py
+++ b/swift/common/middleware/keystoneauth.py
@@ -325,9 +325,9 @@ class KeystoneAuth(object):
             # unknown domain, update if req confirms domain
             new_id = req_id or ''
         elif req_has_id and sysmeta_id != req_id:
-            self.logger.warn("Inconsistent project domain id: " +
-                             "%s in token vs %s in account metadata."
-                             % (req_id, sysmeta_id))
+            self.logger.warning("Inconsistent project domain id: " +
+                                "%s in token vs %s in account metadata."
+                                % (req_id, sysmeta_id))
 
         if new_id is not None:
             req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id
diff --git a/swift/common/middleware/tempauth.py b/swift/common/middleware/tempauth.py
index 48f791ada3..9eec784a6e 100644
--- a/swift/common/middleware/tempauth.py
+++ b/swift/common/middleware/tempauth.py
@@ -429,10 +429,12 @@ class TempAuth(object):
         try:
             acls = acls_from_account_info(info)
         except ValueError as e1:
-            self.logger.warn("Invalid ACL stored in metadata: %r" % e1)
+            self.logger.warning("Invalid ACL stored in metadata: %r" % e1)
             return None
         except NotImplementedError as e2:
-            self.logger.warn("ACL version exceeds middleware version: %r" % e2)
+            self.logger.warning(
+                "ACL version exceeds middleware version: %r"
+                % e2)
             return None
         return acls
 
diff --git a/swift/common/utils.py b/swift/common/utils.py
index ab80487a02..dd9377dbfb 100644
--- a/swift/common/utils.py
+++ b/swift/common/utils.py
@@ -389,8 +389,8 @@ def load_libc_function(func_name, log_error=True,
         if fail_if_missing:
             raise
         if log_error:
-            logging.warn(_("Unable to locate %s in libc. Leaving as a "
-                           "no-op."), func_name)
+            logging.warning(_("Unable to locate %s in libc. Leaving as a "
+                              "no-op."), func_name)
         return noop_libc_function
 
 
@@ -580,8 +580,8 @@ class FallocateWrapper(object):
             if self.fallocate is not noop_libc_function:
                 break
         if self.fallocate is noop_libc_function:
-            logging.warn(_("Unable to locate fallocate, posix_fallocate in "
-                           "libc. Leaving as a no-op."))
+            logging.warning(_("Unable to locate fallocate, posix_fallocate in "
+                              "libc. Leaving as a no-op."))
 
     def __call__(self, fd, mode, offset, length):
         """The length parameter must be a ctypes.c_uint64."""
@@ -664,8 +664,8 @@ def fsync_dir(dirpath):
         if err.errno == errno.ENOTDIR:
             # Raise error if someone calls fsync_dir on a non-directory
             raise
-        logging.warn(_("Unable to perform fsync() on directory %s: %s"),
-                     dirpath, os.strerror(err.errno))
+        logging.warning(_("Unable to perform fsync() on directory %s: %s"),
+                        dirpath, os.strerror(err.errno))
     finally:
         if dirfd:
             os.close(dirfd)
@@ -686,9 +686,9 @@ def drop_buffer_cache(fd, offset, length):
     ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
                          ctypes.c_uint64(length), 4)
     if ret != 0:
-        logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
-                     "-> %(ret)s", {'fd': fd, 'offset': offset,
-                                    'length': length, 'ret': ret})
+        logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
+                        "-> %(ret)s", {'fd': fd, 'offset': offset,
+                                       'length': length, 'ret': ret})
 
 
 NORMAL_FORMAT = "%016.05f"
@@ -1176,7 +1176,7 @@ class StatsdClient(object):
             return sock.sendto('|'.join(parts), self._target)
         except IOError as err:
             if self.logger:
-                self.logger.warn(
+                self.logger.warning(
                     'Error sending UDP message to %r: %s',
                     self._target, err)
 
@@ -1261,7 +1261,6 @@ class LogAdapter(logging.LoggerAdapter, object):
     def __init__(self, logger, server):
         logging.LoggerAdapter.__init__(self, logger, {})
         self.server = server
-        setattr(self, 'warn', self.warning)
 
     @property
     def txn_id(self):
@@ -3561,7 +3560,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
         except StopIteration:
             pass
         else:
-            logger.warn("More than one part in a single-part response?")
+            logger.warning(
+                "More than one part in a single-part response?")
 
     return string_along(response_body_iter, ranges_iter, logger)
 
diff --git a/swift/container/updater.py b/swift/container/updater.py
index f070e5f570..3d79db2032 100644
--- a/swift/container/updater.py
+++ b/swift/container/updater.py
@@ -89,7 +89,7 @@ class ContainerUpdater(Daemon):
         for device in self._listdir(self.devices):
             dev_path = os.path.join(self.devices, device)
             if self.mount_check and not ismount(dev_path):
-                self.logger.warn(_('%s is not mounted'), device)
+                self.logger.warning(_('%s is not mounted'), device)
                 continue
             con_path = os.path.join(dev_path, DATADIR)
             if not os.path.exists(con_path):
diff --git a/swift/obj/diskfile.py b/swift/obj/diskfile.py
index ebb849a9e9..e0ff11d330 100644
--- a/swift/obj/diskfile.py
+++ b/swift/obj/diskfile.py
@@ -303,8 +303,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
                 base, policy = split_policy_string(dir_)
             except PolicyError as e:
                 if logger:
-                    logger.warn(_('Directory %r does not map '
-                                  'to a valid policy (%s)') % (dir_, e))
+                    logger.warning(_('Directory %r does not map '
+                                     'to a valid policy (%s)') % (dir_, e))
                 continue
             datadir_path = os.path.join(devices, device, dir_)
             partitions = listdir(datadir_path)
@@ -420,7 +420,7 @@ class BaseDiskFileManager(object):
         # If the operator wants zero-copy with splice() but we don't have the
         # requisite kernel support, complain so they can go fix it.
         if conf_wants_splice and not splice.available:
-            self.logger.warn(
+            self.logger.warning(
                 "Use of splice() requested (config says \"splice = %s\"), "
                 "but the system does not support it. "
                 "splice() will not be used." % conf.get('splice'))
@@ -434,8 +434,8 @@ class BaseDiskFileManager(object):
                 # AF_ALG support), we can't use zero-copy.
                 if err.errno != errno.EAFNOSUPPORT:
                     raise
-                self.logger.warn("MD5 sockets not supported. "
-                                 "splice() will not be used.")
+                self.logger.warning("MD5 sockets not supported. "
+                                    "splice() will not be used.")
             else:
                 self.use_splice = True
                 with open('/proc/sys/fs/pipe-max-size') as f:
@@ -1404,7 +1404,7 @@ class BaseDiskFileReader(object):
         self._quarantined_dir = self._threadpool.run_in_thread(
             self.manager.quarantine_renamer, self._device_path,
             self._data_file)
-        self._logger.warn("Quarantined object %s: %s" % (
+        self._logger.warning("Quarantined object %s: %s" % (
             self._data_file, msg))
         self._logger.increment('quarantines')
         self._quarantine_hook(msg)
@@ -1674,7 +1674,7 @@ class BaseDiskFile(object):
         """
         self._quarantined_dir = self._threadpool.run_in_thread(
             self.manager.quarantine_renamer, self._device_path, data_file)
-        self._logger.warn("Quarantined object %s: %s" % (
+        self._logger.warning("Quarantined object %s: %s" % (
             data_file, msg))
         self._logger.increment('quarantines')
         return DiskFileQuarantined(msg)
diff --git a/swift/obj/reconstructor.py b/swift/obj/reconstructor.py
index 9ead83b1ac..151c00c1e7 100644
--- a/swift/obj/reconstructor.py
+++ b/swift/obj/reconstructor.py
@@ -819,8 +819,8 @@ class ObjectReconstructor(Daemon):
             dev_path = self._df_router[policy].get_dev_path(
                 local_dev['device'])
             if not dev_path:
-                self.logger.warn(_('%s is not mounted'),
-                                 local_dev['device'])
+                self.logger.warning(_('%s is not mounted'),
+                                    local_dev['device'])
                 continue
             obj_path = join(dev_path, data_dir)
             tmp_path = join(dev_path, get_tmp_dir(int(policy)))
diff --git a/swift/obj/replicator.py b/swift/obj/replicator.py
index aa9686133d..8daeb051f2 100644
--- a/swift/obj/replicator.py
+++ b/swift/obj/replicator.py
@@ -85,10 +85,11 @@ class ObjectReplicator(Daemon):
         if not self.rsync_module:
             self.rsync_module = '{replication_ip}::object'
             if config_true_value(conf.get('vm_test_mode', 'no')):
-                self.logger.warn('Option object-replicator/vm_test_mode is '
-                                 'deprecated and will be removed in a future '
-                                 'version. Update your configuration to use '
-                                 'option object-replicator/rsync_module.')
+                self.logger.warning('Option object-replicator/vm_test_mode '
+                                    'is deprecated and will be removed in a '
+                                    'future version. Update your '
+                                    'configuration to use option '
+                                    'object-replicator/rsync_module.')
                 self.rsync_module += '{replication_port}'
         self.http_timeout = int(conf.get('http_timeout', 60))
         self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
@@ -109,10 +110,10 @@ class ObjectReplicator(Daemon):
         self.handoff_delete = config_auto_int_value(
             conf.get('handoff_delete', 'auto'), 0)
         if any((self.handoff_delete, self.handoffs_first)):
-            self.logger.warn('Handoff only mode is not intended for normal '
-                             'operation, please disable handoffs_first and '
-                             'handoff_delete before the next '
-                             'normal rebalance')
+            self.logger.warning('Handoff only mode is not intended for normal '
+                                'operation, please disable handoffs_first and '
+                                'handoff_delete before the next '
+                                'normal rebalance')
         self._diskfile_mgr = DiskFileManager(conf, self.logger)
 
     def _zero_stats(self):
@@ -585,7 +586,8 @@ class ObjectReplicator(Daemon):
                       failure_dev['device'])
                      for failure_dev in policy.object_ring.devs
                      if failure_dev])
-                self.logger.warn(_('%s is not mounted'), local_dev['device'])
+                self.logger.warning(
+                    _('%s is not mounted'), local_dev['device'])
                 continue
             unlink_older_than(tmp_path, time.time() - self.reclaim_age)
             if not os.path.exists(obj_path):
@@ -701,7 +703,7 @@ class ObjectReplicator(Daemon):
                 self._add_failure_stats([(failure_dev['replication_ip'],
                                           failure_dev['device'])
                                          for failure_dev in job['nodes']])
-                self.logger.warn(_('%s is not mounted'), job['device'])
+                self.logger.warning(_('%s is not mounted'), job['device'])
                 continue
             if not self.check_ring(job['policy'].object_ring):
                 self.logger.info(_("Ring change detected. Aborting "
diff --git a/swift/obj/updater.py b/swift/obj/updater.py
index 675c7c509f..e84ddfd466 100644
--- a/swift/obj/updater.py
+++ b/swift/obj/updater.py
@@ -84,7 +84,7 @@ class ObjectUpdater(Daemon):
                 if self.mount_check and \
                         not ismount(os.path.join(self.devices, device)):
                     self.logger.increment('errors')
-                    self.logger.warn(
+                    self.logger.warning(
                         _('Skipping %s as it is not mounted'), device)
                     continue
                 while len(pids) >= self.concurrency:
@@ -127,7 +127,7 @@ class ObjectUpdater(Daemon):
             if self.mount_check and \
                     not ismount(os.path.join(self.devices, device)):
                 self.logger.increment('errors')
-                self.logger.warn(
+                self.logger.warning(
                     _('Skipping %s as it is not mounted'), device)
                 continue
             self.object_sweep(os.path.join(self.devices, device))
@@ -159,8 +159,9 @@ class ObjectUpdater(Daemon):
             try:
                 base, policy = split_policy_string(asyncdir)
             except PolicyError as e:
-                self.logger.warn(_('Directory %r does not map '
-                                   'to a valid policy (%s)') % (asyncdir, e))
+                self.logger.warning(_('Directory %r does not map '
+                                      'to a valid policy (%s)') %
+                                    (asyncdir, e))
                 continue
             for prefix in self._listdir(async_pending):
                 prefix_path = os.path.join(async_pending, prefix)
diff --git a/swift/proxy/controllers/base.py b/swift/proxy/controllers/base.py
index 1f98097c94..7fc08a06e2 100644
--- a/swift/proxy/controllers/base.py
+++ b/swift/proxy/controllers/base.py
@@ -941,13 +941,13 @@ class ResumingGetter(object):
                         _('Trying to read during GET'))
                 raise
             except ChunkWriteTimeout:
-                self.app.logger.warn(
+                self.app.logger.warning(
                     _('Client did not read from proxy within %ss') %
                     self.app.client_timeout)
                 self.app.logger.increment('client_timeouts')
             except GeneratorExit:
                 if not req.environ.get('swift.non_client_disconnect'):
-                    self.app.logger.warn(_('Client disconnected on read'))
+                    self.app.logger.warning(_('Client disconnected on read'))
             except Exception:
                 self.app.logger.exception(_('Trying to send to client'))
                 raise
diff --git a/swift/proxy/controllers/obj.py b/swift/proxy/controllers/obj.py
index 8c6b6bbab3..e5910d312e 100644
--- a/swift/proxy/controllers/obj.py
+++ b/swift/proxy/controllers/obj.py
@@ -981,7 +981,7 @@ class ReplicatedObjectController(BaseObjectController):
                     msg='Object PUT exceptions after last send, '
                         '%(conns)s/%(nodes)s required connections')
         except ChunkReadTimeout as err:
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('ERROR Client read timeout (%ss)'), err.seconds)
             self.app.logger.increment('client_timeouts')
             raise HTTPRequestTimeout(request=req)
@@ -989,7 +989,7 @@ class ReplicatedObjectController(BaseObjectController):
             raise
         except ChunkReadError:
             req.client_disconnect = True
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('Client disconnected without sending last chunk'))
             self.app.logger.increment('client_disconnects')
             raise HTTPClientDisconnect(request=req)
@@ -1004,7 +1004,7 @@ class ReplicatedObjectController(BaseObjectController):
             raise HTTPInternalServerError(request=req)
         if req.content_length and bytes_transferred < req.content_length:
             req.client_disconnect = True
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('Client disconnected without sending enough data'))
             self.app.logger.increment('client_disconnects')
             raise HTTPClientDisconnect(request=req)
@@ -2209,7 +2209,7 @@ class ECObjectController(BaseObjectController):
                 if req.content_length and (
                         bytes_transferred < req.content_length):
                     req.client_disconnect = True
-                    self.app.logger.warn(
+                    self.app.logger.warning(
                         _('Client disconnected without sending enough data'))
                     self.app.logger.increment('client_disconnects')
                     raise HTTPClientDisconnect(request=req)
@@ -2278,13 +2278,13 @@ class ECObjectController(BaseObjectController):
                 for putter in putters:
                     putter.wait()
             except ChunkReadTimeout as err:
-                self.app.logger.warn(
+                self.app.logger.warning(
                     _('ERROR Client read timeout (%ss)'), err.seconds)
                 self.app.logger.increment('client_timeouts')
                 raise HTTPRequestTimeout(request=req)
             except ChunkReadError:
                 req.client_disconnect = True
-                self.app.logger.warn(
+                self.app.logger.warning(
                     _('Client disconnected without sending last chunk'))
                 self.app.logger.increment('client_disconnects')
                 raise HTTPClientDisconnect(request=req)
diff --git a/swift/proxy/server.py b/swift/proxy/server.py
index 0747a861bb..3ecf93dbe8 100644
--- a/swift/proxy/server.py
+++ b/swift/proxy/server.py
@@ -229,9 +229,10 @@ class Application(object):
         Check the configuration for possible errors
         """
         if self._read_affinity and self.sorting_method != 'affinity':
-            self.logger.warn("sorting_method is set to '%s', not 'affinity'; "
-                             "read_affinity setting will have no effect." %
-                             self.sorting_method)
+            self.logger.warning(
+                "sorting_method is set to '%s', not 'affinity'; "
+                "read_affinity setting will have no effect." %
+                self.sorting_method)
 
     def get_object_ring(self, policy_idx):
         """
diff --git a/test/unit/__init__.py b/test/unit/__init__.py
index 6ab3618780..ec6a2a0985 100644
--- a/test/unit/__init__.py
+++ b/test/unit/__init__.py
@@ -477,6 +477,12 @@ class UnmockTimeModule(object):
 logging.time = UnmockTimeModule()
 
 
+class WARN_DEPRECATED(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+        print(self.msg)
+
+
 class FakeLogger(logging.Logger, object):
     # a thread safe fake logger
 
@@ -499,6 +505,9 @@ class FakeLogger(logging.Logger, object):
         NOTICE: 'notice',
     }
 
+    def warn(self, *args, **kwargs):
+        raise WARN_DEPRECATED("Deprecated Method warn use warning instead")
+
     def notice(self, msg, *args, **kwargs):
         """
         Convenience function for syslog priority LOG_NOTICE. The python
diff --git a/test/unit/common/test_utils.py b/test/unit/common/test_utils.py
index a336e78b60..dcc24042ba 100644
--- a/test/unit/common/test_utils.py
+++ b/test/unit/common/test_utils.py
@@ -1221,7 +1221,7 @@ class TestUtils(unittest.TestCase):
         logger = logging.getLogger('server')
         logger.addHandler(logging.StreamHandler(sio))
         logger = utils.get_logger(None, 'server', log_route='server')
-        logger.warn('test1')
+        logger.warning('test1')
         self.assertEqual(sio.getvalue(), 'test1\n')
         logger.debug('test2')
         self.assertEqual(sio.getvalue(), 'test1\n')
@@ -1233,7 +1233,7 @@ class TestUtils(unittest.TestCase):
         # way to syslog; but exercises the code.
         logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                                   log_route='server')
-        logger.warn('test4')
+        logger.warning('test4')
         self.assertEqual(sio.getvalue(), 'test1\ntest3\ntest4\n')
 
         # make sure debug doesn't log by default
@@ -1491,7 +1491,7 @@ class TestUtils(unittest.TestCase):
         self.assertTrue('12345' not in log_msg)
         # test txn already in message
         self.assertEqual(logger.txn_id, '12345')
-        logger.warn('test 12345 test')
+        logger.warning('test 12345 test')
         self.assertEqual(strip_value(sio), 'test 12345 test\n')
         # Test multi line collapsing
         logger.error('my\nerror\nmessage')
@@ -1517,7 +1517,7 @@ class TestUtils(unittest.TestCase):
             self.assertTrue('1.2.3.4' not in log_msg)
             # test client_ip (and txn) already in message
             self.assertEqual(logger.client_ip, '1.2.3.4')
-            logger.warn('test 1.2.3.4 test 12345')
+            logger.warning('test 1.2.3.4 test 12345')
             self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
         finally:
             logger.logger.removeHandler(handler)
diff --git a/test/unit/obj/test_updater.py b/test/unit/obj/test_updater.py
index 3900bf4944..db6541e956 100644
--- a/test/unit/obj/test_updater.py
+++ b/test/unit/obj/test_updater.py
@@ -183,7 +183,7 @@ class TestObjectUpdater(unittest.TestCase):
                 'node_timeout': '5'})
             cu.logger = mock_logger = mock.MagicMock()
             cu.object_sweep(self.sda1)
-            self.assertEqual(mock_logger.warn.call_count, warn)
+            self.assertEqual(mock_logger.warning.call_count, warn)
             self.assertTrue(
                 os.path.exists(os.path.join(self.sda1, 'not_a_dir')))
             if should_skip:
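
Below is a minimal usage sketch (not part of the patch) of the FakeLogger.warn guard added in test/unit/__init__.py above; it assumes FakeLogger and WARN_DEPRECATED are importable from test.unit as shown in that hunk. Any leftover call to the deprecated .warn spelling now raises instead of passing silently, which is how the test suite steers callers toward .warning:

    # Hypothetical sketch: exercising the deprecation guard from test/unit/__init__.py
    from test.unit import FakeLogger, WARN_DEPRECATED

    logger = FakeLogger()
    logger.warning('supported spelling, recorded as usual')
    try:
        logger.warn('deprecated spelling')
    except WARN_DEPRECATED as e:
        # the guard turns the deprecated call into a loud test failure
        print('caught: %s' % e.msg)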