Merge "Python 3 deprecated the logger.warn method in favor of warning"

commit 53c4735b02
Jenkins authored 2015-12-23 17:26:18 +00:00, committed by Gerrit Code Review
17 changed files with 75 additions and 61 deletions
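
For background: on Python 3 (3.4 and later, and still true through at least 3.12), Logger.warn exists only as an undocumented, deprecated alias that delegates to Logger.warning, so this change moves every call site to the supported spelling. A minimal sketch of the behavior being avoided (the 'demo' logger name is invented for this illustration):

    import logging
    import warnings

    log = logging.getLogger('demo')

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        log.warn('old spelling')   # delegates to warning(), but is flagged
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

    log.warning('new spelling')    # the supported method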

swift/account/reaper.py View File

@@ -311,8 +311,8 @@ class AccountReaper(Daemon):
             delete_timestamp = Timestamp(info['delete_timestamp'])
             if self.stats_containers_remaining and \
                begin - float(delete_timestamp) >= self.reap_not_done_after:
-                self.logger.warn(_('Account %s has not been reaped since %s') %
+                self.logger.warning(_('Account %s has not been reaped since %s') %
                                  (account, delete_timestamp.isoformat))
         return True

     def reap_container(self, account, account_partition, account_nodes,

swift/common/db_replicator.py View File

@@ -632,7 +632,7 @@ class Replicator(Daemon):
                     [(failure_dev['replication_ip'],
                       failure_dev['device'])
                      for failure_dev in self.ring.devs if failure_dev])
-                self.logger.warn(
+                self.logger.warning(
                     _('Skipping %(device)s as it is not mounted') % node)
                 continue
             unlink_older_than(

swift/common/memcached.py View File

@@ -357,7 +357,6 @@ class MemcacheRing(object):
         :returns: result of decrementing
         :raises MemcacheConnectionError:
         """
-
         return self.incr(key, delta=-delta, time=time)

     def delete(self, key):

swift/common/middleware/keystoneauth.py View File

@@ -325,9 +325,9 @@ class KeystoneAuth(object):
                 # unknown domain, update if req confirms domain
                 new_id = req_id or ''
             elif req_has_id and sysmeta_id != req_id:
-                self.logger.warn("Inconsistent project domain id: " +
-                                 "%s in token vs %s in account metadata."
-                                 % (req_id, sysmeta_id))
+                self.logger.warning("Inconsistent project domain id: " +
+                                    "%s in token vs %s in account metadata."
+                                    % (req_id, sysmeta_id))

             if new_id is not None:
                 req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id

swift/common/middleware/tempauth.py View File

@@ -429,10 +429,12 @@ class TempAuth(object):
         try:
             acls = acls_from_account_info(info)
         except ValueError as e1:
-            self.logger.warn("Invalid ACL stored in metadata: %r" % e1)
+            self.logger.warning("Invalid ACL stored in metadata: %r" % e1)
             return None
         except NotImplementedError as e2:
-            self.logger.warn("ACL version exceeds middleware version: %r" % e2)
+            self.logger.warning(
+                "ACL version exceeds middleware version: %r"
+                % e2)
             return None
         return acls

swift/common/utils.py View File

@@ -389,8 +389,8 @@ def load_libc_function(func_name, log_error=True,
         if fail_if_missing:
             raise
         if log_error:
-            logging.warn(_("Unable to locate %s in libc. Leaving as a "
-                           "no-op."), func_name)
+            logging.warning(_("Unable to locate %s in libc. Leaving as a "
+                              "no-op."), func_name)
         return noop_libc_function
@@ -580,8 +580,8 @@ class FallocateWrapper(object):
             if self.fallocate is not noop_libc_function:
                 break
         if self.fallocate is noop_libc_function:
-            logging.warn(_("Unable to locate fallocate, posix_fallocate in "
-                           "libc. Leaving as a no-op."))
+            logging.warning(_("Unable to locate fallocate, posix_fallocate in "
+                              "libc. Leaving as a no-op."))

     def __call__(self, fd, mode, offset, length):
         """The length parameter must be a ctypes.c_uint64."""
@@ -664,8 +664,8 @@ def fsync_dir(dirpath):
         if err.errno == errno.ENOTDIR:
             # Raise error if someone calls fsync_dir on a non-directory
             raise
-        logging.warn(_("Unable to perform fsync() on directory %s: %s"),
-                     dirpath, os.strerror(err.errno))
+        logging.warning(_("Unable to perform fsync() on directory %s: %s"),
+                        dirpath, os.strerror(err.errno))
     finally:
         if dirfd:
             os.close(dirfd)
@@ -686,9 +686,9 @@ def drop_buffer_cache(fd, offset, length):
     ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
                          ctypes.c_uint64(length), 4)
     if ret != 0:
-        logging.warn("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
-                     "-> %(ret)s", {'fd': fd, 'offset': offset,
-                                    'length': length, 'ret': ret})
+        logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
+                        "-> %(ret)s", {'fd': fd, 'offset': offset,
+                                       'length': length, 'ret': ret})


 NORMAL_FORMAT = "%016.05f"
@@ -1176,7 +1176,7 @@ class StatsdClient(object):
                 return sock.sendto('|'.join(parts), self._target)
         except IOError as err:
             if self.logger:
-                self.logger.warn(
+                self.logger.warning(
                     'Error sending UDP message to %r: %s',
                     self._target, err)
@@ -1261,7 +1261,6 @@ class LogAdapter(logging.LoggerAdapter, object):
     def __init__(self, logger, server):
         logging.LoggerAdapter.__init__(self, logger, {})
         self.server = server
-        setattr(self, 'warn', self.warning)

     @property
     def txn_id(self):
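
Aside: the setattr line removed above was a compatibility shim that made LogAdapter instances answer to both spellings. A standalone sketch of that pre-change pattern (simplified; the 'demo' logger and 'proxy-server' name are invented for this illustration):

    import logging

    class LogAdapter(logging.LoggerAdapter):
        def __init__(self, logger, server):
            logging.LoggerAdapter.__init__(self, logger, {})
            self.server = server
            # the alias this commit deletes: make .warn point at .warning
            setattr(self, 'warn', self.warning)

    adapter = LogAdapter(logging.getLogger('demo'), 'proxy-server')
    adapter.warn('routed through the alias')  # same as adapter.warning(...)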
@@ -3561,7 +3560,8 @@ def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
         except StopIteration:
             pass
         else:
-            logger.warn("More than one part in a single-part response?")
+            logger.warning(
+                "More than one part in a single-part response?")

         return string_along(response_body_iter, ranges_iter, logger)

swift/container/updater.py View File

@@ -89,7 +89,7 @@ class ContainerUpdater(Daemon):
         for device in self._listdir(self.devices):
             dev_path = os.path.join(self.devices, device)
             if self.mount_check and not ismount(dev_path):
-                self.logger.warn(_('%s is not mounted'), device)
+                self.logger.warning(_('%s is not mounted'), device)
                 continue
             con_path = os.path.join(dev_path, DATADIR)
             if not os.path.exists(con_path):

swift/obj/diskfile.py View File

@@ -303,8 +303,8 @@ def object_audit_location_generator(devices, mount_check=True, logger=None,
                 base, policy = split_policy_string(dir_)
             except PolicyError as e:
                 if logger:
-                    logger.warn(_('Directory %r does not map '
-                                  'to a valid policy (%s)') % (dir_, e))
+                    logger.warning(_('Directory %r does not map '
+                                     'to a valid policy (%s)') % (dir_, e))
                 continue
             datadir_path = os.path.join(devices, device, dir_)
             partitions = listdir(datadir_path)
@@ -420,7 +420,7 @@ class BaseDiskFileManager(object):
         # If the operator wants zero-copy with splice() but we don't have the
         # requisite kernel support, complain so they can go fix it.
         if conf_wants_splice and not splice.available:
-            self.logger.warn(
+            self.logger.warning(
                 "Use of splice() requested (config says \"splice = %s\"), "
                 "but the system does not support it. "
                 "splice() will not be used." % conf.get('splice'))
@@ -434,8 +434,8 @@ class BaseDiskFileManager(object):
             # AF_ALG support), we can't use zero-copy.
             if err.errno != errno.EAFNOSUPPORT:
                 raise
-            self.logger.warn("MD5 sockets not supported. "
-                             "splice() will not be used.")
+            self.logger.warning("MD5 sockets not supported. "
+                                "splice() will not be used.")
         else:
             self.use_splice = True
             with open('/proc/sys/fs/pipe-max-size') as f:
@@ -1404,7 +1404,7 @@ class BaseDiskFileReader(object):
             self._quarantined_dir = self._threadpool.run_in_thread(
                 self.manager.quarantine_renamer, self._device_path,
                 self._data_file)
-            self._logger.warn("Quarantined object %s: %s" % (
+            self._logger.warning("Quarantined object %s: %s" % (
                 self._data_file, msg))
             self._logger.increment('quarantines')
             self._quarantine_hook(msg)
@@ -1674,7 +1674,7 @@ class BaseDiskFile(object):
         """
         self._quarantined_dir = self._threadpool.run_in_thread(
             self.manager.quarantine_renamer, self._device_path, data_file)
-        self._logger.warn("Quarantined object %s: %s" % (
+        self._logger.warning("Quarantined object %s: %s" % (
             data_file, msg))
         self._logger.increment('quarantines')
         return DiskFileQuarantined(msg)

swift/obj/reconstructor.py View File

@@ -819,8 +819,8 @@ class ObjectReconstructor(Daemon):
             dev_path = self._df_router[policy].get_dev_path(
                 local_dev['device'])
             if not dev_path:
-                self.logger.warn(_('%s is not mounted'),
-                                 local_dev['device'])
+                self.logger.warning(_('%s is not mounted'),
+                                    local_dev['device'])
                 continue
             obj_path = join(dev_path, data_dir)
             tmp_path = join(dev_path, get_tmp_dir(int(policy)))

swift/obj/replicator.py View File

@@ -85,10 +85,11 @@ class ObjectReplicator(Daemon):
         if not self.rsync_module:
             self.rsync_module = '{replication_ip}::object'
             if config_true_value(conf.get('vm_test_mode', 'no')):
-                self.logger.warn('Option object-replicator/vm_test_mode is '
-                                 'deprecated and will be removed in a future '
-                                 'version. Update your configuration to use '
-                                 'option object-replicator/rsync_module.')
+                self.logger.warning('Option object-replicator/vm_test_mode '
+                                    'is deprecated and will be removed in a '
+                                    'future version. Update your '
+                                    'configuration to use option '
+                                    'object-replicator/rsync_module.')
                 self.rsync_module += '{replication_port}'
         self.http_timeout = int(conf.get('http_timeout', 60))
         self.lockup_timeout = int(conf.get('lockup_timeout', 1800))
@@ -109,10 +110,10 @@ class ObjectReplicator(Daemon):
         self.handoff_delete = config_auto_int_value(
             conf.get('handoff_delete', 'auto'), 0)
         if any((self.handoff_delete, self.handoffs_first)):
-            self.logger.warn('Handoff only mode is not intended for normal '
-                             'operation, please disable handoffs_first and '
-                             'handoff_delete before the next '
-                             'normal rebalance')
+            self.logger.warning('Handoff only mode is not intended for normal '
+                                'operation, please disable handoffs_first and '
+                                'handoff_delete before the next '
+                                'normal rebalance')
         self._diskfile_mgr = DiskFileManager(conf, self.logger)

     def _zero_stats(self):
@@ -585,7 +586,8 @@ class ObjectReplicator(Daemon):
                                           failure_dev['device'])
                                          for failure_dev in policy.object_ring.devs
                                          if failure_dev])
-                self.logger.warn(_('%s is not mounted'), local_dev['device'])
+                self.logger.warning(
+                    _('%s is not mounted'), local_dev['device'])
                 continue
             unlink_older_than(tmp_path, time.time() - self.reclaim_age)
             if not os.path.exists(obj_path):
@@ -701,7 +703,7 @@ class ObjectReplicator(Daemon):
                 self._add_failure_stats([(failure_dev['replication_ip'],
                                           failure_dev['device'])
                                          for failure_dev in job['nodes']])
-                self.logger.warn(_('%s is not mounted'), job['device'])
+                self.logger.warning(_('%s is not mounted'), job['device'])
                 continue
             if not self.check_ring(job['policy'].object_ring):
                 self.logger.info(_("Ring change detected. Aborting "

swift/obj/updater.py View File

@@ -84,7 +84,7 @@ class ObjectUpdater(Daemon):
                 if self.mount_check and \
                         not ismount(os.path.join(self.devices, device)):
                     self.logger.increment('errors')
-                    self.logger.warn(
+                    self.logger.warning(
                         _('Skipping %s as it is not mounted'), device)
                     continue
                 while len(pids) >= self.concurrency:
@@ -127,7 +127,7 @@ class ObjectUpdater(Daemon):
             if self.mount_check and \
                     not ismount(os.path.join(self.devices, device)):
                 self.logger.increment('errors')
-                self.logger.warn(
+                self.logger.warning(
                     _('Skipping %s as it is not mounted'), device)
                 continue
             self.object_sweep(os.path.join(self.devices, device))
@@ -159,8 +159,9 @@ class ObjectUpdater(Daemon):
             try:
                 base, policy = split_policy_string(asyncdir)
             except PolicyError as e:
-                self.logger.warn(_('Directory %r does not map '
-                                   'to a valid policy (%s)') % (asyncdir, e))
+                self.logger.warning(_('Directory %r does not map '
+                                      'to a valid policy (%s)') %
+                                    (asyncdir, e))
                 continue
             for prefix in self._listdir(async_pending):
                 prefix_path = os.path.join(async_pending, prefix)

swift/proxy/controllers/base.py View File

@@ -941,13 +941,13 @@ class ResumingGetter(object):
                             _('Trying to read during GET'))
                         raise
             except ChunkWriteTimeout:
-                self.app.logger.warn(
+                self.app.logger.warning(
                     _('Client did not read from proxy within %ss') %
                     self.app.client_timeout)
                 self.app.logger.increment('client_timeouts')
             except GeneratorExit:
                 if not req.environ.get('swift.non_client_disconnect'):
-                    self.app.logger.warn(_('Client disconnected on read'))
+                    self.app.logger.warning(_('Client disconnected on read'))
             except Exception:
                 self.app.logger.exception(_('Trying to send to client'))
                 raise

swift/proxy/controllers/obj.py View File

@@ -981,7 +981,7 @@ class ReplicatedObjectController(BaseObjectController):
                     msg='Object PUT exceptions after last send, '
                         '%(conns)s/%(nodes)s required connections')
         except ChunkReadTimeout as err:
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('ERROR Client read timeout (%ss)'), err.seconds)
             self.app.logger.increment('client_timeouts')
             raise HTTPRequestTimeout(request=req)
@@ -989,7 +989,7 @@ class ReplicatedObjectController(BaseObjectController):
             raise
         except ChunkReadError:
             req.client_disconnect = True
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('Client disconnected without sending last chunk'))
             self.app.logger.increment('client_disconnects')
             raise HTTPClientDisconnect(request=req)
@@ -1004,7 +1004,7 @@ class ReplicatedObjectController(BaseObjectController):
             raise HTTPInternalServerError(request=req)
         if req.content_length and bytes_transferred < req.content_length:
             req.client_disconnect = True
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('Client disconnected without sending enough data'))
             self.app.logger.increment('client_disconnects')
             raise HTTPClientDisconnect(request=req)
@@ -2209,7 +2209,7 @@ class ECObjectController(BaseObjectController):
             if req.content_length and (
                     bytes_transferred < req.content_length):
                 req.client_disconnect = True
-                self.app.logger.warn(
+                self.app.logger.warning(
                     _('Client disconnected without sending enough data'))
                 self.app.logger.increment('client_disconnects')
                 raise HTTPClientDisconnect(request=req)
@@ -2278,13 +2278,13 @@ class ECObjectController(BaseObjectController):
             for putter in putters:
                 putter.wait()
         except ChunkReadTimeout as err:
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('ERROR Client read timeout (%ss)'), err.seconds)
             self.app.logger.increment('client_timeouts')
             raise HTTPRequestTimeout(request=req)
         except ChunkReadError:
             req.client_disconnect = True
-            self.app.logger.warn(
+            self.app.logger.warning(
                 _('Client disconnected without sending last chunk'))
             self.app.logger.increment('client_disconnects')
             raise HTTPClientDisconnect(request=req)

swift/proxy/server.py View File

@@ -229,9 +229,10 @@ class Application(object):
         Check the configuration for possible errors
         """
         if self._read_affinity and self.sorting_method != 'affinity':
-            self.logger.warn("sorting_method is set to '%s', not 'affinity'; "
-                             "read_affinity setting will have no effect." %
-                             self.sorting_method)
+            self.logger.warning(
+                "sorting_method is set to '%s', not 'affinity'; "
+                "read_affinity setting will have no effect." %
+                self.sorting_method)

     def get_object_ring(self, policy_idx):
         """

test/unit/__init__.py View File

@@ -477,6 +477,12 @@ class UnmockTimeModule(object):

 logging.time = UnmockTimeModule()


+class WARN_DEPRECATED(Exception):
+    def __init__(self, msg):
+        self.msg = msg
+        print(self.msg)
+
+
 class FakeLogger(logging.Logger, object):
     # a thread safe fake logger
@@ -499,6 +505,9 @@ class FakeLogger(logging.Logger, object):
         NOTICE: 'notice',
     }

+    def warn(self, *args, **kwargs):
+        raise WARN_DEPRECATED("Deprecated Method warn use warning instead")
+
     def notice(self, msg, *args, **kwargs):
         """
         Convenience function for syslog priority LOG_NOTICE. The python
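
The two test hunks above add a guard rather than a feature: FakeLogger now fails fast if any code under test still calls warn(). A rough standalone sketch of the pattern (construction simplified; the real FakeLogger accepts more arguments):

    import logging

    class WARN_DEPRECATED(Exception):
        def __init__(self, msg):
            self.msg = msg
            print(self.msg)

    class FakeLogger(logging.Logger):
        def __init__(self):
            logging.Logger.__init__(self, 'fake')

        def warn(self, *args, **kwargs):
            raise WARN_DEPRECATED("Deprecated Method warn use warning instead")

    log = FakeLogger()
    log.warning('fine')        # supported spelling goes through
    try:
        log.warn('straggler')  # any remaining warn() call fails the test
    except WARN_DEPRECATED:
        pass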

test/unit/common/test_utils.py View File

@@ -1221,7 +1221,7 @@ class TestUtils(unittest.TestCase):
         logger = logging.getLogger('server')
         logger.addHandler(logging.StreamHandler(sio))
         logger = utils.get_logger(None, 'server', log_route='server')
-        logger.warn('test1')
+        logger.warning('test1')
         self.assertEqual(sio.getvalue(), 'test1\n')
         logger.debug('test2')
         self.assertEqual(sio.getvalue(), 'test1\n')
@@ -1233,7 +1233,7 @@ class TestUtils(unittest.TestCase):
         # way to syslog; but exercises the code.
         logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
                                   log_route='server')
-        logger.warn('test4')
+        logger.warning('test4')
         self.assertEqual(sio.getvalue(),
                          'test1\ntest3\ntest4\n')
         # make sure debug doesn't log by default
@@ -1491,7 +1491,7 @@ class TestUtils(unittest.TestCase):
         self.assertTrue('12345' not in log_msg)
         # test txn already in message
         self.assertEqual(logger.txn_id, '12345')
-        logger.warn('test 12345 test')
+        logger.warning('test 12345 test')
         self.assertEqual(strip_value(sio), 'test 12345 test\n')
         # Test multi line collapsing
         logger.error('my\nerror\nmessage')
@@ -1517,7 +1517,7 @@ class TestUtils(unittest.TestCase):
         self.assertTrue('1.2.3.4' not in log_msg)
         # test client_ip (and txn) already in message
         self.assertEqual(logger.client_ip, '1.2.3.4')
-        logger.warn('test 1.2.3.4 test 12345')
+        logger.warning('test 1.2.3.4 test 12345')
         self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
     finally:
         logger.logger.removeHandler(handler)

test/unit/obj/test_updater.py View File

@@ -183,7 +183,7 @@ class TestObjectUpdater(unittest.TestCase):
                 'node_timeout': '5'})
         cu.logger = mock_logger = mock.MagicMock()
         cu.object_sweep(self.sda1)
-        self.assertEqual(mock_logger.warn.call_count, warn)
+        self.assertEqual(mock_logger.warning.call_count, warn)
         self.assertTrue(
             os.path.exists(os.path.join(self.sda1, 'not_a_dir')))
         if should_skip: