Merge "diskfile: Quarantine hashdirs on ENODATA"
commit 92bb7108c6
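In short: listing an object hash directory can fail with ENODATA when the underlying sectors are bad. Rather than erroring out on every pass, the change below moves the unreadable hash dir into the device's quarantined/ area, and if even that rename fails, it quarantines the enclosing suffix dir instead. As a rough, standalone sketch of the pattern (illustrative only; quarantine() here is a made-up helper, while the real code paths are quarantine_renamer and DiskFile._quarantine shown in the hunks below):

    import errno
    import os

    def quarantine(device_path, target_dir):
        # made-up helper: move target_dir under <device>/quarantined/
        dest = os.path.join(device_path, 'quarantined',
                            os.path.basename(target_dir))
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        os.rename(target_dir, dest)
        return dest

    def list_or_quarantine(device_path, hash_dir):
        try:
            return os.listdir(hash_dir)
        except OSError as err:
            if err.errno != errno.ENODATA:
                raise
            try:
                # bad sectors: pull just this hash dir out of service
                quarantine(device_path, hash_dir)
            except (OSError, IOError):
                # the rename itself hit the bad sectors; quarantine the
                # enclosing suffix dir instead
                quarantine(device_path, os.path.dirname(hash_dir))
            return []  # sketch only: treat the object as missing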
@@ -330,7 +330,11 @@ def quarantine_renamer(device_path, corrupted_file_path):
     to_dir = join(device_path, 'quarantined',
                   get_data_dir(policy),
                   basename(from_dir))
-    invalidate_hash(dirname(from_dir))
+    if len(basename(from_dir)) == 3:
+        # quarantining whole suffix
+        invalidate_hash(from_dir)
+    else:
+        invalidate_hash(dirname(from_dir))
     try:
         renamer(from_dir, to_dir, fsync=False)
     except OSError as e:
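The new length check in quarantine_renamer tells suffix dirs apart from hash dirs purely by basename length: in the on-disk layout used by the tests further down (e.g. /srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900), a suffix is 3 hex characters while a hash dir name is 32 characters. A quick illustration with those example paths:

    import os

    hash_dir = '/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900'
    suffix_dir = os.path.dirname(hash_dir)   # '/srv/dev/objects/9/900'

    # from_dir is a hash dir: invalidate its parent suffix, as before
    assert len(os.path.basename(hash_dir)) == 32
    # from_dir is a suffix dir (quarantining the whole suffix): invalidate it
    assert len(os.path.basename(suffix_dir)) == 3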
@@ -1173,10 +1177,10 @@ class BaseDiskFileManager(object):
                 ondisk_info = self.cleanup_ondisk_files(
                     hsh_path, policy=policy)
             except OSError as err:
+                partition_path = dirname(path)
+                objects_path = dirname(partition_path)
+                device_path = dirname(objects_path)
                 if err.errno == errno.ENOTDIR:
-                    partition_path = dirname(path)
-                    objects_path = dirname(partition_path)
-                    device_path = dirname(objects_path)
                     # The made-up filename is so that the eventual dirpath()
                     # will result in this object directory that we care about.
                     # Some failures will result in an object directory
@@ -1190,6 +1194,24 @@ class BaseDiskFileManager(object):
                         'it is not a directory', {'hsh_path': hsh_path,
                                                   'quar_path': quar_path})
                     continue
+                elif err.errno == errno.ENODATA:
+                    try:
+                        # We've seen cases where bad sectors lead to ENODATA
+                        # here; use a similar hack as above
+                        quar_path = quarantine_renamer(
+                            device_path,
+                            join(hsh_path, "made-up-filename"))
+                        orig_path = hsh_path
+                    except (OSError, IOError):
+                        # We've *also* seen the bad sectors lead to us needing
+                        # to quarantine the whole suffix
+                        quar_path = quarantine_renamer(device_path, hsh_path)
+                        orig_path = path
+                    logging.exception(
+                        'Quarantined %(orig_path)s to %(quar_path)s because '
+                        'it could not be listed', {'orig_path': orig_path,
+                                                   'quar_path': quar_path})
+                    continue
                 raise
             if not ondisk_info['files']:
                 continue
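The nested try/except (OSError, IOError) added above is the heart of the change: the first quarantine_renamer call appends "made-up-filename" so that only the unreadable hash dir (its dirname) is renamed into quarantine; if that rename also trips over the bad sectors, passing hsh_path directly makes its dirname, i.e. the whole suffix dir, the thing that gets moved, and orig_path records which one was taken for the log message. Restated as a hedged standalone sketch (not a real helper in the diff):

    import os

    def quarantine_unlistable(quarantine_renamer, device_path, hsh_path):
        try:
            # dirname(<hash dir>/made-up-filename) is the hash dir itself,
            # so only that single directory is renamed away
            quar_path = quarantine_renamer(
                device_path, os.path.join(hsh_path, "made-up-filename"))
            orig_path = hsh_path
        except (OSError, IOError):
            # the rename failed too; dirname(hsh_path) is the suffix dir,
            # so the whole suffix is moved instead
            quar_path = quarantine_renamer(device_path, hsh_path)
            orig_path = os.path.dirname(hsh_path)
        return orig_path, quar_path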
@@ -1527,6 +1549,24 @@ class BaseDiskFileManager(object):
                     'it is not a directory', {'object_path': object_path,
                                               'quar_path': quar_path})
                 raise DiskFileNotExist()
+            elif err.errno == errno.ENODATA:
+                try:
+                    # We've seen cases where bad sectors lead to ENODATA here;
+                    # use a similar hack as above
+                    quar_path = self.quarantine_renamer(
+                        dev_path,
+                        join(object_path, "made-up-filename"))
+                    orig_path = object_path
+                except (OSError, IOError):
+                    # We've *also* seen the bad sectors lead to us needing to
+                    # quarantine the whole suffix, not just the hash dir
+                    quar_path = self.quarantine_renamer(dev_path, object_path)
+                    orig_path = os.path.dirname(object_path)
+                logging.exception(
+                    'Quarantined %(orig_path)s to %(quar_path)s because '
+                    'it could not be listed', {'orig_path': orig_path,
+                                               'quar_path': quar_path})
+                raise DiskFileNotExist()
             if err.errno != errno.ENOENT:
                 raise
             raise DiskFileNotExist()
@@ -2528,6 +2568,20 @@ class BaseDiskFile(object):
                     # want this one file and not its parent.
                     os.path.join(self._datadir, "made-up-filename"),
                     "Expected directory, found file at %s" % self._datadir)
+            elif err.errno == errno.ENODATA:
+                try:
+                    # We've seen cases where bad sectors lead to ENODATA here
+                    raise self._quarantine(
+                        # similar hack to above
+                        os.path.join(self._datadir, "made-up-filename"),
+                        "Failed to list directory at %s" % self._datadir)
+                except (OSError, IOError):
+                    # We've *also* seen the bad sectors lead to us needing to
+                    # quarantine the whole suffix, not just the hash dir
+                    raise self._quarantine(
+                        # skip the above hack to rename the suffix
+                        self._datadir,
+                        "Failed to list directory at %s" % self._datadir)
             elif err.errno != errno.ENOENT:
                 raise DiskFileError(
                     "Error listing directory %s: %s" % (self._datadir, err))
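The test hunks that follow force the new branches by injecting an OSError with errno.ENODATA, either as a side effect of the patched cleanup_ondisk_files or directly from a patched os.listdir. Stripped of the surrounding fixtures, the os.listdir variant boils down to this (standalone, with an illustrative path):

    import errno
    import os
    from unittest import mock

    with mock.patch('os.listdir',
                    side_effect=OSError(errno.ENODATA, 'nope')):
        try:
            os.listdir('/srv/dev/objects/9/900')  # the mock raises regardless
        except OSError as err:
            assert err.errno == errno.ENODATA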
@@ -1589,6 +1589,27 @@ class DiskFileManagerMixin(BaseDiskFileTestMixin):
                 ('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
                  'made-up-filename'))
 
+    def test_get_diskfile_from_hash_no_data(self):
+        self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
+        with mock.patch(self._manager_mock('diskfile_cls')), \
+                mock.patch(self._manager_mock(
+                    'cleanup_ondisk_files')) as cleanup, \
+                mock.patch('swift.obj.diskfile.read_metadata') as readmeta, \
+                mock.patch(self._manager_mock(
+                    'quarantine_renamer')) as quarantine_renamer:
+            osexc = OSError()
+            osexc.errno = errno.ENODATA
+            cleanup.side_effect = osexc
+            readmeta.return_value = {'name': '/a/c/o'}
+            self.assertRaises(
+                DiskFileNotExist,
+                self.df_mgr.get_diskfile_from_hash,
+                'dev', '9', '9a7175077c01a23ade5956b8a2bba900', POLICIES[0])
+            quarantine_renamer.assert_called_once_with(
+                '/srv/dev/',
+                ('/srv/dev/objects/9/900/9a7175077c01a23ade5956b8a2bba900/' +
+                 'made-up-filename'))
+
     def test_get_diskfile_from_hash_no_dir(self):
         self.df_mgr.get_dev_path = mock.MagicMock(return_value='/srv/dev/')
         with mock.patch(self._manager_mock('diskfile_cls')), \
@@ -4681,6 +4702,21 @@ class DiskFileMixin(BaseDiskFileTestMixin):
         self.assertFalse(os.path.exists(hashdir))
         self.assertTrue(os.path.exists(os.path.dirname(hashdir)))
 
+    def test_quarantine_hashdir_not_listable(self):
+        df, df_data = self._create_test_file(b'1234567890', account="abc",
+                                             container='123', obj='xyz')
+        hashdir = df._datadir
+        df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
+                                      'xyz', policy=POLICIES.legacy)
+        with mock.patch('os.listdir',
+                        side_effect=OSError(errno.ENODATA, 'nope')):
+            self.assertRaises(DiskFileQuarantined, df.open)
+
+        # make sure the right thing got quarantined; the suffix dir should not
+        # have moved, as that could have many objects in it
+        self.assertFalse(os.path.exists(hashdir))
+        self.assertTrue(os.path.exists(os.path.dirname(hashdir)))
+
     def test_create_prealloc(self):
         df = self.df_mgr.get_diskfile(self.existing_device, '0', 'abc', '123',
                                       'xyz', policy=POLICIES.legacy)
@@ -8524,6 +8560,38 @@ class TestSuffixHashes(unittest.TestCase):
             )
             self.assertTrue(os.path.exists(quarantine_path))
 
+    def test_hash_suffix_cleanup_ondisk_files_enodata_quarantined(self):
+        for policy in self.iter_policies():
+            df = self.df_router[policy].get_diskfile(
+                self.existing_device, '0', 'a', 'c', 'o', policy=policy)
+            # make everything down to the hash directory
+            os.makedirs(df._datadir)
+            suffix = os.path.basename(os.path.dirname(df._datadir))
+            orig_listdir = os.listdir
+
+            def fake_listdir(path):
+                if path == df._datadir:
+                    raise OSError(errno.ENODATA, 'nope')
+                return orig_listdir(path)
+
+            df_mgr = self.df_router[policy]
+            with mock.patch('os.listdir', side_effect=fake_listdir):
+                hashes = df_mgr.get_hashes(self.existing_device, '0', [suffix],
+                                           policy)
+            self.assertEqual(hashes, {})
+            # and hash path is quarantined
+            self.assertFalse(os.path.exists(df._datadir))
+            # each device a quarantined directory
+            quarantine_base = os.path.join(self.devices,
+                                           self.existing_device, 'quarantined')
+            # the quarantine path is...
+            quarantine_path = os.path.join(
+                quarantine_base,  # quarantine root
+                diskfile.get_data_dir(policy),  # per-policy data dir
+                os.path.basename(df._datadir)  # name of quarantined file
+            )
+            self.assertTrue(os.path.exists(quarantine_path))
+
     def test_hash_suffix_cleanup_ondisk_files_other_oserror(self):
         for policy in self.iter_policies():
             timestamp = self.ts()