Refactor DiskDir and DiskAccount to use DiskCommon

We have refactored DiskDir and DiskAccount so that truly common,
shared functions live in the base class DiskCommon, and each of these
now subclasses it. This allows us to isolate unique behaviors.

We have also enabled all the skipped unit tests in test_diskdir.py
that we could, removing a few that we don't need right now.

Change-Id: I48d9c915108df8cc92c3e9a764563e1d10c50050
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/5148
Reviewed-by: Mohammed Junaid <junaid@redhat.com>
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
This commit is contained in:
Peter Portante 2013-06-04 23:46:39 -04:00 committed by Luis Pabon
parent f80872e852
commit a88accbe27
2 changed files with 175 additions and 221 deletions

View File

@ -77,8 +77,6 @@ def filter_delimiter(objects, delimiter, prefix, marker, path=None):
1. begin with "prefix" (empty string matches all)
2. does not match the "path" argument
3. does not contain the delimiter in the given prefix length
4. the returned names will be those that start with the prefix.
"""
assert delimiter
assert prefix is not None
@ -148,9 +146,55 @@ def filter_end_marker(objects, end_marker):
class DiskCommon(object):
    """
    Common fields and methods shared between DiskDir and DiskAccount classes.
    """
    def __init__(self, root, drive, account, logger):
        # WARNING: The following four fields are referenced as fields by our
        # callers outside of this module, do not remove.
        # Create a dummy db_file in Glusterfs.RUN_DIR so callers that expect
        # a database-backed broker still find a valid file path.
        global _db_file
        if not _db_file:
            _db_file = os.path.join(Glusterfs.RUN_DIR, 'db_file.db')
            if not os.path.exists(_db_file):
                # Python 2 built-in file(); creates an empty placeholder file.
                file(_db_file, 'w+')
        self.db_file = _db_file
        self.metadata = {}
        self.pending_timeout = 0
        self.stale_reads_ok = False
        # The following fields are common
        self.root = root
        assert logger is not None
        self.logger = logger
        self.account = account
        self.datadir = os.path.join(root, drive)
        # None means "not yet checked"; set by _dir_exists_read_metadata().
        self._dir_exists = None

    def _dir_exists_read_metadata(self):
        """
        Check whether the data directory exists on disk and, if it does,
        load its metadata into self.metadata.

        :returns: True if the directory exists, False otherwise
        """
        self._dir_exists = os_path.exists(self.datadir)
        if self._dir_exists:
            self.metadata = _read_metadata(self.datadir)
        return self._dir_exists

    def is_deleted(self):
        # The intention of this method is to check the file system to see if
        # the directory actually exists.
        return not os_path.exists(self.datadir)

    def empty(self):
        # FIXME: Common because ported swift AccountBroker unit tests use it.
        return dir_empty(self.datadir)

    def update_metadata(self, metadata):
        """
        Merge the given metadata dict into self.metadata, persisting the
        combined result to disk only when the merge actually changes it.

        :param metadata: dict of metadata to merge in
        """
        assert self.metadata, "Valid container/account metadata should have " \
            "been created by now"
        if metadata:
            new_metadata = self.metadata.copy()
            new_metadata.update(metadata)
            if new_metadata != self.metadata:
                write_metadata(self.datadir, new_metadata)
                self.metadata = new_metadata
class DiskDir(DiskCommon):
"""
@ -236,53 +280,24 @@ class DiskDir(DiskCommon):
def __init__(self, path, drive, account, container, logger,
uid=DEFAULT_UID, gid=DEFAULT_GID):
self.root = path
if container:
self.container = container
else:
self.container = None
if self.container:
self.datadir = os.path.join(path, drive, self.container)
else:
self.datadir = os.path.join(path, drive)
self.account = account
assert logger is not None
self.logger = logger
self.metadata = {}
self.container_info = None
super(DiskDir, self).__init__(path, drive, account, logger)
self.uid = int(uid)
self.gid = int(gid)
# Create a dummy db_file in Glusterfs.RUN_DIR
global _db_file
if not _db_file:
_db_file = os.path.join(Glusterfs.RUN_DIR, 'db_file.db')
if not os.path.exists(_db_file):
file(_db_file, 'w+')
self.db_file = _db_file
self.dir_exists = os_path.exists(self.datadir)
if self.dir_exists:
self.container = container
self.datadir = os.path.join(self.datadir, self.container)
if not self._dir_exists_read_metadata():
return
if not self.metadata:
create_container_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
else:
return
if self.container:
if not self.metadata:
if not validate_container(self.metadata):
create_container_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
else:
if not validate_container(self.metadata):
create_container_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
else:
if not self.metadata:
create_account_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
else:
if not validate_account(self.metadata):
create_account_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
def empty(self):
return dir_empty(self.datadir)
def list_objects_iter(self, limit, marker, end_marker,
prefix, delimiter, path=None):
@ -303,7 +318,7 @@ class DiskDir(DiskCommon):
container_list = []
objects = self.update_object_count()
objects = self._update_object_count()
if objects:
objects.sort()
else:
@ -368,7 +383,7 @@ class DiskDir(DiskCommon):
return container_list
def update_object_count(self):
def _update_object_count(self):
objects, object_count, bytes_used = get_container_details(self.datadir)
if X_OBJECTS_COUNT not in self.metadata \
@ -381,33 +396,21 @@ class DiskDir(DiskCommon):
return objects
    def update_container_count(self):
        """
        Walk the account directory to (re)count the containers it holds,
        persisting the count to the on-disk metadata only when it changed.

        :returns: list of container names found under self.datadir
        """
        containers, container_count = get_account_details(self.datadir)

        if X_CONTAINER_COUNT not in self.metadata \
                or int(self.metadata[X_CONTAINER_COUNT][0]) != container_count:
            # Stored as a (value, timestamp) tuple to match the swift
            # metadata convention.
            self.metadata[X_CONTAINER_COUNT] = (container_count, 0)
            write_metadata(self.datadir, self.metadata)

        return containers
def get_info(self, include_metadata=False):
def get_info(self):
"""
Get global data for the container.
:returns: dict with keys: account, container, object_count, bytes_used,
hash, id, created_at, put_timestamp, delete_timestamp,
reported_put_timestamp, reported_delete_timestamp,
reported_object_count, and reported_bytes_used.
If include_metadata is set, metadata is included as a key
pointing to a dict of tuples of the metadata
"""
if not Glusterfs.OBJECT_ONLY:
# If we are not configured for object only environments, we should
# update the object counts in case they changed behind our back.
self.update_object_count()
self._update_object_count()
else:
# FIXME: to facilitate testing, we need to update all the time
self.update_object_count()
self._update_object_count()
data = {'account': self.account, 'container': self.container,
'object_count': self.metadata.get(
@ -425,8 +428,6 @@ class DiskDir(DiskCommon):
'x_container_sync_point2': self.metadata.get(
'x_container_sync_point2', -1),
}
if include_metadata:
data['metadata'] = self.metadata
return data
def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
@ -439,7 +440,7 @@ class DiskDir(DiskCommon):
Create and write metatdata to directory/container.
:param metadata: Metadata to write.
"""
if not self.dir_exists:
if not self._dir_exists:
mkdirs(self.datadir)
# If we create it, ensure we own it.
os.chown(self.datadir, self.uid, self.gid)
@ -447,48 +448,47 @@ class DiskDir(DiskCommon):
metadata[X_TIMESTAMP] = timestamp
write_metadata(self.datadir, metadata)
self.metadata = metadata
self.dir_exists = True
self._dir_exists = True
def update_put_timestamp(self, timestamp):
"""
Create the container if it doesn't exist and update the timestamp
Update the PUT timestamp for the container.
If the container does not exist, create it using a PUT timestamp of
the given value.
If the container does exist, update the PUT timestamp only if it is
later than the existing value.
"""
if not os_path.exists(self.datadir):
self.initialize(timestamp)
else:
self.metadata[X_PUT_TIMESTAMP] = timestamp
write_metadata(self.datadir, self.metadata)
if timestamp > self.metadata[X_PUT_TIMESTAMP]:
self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
write_metadata(self.datadir, self.metadata)
def delete_object(self, name, timestamp):
# NOOP - should never be called since object file removal occurs
# within a directory implicitly.
pass
return
def delete_db(self, timestamp):
"""
Delete the container
Delete the container (directory) if empty.
:param timestamp: delete timestamp
"""
if dir_empty(self.datadir):
rmdirs(self.datadir)
def update_metadata(self, metadata):
assert self.metadata, "Valid container/account metadata should have" \
" been created by now"
if metadata:
new_metadata = self.metadata.copy()
new_metadata.update(metadata)
if new_metadata != self.metadata:
write_metadata(self.datadir, new_metadata)
self.metadata = new_metadata
if not dir_empty(self.datadir):
# FIXME: This is a failure condition here, isn't it?
return
rmdirs(self.datadir)
def set_x_container_sync_points(self, sync_point1, sync_point2):
self.metadata['x_container_sync_point1'] = sync_point1
self.metadata['x_container_sync_point2'] = sync_point2
class DiskAccount(DiskDir):
class DiskAccount(DiskCommon):
"""
Usage pattern from account/server.py (Havana, 1.8.0+):
DELETE:
@ -528,11 +528,26 @@ class DiskAccount(DiskDir):
"""
def __init__(self, root, drive, account, logger):
super(DiskAccount, self).__init__(root, drive, account, None, logger)
assert self.dir_exists
super(DiskAccount, self).__init__(root, drive, account, logger)
# Since accounts should always exist (given an account maps to a
# gluster volume directly, and the mount has already been checked at
# the beginning of the REST API handling), just assert that that
# assumption still holds.
assert self._dir_exists_read_metadata()
assert self._dir_exists
if not self.metadata or not validate_account(self.metadata):
create_account_metadata(self.datadir)
self.metadata = _read_metadata(self.datadir)
    def is_status_deleted(self):
        """
        Only returns true if the status field is set to DELETED.
        """
        # This function should always return False. Accounts are not created
        # and deleted, they exist if a Gluster volume can be mounted. There is
        # no way to delete accounts, so this could never return True.
        return False
def initialize(self, timestamp):
@ -545,14 +560,30 @@ class DiskAccount(DiskDir):
write_metadata(self.datadir, metadata)
self.metadata = metadata
    def update_put_timestamp(self, timestamp):
        """
        Update the account's PUT timestamp.

        :param timestamp: normalized timestamp string; persisted only if it
                          is later than the currently stored value
        """
        # Since accounts always exists at this point, just update the account
        # PUT timestamp if this given timestamp is later than what we already
        # know.
        assert self._dir_exists
        if timestamp > self.metadata[X_PUT_TIMESTAMP][0]:
            self.metadata[X_PUT_TIMESTAMP] = (timestamp, 0)
            write_metadata(self.datadir, self.metadata)
    def delete_db(self, timestamp):
        """
        Mark the account as deleted

        :param timestamp: delete timestamp
        """
        # Deleting an account is a no-op, since accounts are one-to-one
        # mappings to gluster volumes.
        #
        # FIXME: This means the caller will end up returning a success status
        # code for an operation that really should not be allowed. Instead, we
        # should modify the account server to not allow the DELETE method, and
        # should probably modify the proxy account controller to not allow the
        # DELETE method as well.
        return
def put_container(self, container, put_timestamp, del_timestamp,
@ -570,6 +601,16 @@ class DiskAccount(DiskDir):
# occurs from within the account directory implicitly.
return
    def _update_container_count(self):
        """
        Walk the account directory to (re)count the containers it holds,
        persisting the count to the on-disk metadata only when it changed.

        :returns: list of container names found under self.datadir
        """
        containers, container_count = get_account_details(self.datadir)

        if X_CONTAINER_COUNT not in self.metadata \
                or int(self.metadata[X_CONTAINER_COUNT][0]) != container_count:
            # Stored as a (value, timestamp) tuple to match the swift
            # metadata convention.
            self.metadata[X_CONTAINER_COUNT] = (container_count, 0)
            write_metadata(self.datadir, self.metadata)

        return containers
def list_containers_iter(self, limit, marker, end_marker,
prefix, delimiter):
"""
@ -580,7 +621,7 @@ class DiskAccount(DiskDir):
prefix = ''
account_list = []
containers = self.update_container_count()
containers = self._update_container_count()
if containers:
containers.sort()
else:
@ -623,8 +664,8 @@ class DiskAccount(DiskDir):
try:
metadata = create_container_metadata(cont_path)
except OSError as e:
# FIXME - total hack to get port unit test cases
# working for now.
# FIXME - total hack to get upstream swift ported unit
# test cases working for now.
if e.errno != errno.ENOENT:
raise
if metadata:
@ -638,7 +679,7 @@ class DiskAccount(DiskDir):
return account_list
def get_info(self, include_metadata=False):
def get_info(self):
"""
Get global data for the account.
:returns: dict with keys: account, created_at, put_timestamp,
@ -648,10 +689,10 @@ class DiskAccount(DiskDir):
if not Glusterfs.OBJECT_ONLY:
# If we are not configured for object only environments, we should
# update the container counts in case they changed behind our back.
self.update_container_count()
self._update_container_count()
else:
# FIXME: to facilitate testing, we need to update all the time
self.update_container_count()
self._update_container_count()
data = {'account': self.account, 'created_at': '1',
'put_timestamp': '1', 'delete_timestamp': '1',
@ -660,7 +701,4 @@ class DiskAccount(DiskDir):
'object_count': self.metadata.get(X_OBJECTS_COUNT, (0, 0))[0],
'bytes_used': self.metadata.get(X_BYTES_USED, (0, 0))[0],
'hash': '', 'id': ''}
if include_metadata:
data['metadata'] = self.metadata
return data

View File

@ -24,7 +24,6 @@ import shutil
import tarfile
import hashlib
from time import time
from nose import SkipTest
from swift.common.utils import normalize_timestamp
from gluster.swift.common import utils
import gluster.swift.common.Glusterfs
@ -64,9 +63,6 @@ def timestamp_in_range(ts, base):
class TestDiskDirModuleFunctions(unittest.TestCase):
""" Tests for gluster.swift.common.DiskDir module functions """
def setUp(self):
raise SkipTest
def test__read_metadata(self):
def fake_read_metadata(p):
return { 'a': 1, 'b': ('c', 5) }
@ -180,90 +176,83 @@ class TestDiskDirModuleFunctions(unittest.TestCase):
out_objs = dd.filter_prefix(in_objs, prefix)
assert list(out_objs) == ['abc_123', 'abc_456', 'abc_789']
in_objs, prefix = ['ABC_123', 'ABC_456', 'abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc'
out_objs = dd.filter_prefix(in_objs, prefix)
assert list(out_objs) == ['abc_123', 'abc_456', 'abc_789']
in_objs, prefix = ['abc_123', 'def_101', 'abc_456', 'abc_789'], 'abc'
out_objs = dd.filter_prefix(in_objs, prefix)
assert list(out_objs) == ['abc_123',]
def test_filter_delimiter(self):
in_objs, delimiter, prefix = [], None, ''
in_objs, delimiter, prefix, marker = [], None, '', ''
try:
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
except AssertionError:
pass
except Exception:
raise SkipTest
self.fail("Failed to raise assertion")
in_objs, delimiter, prefix = [], '', ''
try:
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
except AssertionError:
pass
except Exception:
self.fail("Failed to raise assertion")
in_objs, delimiter, prefix = [], str(255), ''
in_objs, delimiter, prefix, marker = [], '', '', ''
try:
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
except AssertionError:
pass
except Exception:
self.fail("Failed to raise assertion")
in_objs, delimiter, prefix = [], '_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
in_objs, delimiter, prefix, marker = [], str(255), '', ''
try:
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
except AssertionError:
pass
except Exception:
self.fail("Failed to raise assertion")
in_objs, delimiter, prefix, marker = [], '_', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
assert list(out_objs) == []
in_objs, delimiter, prefix = ['abc_'], '_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
in_objs, delimiter, prefix, marker = ['abc_'], '_', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
assert list(out_objs) == in_objs
in_objs, delimiter, prefix = ['abc_123', 'abc_456'], '_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
in_objs, delimiter, prefix, marker = ['abc_123', 'abc_456'], '_', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
assert list(out_objs) == ['abc_']
in_objs, delimiter, prefix = ['abc_123', 'abc_456', 'def_123', 'def_456'], '_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
in_objs, delimiter, prefix, marker = ['abc_123', 'abc_456', 'def_123', 'def_456'], '_', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
assert list(out_objs) == ['abc_', 'def_']
in_objs, delimiter, prefix = ['abc_123', 'abc_456', 'abc_789', 'def_101'], '_', 'abc_'
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
in_objs, delimiter, prefix, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], '_', 'abc_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
l = list(out_objs)
assert l == ['abc_123', 'abc_456', 'abc_789'], repr(l)
in_objs, delimiter, prefix = ['abc_123_a', 'abc_456', 'abc_789_', 'def_101'], '_', 'abc_'
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
assert list(out_objs) == ['abc_123_a', 'abc_789_']
in_objs, delimiter, prefix, marker = ['abc_123_a', 'abc_456', 'abc_789_', 'def_101'], '_', 'abc_', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker)
l = list(out_objs)
assert l == ['abc_123_', 'abc_456', 'abc_789_'], repr(l)
def test_filter_limit(self):
try:
l = list(dd.filter_limit([], 0))
except AssertionError:
pass
else:
self.fail("Accepted a zero limit")
in_objs, delimiter, prefix, marker, path = ['abc_123_a', 'abc_456', 'abc_789_', 'def_101'], '_', 'abc_', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker, path)
l = list(out_objs)
# FIXME: This appears to be a bug due to this upstream swift reference
# implementation of list_objects_iter, where the presence of a path
# forces a code path that does not add the match on a delimiter
assert l == ['abc_456', 'abc_789_'], repr(l)
l = list(dd.filter_limit([], 1))
assert l == []
l = list(dd.filter_limit([1,], 1))
assert l == [1,]
l = list(dd.filter_limit([1,], 10))
assert l == [1,]
l = list(dd.filter_limit([1,2,3], 1))
assert l == [1,]
l = list(dd.filter_limit([1,2,3], 2))
assert l == [1,2]
l = list(dd.filter_limit([1,2,3], 3))
assert l == [1,2,3]
l = list(dd.filter_limit([1,2,3], 4))
assert l == [1,2,3]
in_objs, delimiter, prefix, marker, path = ['abc/123', 'abc/456', 'def/123', 'def/456'], '/', 'abc/', '', ''
out_objs = dd.filter_delimiter(in_objs, delimiter, prefix, marker, path)
l = list(out_objs)
assert l == ['abc/123', 'abc/456'], repr(l)
class TestDiskCommon(unittest.TestCase):
""" Tests for gluster.swift.common.DiskDir.DiskCommon """
def setUp(self):
raise SkipTest
_initxattr()
self.fake_logger = FakeLogger()
self.td = tempfile.mkdtemp()
@ -322,20 +311,6 @@ class TestDiskCommon(unittest.TestCase):
assert dc.datadir == os.path.join(self.td, "dne0")
assert dc._dir_exists is False
def test_initialize(self):
dc = dd.DiskCommon(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
dc.initialize('12345')
assert dc.metadata == {}
assert dc.db_file == dd._db_file
assert dc.pending_timeout == 0
assert dc.stale_reads_ok == False
assert dc.root == self.td
assert dc.logger == self.fake_logger
assert dc.account == self.fake_accounts[0]
assert dc.datadir == os.path.join(self.td, self.fake_drives[0])
assert dc._dir_exists is None
def test_is_deleted(self):
dc = dd.DiskCommon(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
@ -370,45 +345,6 @@ class TestDiskCommon(unittest.TestCase):
assert dc.metadata == md_copy
class TestDiskDir(unittest.TestCase):
    """ Tests for gluster.swift.common.DiskDir.DiskDir """

    def setUp(self):
        # Fake out the xattr layer and build three drive/account directories
        # under a fresh temporary root for each test.
        _initxattr()
        self.fake_logger = FakeLogger()
        self.td = tempfile.mkdtemp()
        self.fake_drives = []
        self.fake_accounts = []
        for i in range(0,3):
            self.fake_drives.append("drv%d" % i)
            os.makedirs(os.path.join(self.td, self.fake_drives[i]))
            self.fake_accounts.append(self.fake_drives[i])

    def tearDown(self):
        # Undo the xattr fake and remove the temporary tree.
        _destroyxattr()
        shutil.rmtree(self.td)

    # The tests below are placeholders: each is skipped until implemented,
    # with self.fail() as a guard should the skip ever be removed.
    def test_constructor(self):
        raise SkipTest
        self.fail("Implement me")

    def test_empty(self):
        raise SkipTest
        self.fail("Implement me")

    def test_list_objects_iter(self):
        raise SkipTest
        self.fail("Implement me")

    def test_get_info(self):
        raise SkipTest
        self.fail("Implement me")

    def test_delete_db(self):
        raise SkipTest
        self.fail("Implement me")
class TestContainerBroker(unittest.TestCase):
"""
Tests for DiskDir.DiskDir class (duck-typed
@ -1281,8 +1217,6 @@ class TestDiskAccount(unittest.TestCase):
def test_constructor_no_metadata(self):
da = dd.DiskAccount(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
raise SkipTest
assert da._container_info is None
assert da._dir_exists is True
ctime = os.path.getctime(da.datadir)
mtime = os.path.getmtime(da.datadir)
@ -1298,8 +1232,6 @@ class TestDiskAccount(unittest.TestCase):
def test_constructor_metadata_not_valid(self):
da = dd.DiskAccount(self.td, self.fake_drives[1],
self.fake_accounts[1], self.fake_logger)
raise SkipTest
assert da._container_info is None
assert da._dir_exists is True
ctime = os.path.getctime(da.datadir)
mtime = os.path.getmtime(da.datadir)
@ -1316,8 +1248,6 @@ class TestDiskAccount(unittest.TestCase):
def test_constructor_metadata_valid(self):
da = dd.DiskAccount(self.td, self.fake_drives[2],
self.fake_accounts[2], self.fake_logger)
raise SkipTest
assert da._container_info is None
assert da._dir_exists is True
ctime = os.path.getctime(da.datadir)
mtime = os.path.getmtime(da.datadir)
@ -1330,12 +1260,6 @@ class TestDiskAccount(unittest.TestCase):
'X-Container-Count': (0, 0)}
assert da.metadata == exp_md, repr(da.metadata)
def test_list_containers_iter(self):
da = dd.DiskAccount(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
raise SkipTest
self.fail("Implement me")
get_info_keys = set(['account', 'created_at', 'put_timestamp',
'delete_timestamp', 'container_count',
'object_count', 'bytes_used', 'hash', 'id'])
@ -1380,7 +1304,6 @@ class TestDiskAccount(unittest.TestCase):
def test_update_put_timestamp_not_updated(self):
da = dd.DiskAccount(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
raise SkipTest
da.update_put_timestamp('12345')
assert da.metadata['X-PUT-Timestamp'][0] != '12345', repr(da.metadata)
@ -1389,23 +1312,16 @@ class TestDiskAccount(unittest.TestCase):
self.fake_accounts[0], self.fake_logger)
exp_pts = str(float(da.metadata['X-PUT-Timestamp'][0]) + 100)
da.update_put_timestamp(exp_pts)
raise SkipTest
assert da.metadata['X-PUT-Timestamp'][0] == exp_pts, repr(da.metadata)
def test_delete_db(self):
da = dd.DiskAccount(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
raise SkipTest
assert da._dir_exists == True
da.delete_db('12345')
assert da._dir_exists == True
def test_put_container(self):
raise SkipTest
self.fail("Implement me")
def test_is_status_deleted(self):
da = dd.DiskAccount(self.td, self.fake_drives[0],
self.fake_accounts[0], self.fake_logger)
raise SkipTest
assert da.is_status_deleted() == False