Add DiskDir unit test skeleton and pep8 filter

The new DiskDir unit test skeleton is quite incomplete, but it gets the
DiskDir module on the board for module coverage, explicitly exposing the
fact that we need to grow test coverage. This is a first step. At the
same time, we update all of our modules, fixing the pep8 errors now
reported when run under tox. We can then add a Jenkins pre-commit job
that fails on pep8 errors. This brings our code to parity with what
OpenStack Swift is doing.

Change-Id: Ia0565606512efda6e73f67bd00269177b89db858
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/5080
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
Parent: 3ff44850b4
Commit: b291641985
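For context, the pep8 run referenced above happens through tox, so a Jenkins
pre-commit job only has to invoke the right tox environment and fail the
build on a non-zero exit status. The sketch below shows what a minimal pep8
environment can look like; the environment name, the pep8 dependency, and
the "gluster test" target paths are illustrative assumptions, not contents
of this commit:

    # tox.ini (sketch, not part of this change): a pep8 environment that a
    # Jenkins pre-commit job could invoke with "tox -e pep8".
    [tox]
    envlist = py26,py27,pep8

    [testenv:pep8]
    deps = pep8
    commands =
        pep8 --repeat gluster test

With something like this in place, the Jenkins job is a one-liner: run
"tox -e pep8" and let the exit status gate the review.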
@@ -1,5 +1,6 @@
 """ Gluster for Swift """

+
 class PkgInfo(object):
     def __init__(self, canonical_version, name, final):
         self.canonical_version = canonical_version
@@ -17,7 +17,7 @@

 # Simply importing this monkey patches the constraint handling to fit our
 # needs
-import gluster.swift.common.constraints
+import gluster.swift.common.constraints  # noqa

 from swift.account import server
 from gluster.swift.common.DiskDir import DiskAccount
@@ -13,36 +13,23 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os, errno
+import os

-from gluster.swift.common.utils import clean_metadata, dir_empty, rmdirs, \
-    mkdirs, validate_account, validate_container, is_marker, \
-    get_container_details, get_account_details, get_container_metadata, \
+from gluster.swift.common.fs_utils import dir_empty, rmdirs, mkdirs, os_path
+from gluster.swift.common.utils import clean_metadata, validate_account, \
+    validate_container, get_container_details, get_account_details, \
     create_container_metadata, create_account_metadata, DEFAULT_GID, \
     DEFAULT_UID, validate_object, create_object_metadata, read_metadata, \
     write_metadata, X_CONTENT_TYPE, X_CONTENT_LENGTH, X_TIMESTAMP, \
-    X_PUT_TIMESTAMP, X_TYPE, X_ETAG, X_OBJECTS_COUNT, X_BYTES_USED, \
-    X_CONTAINER_COUNT, CONTAINER, os_path
+    X_PUT_TIMESTAMP, X_ETAG, X_OBJECTS_COUNT, X_BYTES_USED, \
+    X_CONTAINER_COUNT
 from gluster.swift.common import Glusterfs

-from swift.common.constraints import CONTAINER_LISTING_LIMIT
-from swift.common.utils import normalize_timestamp, TRUE_VALUES


 DATADIR = 'containers'

-# Create a dummy db_file in /etc/swift
-_unittests_enabled = os.getenv('GLUSTER_UNIT_TEST_ENABLED', 'no')
-if _unittests_enabled in TRUE_VALUES:
-    _tmp_dir = '/tmp/gluster_unit_tests'
-    try:
-        os.mkdir(_tmp_dir)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-    _db_file = os.path.join(_tmp_dir, 'db_file.db')
-else:
-    _db_file = '/etc/swift/db_file.db'
+# Create a dummy db_file in Glusterfs.RUN_DIR
+_db_file = os.path.join(Glusterfs.RUN_DIR, 'db_file.db')
 if not os.path.exists(_db_file):
     file(_db_file, 'w+')

@@ -91,7 +78,7 @@ class DiskCommon(object):
         Accept sorted list.
         Objects should start with prefix.
         """
-        filtered_objs=[]
+        filtered_objs = []
         for object_name in objects:
             tmp_obj = object_name.replace(prefix, '', 1)
             sufix = tmp_obj.split(delimiter, 1)
@@ -106,8 +93,7 @@ class DiskCommon(object):
         TODO: We can traverse in reverse order to optimize.
         Accept sorted list.
         """
-        filtered_objs=[]
-        found = 0
+        filtered_objs = []
         if objects[-1] < marker:
             return filtered_objs
         for object_name in objects:
@@ -120,7 +106,7 @@ class DiskCommon(object):
         """
         Accept sorted list.
         """
-        filtered_objs=[]
+        filtered_objs = []
         for object_name in objects:
             if object_name < end_marker:
                 filtered_objs.append(object_name)
@@ -130,7 +116,7 @@ class DiskCommon(object):
         return filtered_objs

     def filter_limit(self, objects, limit):
-        filtered_objs=[]
+        filtered_objs = []
         for i in range(0, limit):
             filtered_objs.append(objects[i])

@@ -232,7 +218,8 @@ class DiskDir(DiskCommon):
         self.metadata[X_OBJECTS_COUNT] = (int(ocnt) + 1, timestamp)
         self.metadata[X_PUT_TIMESTAMP] = timestamp
         bused = self.metadata[X_BYTES_USED][0]
-        self.metadata[X_BYTES_USED] = (int(bused) + int(content_length), timestamp)
+        self.metadata[X_BYTES_USED] = (int(bused) + int(content_length),
+                                       timestamp)
         #TODO: define update_metadata instad of writing whole metadata again.
         self.put_metadata(self.metadata)

@@ -240,10 +227,12 @@ class DiskDir(DiskCommon):
         ocnt, timestamp = self.metadata[X_OBJECTS_COUNT][0]
         self.metadata[X_OBJECTS_COUNT] = (int(ocnt) - 1, timestamp)
         bused, timestamp = self.metadata[X_BYTES_USED]
-        self.metadata[X_BYTES_USED] = (int(bused) - int(content_length), timestamp)
+        self.metadata[X_BYTES_USED] = (int(bused) - int(content_length),
+                                       timestamp)
         self.put_metadata(self.metadata)

-    def put_container(self, container, put_timestamp, del_timestamp, object_count, bytes_used):
+    def put_container(self, container, put_timestamp, del_timestamp,
+                      object_count, bytes_used):
         """
         For account server.
         """
@@ -363,20 +352,22 @@ class DiskDir(DiskCommon):
         # update the object counts in case they changed behind our back.
         self.update_object_count()

-        data = {'account' : self.account, 'container' : self.container,
-                'object_count' : self.metadata.get(X_OBJECTS_COUNT, ('0', 0))[0],
-                'bytes_used' : self.metadata.get(X_BYTES_USED, ('0',0))[0],
-                'hash': '', 'id' : '', 'created_at' : '1',
-                'put_timestamp' : self.metadata.get(X_PUT_TIMESTAMP, ('0',0))[0],
-                'delete_timestamp' : '1',
-                'reported_put_timestamp' : '1', 'reported_delete_timestamp' : '1',
-                'reported_object_count' : '1', 'reported_bytes_used' : '1'}
+        data = {'account': self.account, 'container': self.container,
+                'object_count': self.metadata.get(
+                    X_OBJECTS_COUNT, ('0', 0))[0],
+                'bytes_used': self.metadata.get(X_BYTES_USED, ('0', 0))[0],
+                'hash': '', 'id': '', 'created_at': '1',
+                'put_timestamp': self.metadata.get(
+                    X_PUT_TIMESTAMP, ('0', 0))[0],
+                'delete_timestamp': '1',
+                'reported_put_timestamp': '1',
+                'reported_delete_timestamp': '1',
+                'reported_object_count': '1', 'reported_bytes_used': '1'}
         if include_metadata:
             data['metadata'] = self.metadata
         return data

-    def put_object(self, name, timestamp, size, content_type,
-                   etag, deleted=0):
+    def put_object(self, name, timestamp, size, content_type, etag, deleted=0):
         # TODO: Implement the specifics of this func.
         pass

@@ -401,7 +392,8 @@ class DiskDir(DiskCommon):
         self.unlink()

     def update_metadata(self, metadata):
-        assert self.metadata, "Valid container/account metadata should have been created by now"
+        assert self.metadata, "Valid container/account metadata should have" \
+            " been created by now"
         if metadata:
             new_metadata = self.metadata.copy()
             new_metadata.update(metadata)
@@ -478,12 +470,13 @@ class DiskAccount(DiskDir):
         # update the container counts in case they changed behind our back.
         self.update_container_count()

-        data = {'account' : self.account, 'created_at' : '1',
-                'put_timestamp' : '1', 'delete_timestamp' : '1',
-                'container_count' : self.metadata.get(X_CONTAINER_COUNT, (0,0))[0],
-                'object_count' : self.metadata.get(X_OBJECTS_COUNT, (0,0))[0],
-                'bytes_used' : self.metadata.get(X_BYTES_USED, (0,0))[0],
-                'hash' : '', 'id' : ''}
+        data = {'account': self.account, 'created_at': '1',
+                'put_timestamp': '1', 'delete_timestamp': '1',
+                'container_count': self.metadata.get(
+                    X_CONTAINER_COUNT, (0, 0))[0],
+                'object_count': self.metadata.get(X_OBJECTS_COUNT, (0, 0))[0],
+                'bytes_used': self.metadata.get(X_BYTES_USED, (0, 0))[0],
+                'hash': '', 'id': ''}

         if include_metadata:
             data['metadata'] = self.metadata
@@ -493,4 +486,4 @@ class DiskAccount(DiskDir):
         cont_path = os.path.join(self.datadir, container)
         metadata = read_metadata(cont_path)

-        return int(metadata.get(X_PUT_TIMESTAMP, ('0',0))[0]) or None
+        return int(metadata.get(X_PUT_TIMESTAMP, ('0', 0))[0]) or None
@@ -18,16 +18,16 @@ import errno
 import random
 from hashlib import md5
 from contextlib import contextmanager
-from swift.common.utils import normalize_timestamp, renamer
+from swift.common.utils import renamer
 from swift.common.exceptions import DiskFileNotExist
 from gluster.swift.common.exceptions import AlreadyExistsAsDir
-from gluster.swift.common.utils import mkdirs, rmdirs, validate_object, \
-    create_object_metadata, do_open, do_close, do_unlink, do_chown, \
-    do_listdir, read_metadata, write_metadata, os_path, do_fsync
-from gluster.swift.common.utils import X_CONTENT_TYPE, X_CONTENT_LENGTH, \
-    X_TIMESTAMP, X_PUT_TIMESTAMP, X_TYPE, X_ETAG, X_OBJECTS_COUNT, \
-    X_BYTES_USED, X_OBJECT_TYPE, FILE, DIR, MARKER_DIR, OBJECT, DIR_TYPE, \
+from gluster.swift.common.fs_utils import mkdirs, rmdirs, do_open, do_close, \
+    do_unlink, do_chown, os_path, do_fsync
+from gluster.swift.common.utils import read_metadata, write_metadata, \
+    validate_object, create_object_metadata
+from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \
+    X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, MARKER_DIR, OBJECT, DIR_TYPE, \
     FILE_TYPE, DEFAULT_UID, DEFAULT_GID

 import logging
 from swift.obj.server import DiskFile
@@ -42,17 +42,18 @@ def _adjust_metadata(metadata):
     # Fix up the metadata to ensure it has a proper value for the
     # Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE
     # metadata values.
-    content_type = metadata['Content-Type']
+    content_type = metadata[X_CONTENT_TYPE]
     if not content_type:
         # FIXME: How can this be that our caller supplied us with metadata
         # that has a content type that evaluates to False?
         #
         # FIXME: If the file exists, we would already know it is a
         # directory. So why are we assuming it is a file object?
-        metadata['Content-Type'] = FILE_TYPE
+        metadata[X_CONTENT_TYPE] = FILE_TYPE
         x_object_type = FILE
     else:
-        x_object_type = MARKER_DIR if content_type.lower() == DIR_TYPE else FILE
+        x_object_type = MARKER_DIR if content_type.lower() == DIR_TYPE \
+            else FILE
     metadata[X_TYPE] = OBJECT
     metadata[X_OBJECT_TYPE] = x_object_type
     return metadata
@@ -184,7 +185,8 @@ class Gluster_DiskFile(DiskFile):
         if tombstone:
             # We don't write tombstone files. So do nothing.
             return
-        assert self.data_file is not None, "put_metadata: no file to put metadata into"
+        assert self.data_file is not None, \
+            "put_metadata: no file to put metadata into"
         metadata = _adjust_metadata(metadata)
         write_metadata(self.data_file, metadata)
         self.metadata = metadata
@@ -192,8 +194,8 @@ class Gluster_DiskFile(DiskFile):

     def put(self, fd, metadata, extension='.data'):
         """
-        Finalize writing the file on disk, and renames it from the temp file to
-        the real location. This should be called after the data has been
+        Finalize writing the file on disk, and renames it from the temp file
+        to the real location. This should be called after the data has been
         written to the temp file.

         :param fd: file descriptor of the temp file
@@ -202,7 +204,6 @@ class Gluster_DiskFile(DiskFile):
         """
         # Our caller will use '.data' here; we just ignore it since we map the
         # URL directly to the file system.
-        extension = ''

         metadata = _adjust_metadata(metadata)

@@ -220,7 +221,6 @@ class Gluster_DiskFile(DiskFile):
             msg = 'File object exists as a directory: %s' % self.data_file
             raise AlreadyExistsAsDir(msg)

-        timestamp = normalize_timestamp(metadata[X_TIMESTAMP])
         write_metadata(self.tmppath, metadata)
         if X_CONTENT_LENGTH in metadata:
             self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH]))
@@ -248,7 +248,7 @@ class Gluster_DiskFile(DiskFile):

         :param timestamp: timestamp to compare with each file
         """
-        if not self.metadata or self.metadata['X-Timestamp'] >= timestamp:
+        if not self.metadata or self.metadata[X_TIMESTAMP] >= timestamp:
             return

         assert self.data_file, \
@@ -257,7 +257,8 @@ class Gluster_DiskFile(DiskFile):
         if self._is_dir:
             # Marker directory object
             if not rmdirs(self.data_file):
-                logging.error('Unable to delete dir object: %s', self.data_file)
+                logging.error('Unable to delete dir object: %s',
+                              self.data_file)
                 return
         else:
             # File object
@@ -283,7 +284,7 @@ class Gluster_DiskFile(DiskFile):
         file_size = 0
         if self.data_file:
             file_size = os_path.getsize(self.data_file)
             if X_CONTENT_LENGTH in self.metadata:
                 metadata_size = int(self.metadata[X_CONTENT_LENGTH])
                 if file_size != metadata_size:
                     self.metadata[X_CONTENT_LENGTH] = file_size
@@ -314,11 +315,11 @@ class Gluster_DiskFile(DiskFile):
             path = self._container_path
             subdir_list = self._obj_path.split(os.path.sep)
             for i in range(len(subdir_list)):
-                path = os.path.join(path, subdir_list[i]);
+                path = os.path.join(path, subdir_list[i])
                 if not os_path.exists(path):
                     self._create_dir_object(path)

-        tmpfile = '.' + self._obj + '.' + md5(self._obj + \
+        tmpfile = '.' + self._obj + '.' + md5(self._obj +
                   str(random.random())).hexdigest()

         self.tmppath = os.path.join(self.datadir, tmpfile)
@@ -13,8 +13,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import os
+import fcntl
+import time
+import errno
 import logging
-import os, sys, fcntl, time, errno
 from ConfigParser import ConfigParser, NoSectionError, NoOptionError
 from swift.common.utils import TRUE_VALUES, search_tree
 from gluster.swift.common.fs_utils import mkdirs
@@ -25,26 +29,29 @@ from gluster.swift.common.fs_utils import mkdirs
 _fs_conf = ConfigParser()
 MOUNT_IP = 'localhost'
 OBJECT_ONLY = False
-RUN_DIR='/var/run/swift'
+RUN_DIR = '/var/run/swift'
 SWIFT_DIR = '/etc/swift'
 _do_getsize = False
-if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
+if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
     try:
-        MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', 'localhost')
+        MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', MOUNT_IP)
     except (NoSectionError, NoOptionError):
         pass
     try:
-        OBJECT_ONLY = _fs_conf.get('DEFAULT', 'object_only', "no") in TRUE_VALUES
+        OBJECT_ONLY = _fs_conf.get('DEFAULT',
+                                   'object_only',
+                                   "no") in TRUE_VALUES
     except (NoSectionError, NoOptionError):
         pass
     try:
-        RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', '/var/run/swift')
+        RUN_DIR = _fs_conf.get('DEFAULT', 'run_dir', RUN_DIR)
     except (NoSectionError, NoOptionError):
         pass

     try:
-        _do_getsize = _fs_conf.get('DEFAULT', 'accurate_size_in_listing', \
-                                   "no") in TRUE_VALUES
+        _do_getsize = _fs_conf.get('DEFAULT',
+                                   'accurate_size_in_listing',
+                                   "no") in TRUE_VALUES
     except (NoSectionError, NoOptionError):
         pass

@@ -58,9 +65,11 @@ def _busy_wait(full_mount_path):
         if os.path.ismount(full_mount_path):
             return True
         time.sleep(2)
-    logging.error('Busy wait for mount timed out for mount %s', full_mount_path)
+    logging.error('Busy wait for mount timed out for mount %s',
+                  full_mount_path)
     return False

+
 def mount(root, drive):
     # FIXME: Possible thundering herd problem here

@@ -77,15 +86,15 @@ def mount(root, drive):
     if not os.path.isdir(full_mount_path):
         mkdirs(full_mount_path)

-    lck_file = os.path.join(RUN_DIR, '%s.lock' %drive);
+    lck_file = os.path.join(RUN_DIR, '%s.lock' % drive)

     if not os.path.exists(RUN_DIR):
         mkdirs(RUN_DIR)

-    fd = os.open(lck_file, os.O_CREAT|os.O_RDWR)
+    fd = os.open(lck_file, os.O_CREAT | os.O_RDWR)
     with os.fdopen(fd, 'r+b') as f:
         try:
-            fcntl.lockf(f, fcntl.LOCK_EX|fcntl.LOCK_NB)
+            fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
         except IOError as ex:
             if ex.errno in (errno.EACCES, errno.EAGAIN):
                 # This means that some other process is mounting the
@@ -93,13 +102,14 @@ def mount(root, drive):
                 return _busy_wait(full_mount_path)
             else:
                 raise ex
-    mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export, \
+    mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export,
                                                full_mount_path)
     if os.system(mnt_cmd) or not _busy_wait(full_mount_path):
         logging.error('Mount failed %s: %s', NAME, mnt_cmd)
         return False
     return True

+
 def unmount(full_mount_path):
     # FIXME: Possible thundering herd problem here

@@ -107,6 +117,7 @@ def unmount(full_mount_path):
     if os.system(umnt_cmd):
         logging.error('Unable to unmount %s %s' % (full_mount_path, NAME))

+
 def _get_export_list():
     cmnd = 'gluster --remote-host=%s volume info' % MOUNT_IP

@@ -126,6 +137,7 @@ def _get_export_list():

     return export_list

+
 def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"):
     """Read the object-server's configuration file and return
     the device value"""
@@ -23,11 +23,12 @@ from gluster.swift.common import Glusterfs, ring

 if hasattr(swift.common.constraints, 'constraints_conf_int'):
     MAX_OBJECT_NAME_COMPONENT_LENGTH = \
         swift.common.constraints.constraints_conf_int(
             'max_object_name_component_length', 255)
 else:
     MAX_OBJECT_NAME_COMPONENT_LENGTH = 255

+
 def validate_obj_name_component(obj):
     if len(obj) > MAX_OBJECT_NAME_COMPONENT_LENGTH:
         return 'too long (%d)' % len(obj)
@@ -38,6 +39,7 @@ def validate_obj_name_component(obj):
 # Save the original check object creation
 __check_object_creation = swift.common.constraints.check_object_creation

+
 # Define our new one which invokes the original
 def gluster_check_object_creation(req, object_name):
     """
@@ -61,7 +63,7 @@ def gluster_check_object_creation(req, object_name):
             reason = validate_obj_name_component(obj)
             if reason:
                 bdy = 'Invalid object name "%s", component "%s" %s' \
                     % (object_name, obj, reason)
                 ret = HTTPBadRequest(body=bdy,
                                      request=req,
                                      content_type='text/plain')
@@ -74,6 +76,7 @@ swift.common.constraints.check_object_creation = gluster_check_object_creation
 # Save the original check mount
 __check_mount = swift.common.constraints.check_mount

+
 # Define our new one which invokes the original
 def gluster_check_mount(root, drive):
     # FIXME: Potential performance optimization here to not call the original
@@ -84,6 +87,7 @@ def gluster_check_mount(root, drive):

     return Glusterfs.mount(root, drive)

+
 # Replace the original check mount with ours
 swift.common.constraints.check_mount = gluster_check_mount

@@ -13,15 +13,18 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+
 class GlusterfsException(Exception):
     pass

+
 class FileOrDirNotFoundError(GlusterfsException):
     pass

+
 class NotDirectoryError(GlusterfsException):
     pass

+
 class AlreadyExistsAsDir(GlusterfsException):
     pass
-
@@ -16,14 +16,16 @@
 import logging
 import os
 import errno
-import os.path as os_path
+import os.path as os_path  # noqa
 from eventlet import tpool
 from gluster.swift.common.exceptions import FileOrDirNotFoundError, \
     NotDirectoryError

+
 def do_walk(*args, **kwargs):
     return os.walk(*args, **kwargs)

+
 def do_write(fd, msg):
     try:
         cnt = os.write(fd, msg)
@@ -32,6 +34,7 @@ def do_write(fd, msg):
         raise
     return cnt

+
 def do_mkdir(path):
     try:
         os.mkdir(path)
@@ -41,15 +44,18 @@ def do_mkdir(path):
         raise
     return True

+
 def do_makedirs(path):
     try:
         os.makedirs(path)
     except OSError as err:
         if err.errno != errno.EEXIST:
-            logging.exception("Makedirs failed on %s err: %s", path, err.strerror)
+            logging.exception("Makedirs failed on %s err: %s",
+                              path, err.strerror)
             raise
     return True

+
 def do_listdir(path):
     try:
         buf = os.listdir(path)
@@ -58,6 +64,7 @@ def do_listdir(path):
         raise
     return buf

+
 def do_chown(path, uid, gid):
     try:
         os.chown(path, uid, gid)
@@ -66,6 +73,7 @@ def do_chown(path, uid, gid):
         raise
     return True

+
 def do_stat(path):
     try:
         #Check for fd.
@@ -78,6 +86,7 @@ def do_stat(path):
         raise
     return buf

+
 def do_open(path, mode):
     if isinstance(mode, int):
         try:
@@ -93,6 +102,7 @@ def do_open(path, mode):
         raise
     return fd

+
 def do_close(fd):
     #fd could be file or int type.
     try:
@@ -105,16 +115,19 @@ def do_close(fd):
         raise
     return True

-def do_unlink(path, log = True):
+
+def do_unlink(path, log=True):
     try:
         os.unlink(path)
     except OSError as err:
         if err.errno != errno.ENOENT:
             if log:
-                logging.exception("Unlink failed on %s err: %s", path, err.strerror)
+                logging.exception("Unlink failed on %s err: %s",
+                                  path, err.strerror)
             raise
     return True

+
 def do_rmdir(path):
     try:
         os.rmdir(path)
@@ -127,15 +140,17 @@ def do_rmdir(path):
         res = True
     return res

+
 def do_rename(old_path, new_path):
     try:
         os.rename(old_path, new_path)
     except OSError as err:
-        logging.exception("Rename failed on %s to %s err: %s", old_path, new_path, \
-                          err.strerror)
+        logging.exception("Rename failed on %s to %s err: %s",
+                          old_path, new_path, err.strerror)
         raise
     return True

+
 def mkdirs(path):
     """
     Ensures the path is a directory or makes it if not. Errors if the path
@@ -146,6 +161,7 @@ def mkdirs(path):
     if not os.path.isdir(path):
         do_makedirs(path)

+
 def dir_empty(path):
     """
     Return true if directory/container is empty.
@@ -159,6 +175,7 @@ def dir_empty(path):
         raise FileOrDirNotFoundError()
     raise NotDirectoryError()

+
 def rmdirs(path):
     if not os.path.isdir(path):
         return False
@@ -170,6 +187,7 @@ def rmdirs(path):
         return False
     return True

+
 def do_fsync(fd):
     try:
         tpool.execute(os.fsync, fd)
@@ -36,13 +36,12 @@ if conf_files and _conf.read(conf_file):
     if not reseller_prefix.endswith('_'):
         reseller_prefix = reseller_prefix + '_'


 class Ring(ring.Ring):
     def _get_part_nodes(self, part):
         seen_ids = set()
-        nodes = [dev for dev in self._devs \
-                 if dev['device'] == self.acc_name \
-                 and not (dev['id'] in seen_ids \
-                          or seen_ids.add(dev['id']))]
+        nodes = [dev for dev in self._devs if dev['device'] == self.acc_name
+                 and not (dev['id'] in seen_ids or seen_ids.add(dev['id']))]
         if not nodes:
             nodes = [self.false_node]
         return nodes
@@ -86,8 +85,8 @@ class Ring(ring.Ring):
         hardware description
         ====== ===============================================================
         """
-        self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1', 'id': 0, \
-                           'meta': '', 'device': 'volume_not_in_ring', \
+        self.false_node = {'zone': 1, 'weight': 100.0, 'ip': '127.0.0.1',
+                           'id': 0, 'meta': '', 'device': 'volume_not_in_ring',
                            'port': 6012}
         if account.startswith(reseller_prefix):
             self.acc_name = account.replace(reseller_prefix, '', 1)
@@ -97,7 +96,6 @@ class Ring(ring.Ring):
             part = 0
         return part, self._get_part_nodes(part)

-
     def get_more_nodes(self, part):
         """
         Generator to get extra nodes for a partition for hinted handoff.
@@ -21,9 +21,9 @@ import random
 from hashlib import md5
 from eventlet import sleep
 import cPickle as pickle
-from ConfigParser import ConfigParser, NoSectionError, NoOptionError
-from swift.common.utils import normalize_timestamp, TRUE_VALUES
-from gluster.swift.common.fs_utils import *
+from swift.common.utils import normalize_timestamp
+from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \
+    do_stat, do_listdir, do_walk
 from gluster.swift.common import Glusterfs

 X_CONTENT_TYPE = 'Content-Type'
@@ -54,8 +54,11 @@ DEFAULT_GID = -1
 PICKLE_PROTOCOL = 2
 CHUNK_SIZE = 65536
 MEMCACHE_KEY_PREFIX = 'gluster.swift.'
-MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + 'account.details.'
-MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + 'container.details.'
+MEMCACHE_ACCOUNT_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + \
+    'account.details.'
+MEMCACHE_CONTAINER_DETAILS_KEY_PREFIX = MEMCACHE_KEY_PREFIX + \
+    'container.details.'


 def read_metadata(path):
     """
@@ -70,7 +73,8 @@ def read_metadata(path):
     key = 0
     while metadata is None:
         try:
-            metadata_s += xattr.getxattr(path, '%s%s' % (METADATA_KEY, (key or '')))
+            metadata_s += xattr.getxattr(path,
+                                         '%s%s' % (METADATA_KEY, (key or '')))
         except IOError as err:
             if err.errno == errno.ENODATA:
                 if key > 0:
@@ -108,6 +112,7 @@ def read_metadata(path):
         key += 1
     return metadata

+
 def write_metadata(path, metadata):
     """
     Helper function to write pickled metadata for a File/Directory.
@@ -120,13 +125,17 @@ def write_metadata(path, metadata):
     key = 0
     while metastr:
         try:
-            xattr.setxattr(path, '%s%s' % (METADATA_KEY, key or ''), metastr[:MAX_XATTR_SIZE])
+            xattr.setxattr(path,
+                           '%s%s' % (METADATA_KEY, key or ''),
+                           metastr[:MAX_XATTR_SIZE])
         except IOError as err:
-            logging.exception("setxattr failed on %s key %s err: %s", path, key, str(err))
+            logging.exception("setxattr failed on %s key %s err: %s",
+                              path, key, str(err))
             raise
         metastr = metastr[MAX_XATTR_SIZE:]
         key += 1

+
 def clean_metadata(path):
     key = 0
     while True:
@@ -138,21 +147,25 @@ def clean_metadata(path):
             raise
         key += 1

+
 def check_user_xattr(path):
     if not os_path.exists(path):
         return False
     try:
         xattr.setxattr(path, 'user.test.key1', 'value1')
     except IOError as err:
-        logging.exception("check_user_xattr: set failed on %s err: %s", path, str(err))
+        logging.exception("check_user_xattr: set failed on %s err: %s",
+                          path, str(err))
         raise
     try:
         xattr.removexattr(path, 'user.test.key1')
     except IOError as err:
-        logging.exception("check_user_xattr: remove failed on %s err: %s", path, str(err))
+        logging.exception("check_user_xattr: remove failed on %s err: %s",
+                          path, str(err))
        #Remove xattr may fail in case of concurrent remove.
     return True

+
 def validate_container(metadata):
     if not metadata:
         logging.warn('validate_container: No metadata')
@@ -163,16 +176,17 @@ def validate_container(metadata):
        X_PUT_TIMESTAMP not in metadata.keys() or \
        X_OBJECTS_COUNT not in metadata.keys() or \
        X_BYTES_USED not in metadata.keys():
-        #logging.warn('validate_container: Metadata missing entries: %s' % metadata)
         return False

     (value, timestamp) = metadata[X_TYPE]
     if value == CONTAINER:
         return True

-    logging.warn('validate_container: metadata type is not CONTAINER (%r)' % (value,))
+    logging.warn('validate_container: metadata type is not CONTAINER (%r)',
+                 value)
     return False

+
 def validate_account(metadata):
     if not metadata:
         logging.warn('validate_account: No metadata')
@@ -184,16 +198,17 @@ def validate_account(metadata):
        X_OBJECTS_COUNT not in metadata.keys() or \
        X_BYTES_USED not in metadata.keys() or \
        X_CONTAINER_COUNT not in metadata.keys():
-        #logging.warn('validate_account: Metadata missing entries: %s' % metadata)
         return False

     (value, timestamp) = metadata[X_TYPE]
     if value == ACCOUNT:
         return True

-    logging.warn('validate_account: metadata type is not ACCOUNT (%r)' % (value,))
+    logging.warn('validate_account: metadata type is not ACCOUNT (%r)',
+                 value)
     return False

+
 def validate_object(metadata):
     if not metadata:
         logging.warn('validate_object: No metadata')
@@ -205,22 +220,24 @@ def validate_object(metadata):
        X_CONTENT_LENGTH not in metadata.keys() or \
        X_TYPE not in metadata.keys() or \
        X_OBJECT_TYPE not in metadata.keys():
-        #logging.warn('validate_object: Metadata missing entries: %s' % metadata)
         return False

     if metadata[X_TYPE] == OBJECT:
         return True

-    logging.warn('validate_object: metadata type is not OBJECT (%r)' % (metadata[X_TYPE],))
+    logging.warn('validate_object: metadata type is not OBJECT (%r)',
+                 metadata[X_TYPE])
     return False

+
 def is_marker(metadata):
     if not metadata:
         logging.warn('is_marker: No metadata')
         return False

     if X_OBJECT_TYPE not in metadata.keys():
-        logging.warn('is_marker: X_OBJECT_TYPE missing from metadata: %s' % metadata)
+        logging.warn('is_marker: X_OBJECT_TYPE missing from metadata: %s',
+                     metadata)
         return False

     if metadata[X_OBJECT_TYPE] == MARKER_DIR:
@@ -228,6 +245,7 @@ def is_marker(metadata):
     else:
         return False

+
 def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
                  bytes_used=0, obj_list=[]):
     # strip the prefix off, also stripping the leading and trailing slashes
@@ -247,6 +265,7 @@ def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,

     return object_count, bytes_used

+
 def update_list(path, cont_path, dirs=[], files=[], object_count=0,
                 bytes_used=0, obj_list=[]):
     if files:
@@ -279,15 +298,16 @@ def _get_container_details_from_fs(cont_path):

     if os_path.isdir(cont_path):
         for (path, dirs, files) in do_walk(cont_path):
-            object_count, bytes_used = update_list(path, cont_path, dirs, files,
-                                                   object_count, bytes_used,
-                                                   obj_list)
+            object_count, bytes_used = update_list(path, cont_path, dirs,
+                                                   files, object_count,
+                                                   bytes_used, obj_list)

             dir_list.append((path, do_stat(path).st_mtime))
             sleep()

     return ContainerDetails(bytes_used, object_count, obj_list, dir_list)

+
 def get_container_details(cont_path, memcache=None):
     """
     Return object_list, object_count and bytes_used.
@@ -344,6 +364,7 @@ def _get_account_details_from_fs(acc_path, acc_stats):

     return AccountDetails(acc_stats.st_mtime, container_count, container_list)

+
 def get_account_details(acc_path, memcache=None):
     """
     Return container_list and container_count.
@@ -369,6 +390,7 @@ def get_account_details(acc_path, memcache=None):
             memcache.set(mkey, ad)
     return ad.container_list, ad.container_count

+
 def _get_etag(path):
     etag = md5()
     with open(path, 'rb') as fp:
@@ -380,6 +402,7 @@ def _get_etag(path):
             break
     return etag.hexdigest()

+
 def get_object_metadata(obj_path):
     """
     Return metadata of object.
@@ -398,10 +421,10 @@ def get_object_metadata(obj_path):
                 X_CONTENT_TYPE: DIR_TYPE if is_dir else FILE_TYPE,
                 X_OBJECT_TYPE: DIR if is_dir else FILE,
                 X_CONTENT_LENGTH: 0 if is_dir else stats.st_size,
-                X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path),
-                }
+                X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path)}
     return metadata


 def _add_timestamp(metadata_i):
     # At this point we have a simple key/value dictionary, turn it into
     # key/(value,timestamp) pairs.
@@ -414,30 +437,38 @@ def _add_timestamp(metadata_i):
         metadata[key] = value_i
     return metadata

+
 def get_container_metadata(cont_path, memcache=None):
     objects = []
     object_count = 0
     bytes_used = 0
-    objects, object_count, bytes_used = get_container_details(cont_path, memcache)
+    objects, object_count, bytes_used = get_container_details(cont_path,
+                                                              memcache)
     metadata = {X_TYPE: CONTAINER,
-                X_TIMESTAMP: normalize_timestamp(os_path.getctime(cont_path)),
-                X_PUT_TIMESTAMP: normalize_timestamp(os_path.getmtime(cont_path)),
+                X_TIMESTAMP: normalize_timestamp(
+                    os_path.getctime(cont_path)),
+                X_PUT_TIMESTAMP: normalize_timestamp(
+                    os_path.getmtime(cont_path)),
                 X_OBJECTS_COUNT: object_count,
                 X_BYTES_USED: bytes_used}
     return _add_timestamp(metadata)

+
 def get_account_metadata(acc_path, memcache=None):
     containers = []
     container_count = 0
     containers, container_count = get_account_details(acc_path, memcache)
     metadata = {X_TYPE: ACCOUNT,
-                X_TIMESTAMP: normalize_timestamp(os_path.getctime(acc_path)),
-                X_PUT_TIMESTAMP: normalize_timestamp(os_path.getmtime(acc_path)),
+                X_TIMESTAMP: normalize_timestamp(
+                    os_path.getctime(acc_path)),
+                X_PUT_TIMESTAMP: normalize_timestamp(
+                    os_path.getmtime(acc_path)),
                 X_OBJECTS_COUNT: 0,
                 X_BYTES_USED: 0,
                 X_CONTAINER_COUNT: container_count}
     return _add_timestamp(metadata)

+
 def restore_metadata(path, metadata):
     meta_orig = read_metadata(path)
     if meta_orig:
@@ -449,18 +480,22 @@ def restore_metadata(path, metadata):
         write_metadata(path, meta_new)
     return meta_new

+
 def create_object_metadata(obj_path):
     metadata = get_object_metadata(obj_path)
     return restore_metadata(obj_path, metadata)

+
 def create_container_metadata(cont_path, memcache=None):
     metadata = get_container_metadata(cont_path, memcache)
     return restore_metadata(cont_path, metadata)

+
 def create_account_metadata(acc_path, memcache=None):
     metadata = get_account_metadata(acc_path, memcache)
     return restore_metadata(acc_path, metadata)

+
 def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
     """
     Ensure that a pickle file gets written to disk. The file is first written
@@ -479,7 +514,8 @@ def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
     """
     dirname = os.path.dirname(dest)
     basename = os.path.basename(dest)
-    tmpname = '.' + basename + '.' + md5(basename + str(random.random())).hexdigest()
+    tmpname = '.' + basename + '.' + \
+        md5(basename + str(random.random())).hexdigest()
     tmppath = os.path.join(dirname, tmpname)
     with open(tmppath, 'wb') as fo:
         pickle.dump(obj, fo, pickle_protocol)
@@ -491,6 +527,7 @@ def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
         do_fsync(fo)
     do_rename(tmppath, dest)

+
 # Over-ride Swift's utils.write_pickle with ours
 import swift.common.utils
 swift.common.utils.write_pickle = write_pickle
@ -17,7 +17,7 @@
|
|||||||
|
|
||||||
# Simply importing this monkey patches the constraint handling to fit our
|
# Simply importing this monkey patches the constraint handling to fit our
|
||||||
# needs
|
# needs
|
||||||
import gluster.swift.common.constraints
|
import gluster.swift.common.constraints # noqa
|
||||||
|
|
||||||
from swift.container import server
|
from swift.container import server
|
||||||
from gluster.swift.common.DiskDir import DiskDir
|
from gluster.swift.common.DiskDir import DiskDir
|
||||||
|
@ -17,8 +17,8 @@
|
|||||||
|
|
||||||
# Simply importing this monkey patches the constraint handling to fit our
|
# Simply importing this monkey patches the constraint handling to fit our
|
||||||
# needs
|
# needs
|
||||||
import gluster.swift.common.constraints
|
import gluster.swift.common.constraints # noqa
|
||||||
import gluster.swift.common.utils
|
import gluster.swift.common.utils # noqa
|
||||||
|
|
||||||
from swift.obj import server
|
from swift.obj import server
|
||||||
from gluster.swift.common.DiskFile import Gluster_DiskFile
|
from gluster.swift.common.DiskFile import Gluster_DiskFile
|
||||||
|
@ -16,10 +16,11 @@

# Simply importing this monkey patches the constraint handling to fit our
# needs
import gluster.swift.common.constraints
import gluster.swift.common.constraints  # noqa

from swift.proxy import server


def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI proxy apps."""
    conf = global_conf.copy()
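The same one-line change repeats across the server entry points above: flake8 flags an import that is never referenced (pyflakes code F401), but these imports exist purely for their import-time monkey-patching side effects, so the trailing # noqa marker tells flake8 to skip checks on those lines rather than forcing a fake reference to the module.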
test/unit/common/test_diskdir.py (new file)
@ -0,0 +1,575 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Tests for gluster.swift.common.DiskDir """

import os
import errno
import tempfile
import cPickle as pickle
import unittest
import shutil
import tarfile
from nose import SkipTest
from swift.common.utils import normalize_timestamp
from gluster.swift.common import utils
import gluster.swift.common.Glusterfs
from test_utils import _initxattr, _destroyxattr, _setxattr, _getxattr
from test.unit import FakeLogger

gluster.swift.common.Glusterfs.RUN_DIR = '/tmp/gluster_unit_tests/run'
try:
    os.makedirs(gluster.swift.common.Glusterfs.RUN_DIR)
except OSError as e:
    if e.errno != errno.EEXIST:
        raise

import gluster.swift.common.DiskDir as dd


def timestamp_in_range(ts, base):
    low = normalize_timestamp(base - 5)
    high = normalize_timestamp(base + 5)
    assert low <= ts, "timestamp %s is less than %s" % (ts, low)
    assert high >= ts, "timestamp %s is greater than %s" % (ts, high)


class TestDiskDirModuleFunctions(unittest.TestCase):
    """ Tests for gluster.swift.common.DiskDir module functions """

    def setUp(self):
        raise SkipTest

    def test__read_metadata(self):
        def fake_read_metadata(p):
            return { 'a': 1, 'b': ('c', 5) }
        orig_rm = dd.read_metadata
        dd.read_metadata = fake_read_metadata
        try:
            md = dd._read_metadata("/tmp/foo")
        finally:
            dd.read_metadata = orig_rm
        assert md['a'] == (1, 0)
        assert md['b'] == ('c', 5)

    def test_filter_end_marker(self):
        in_objs, end_marker = [], ''
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == []

        in_objs, end_marker = [], 'abc'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == []

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], ''
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == []

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'ABC'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == []

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'efg'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == ['abc_123', 'abc_456', 'abc_789', 'def_101']

        # Input not sorted, so we should only expect one name
        in_objs, end_marker = ['abc_123', 'def_101', 'abc_456', 'abc_789'], 'abc_789'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == ['abc_123',]

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_789'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == ['abc_123', 'abc_456']

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_5'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == ['abc_123', 'abc_456']

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_123'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == []

        in_objs, end_marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'def_101'
        out_objs = dd.filter_end_marker(in_objs, end_marker)
        assert list(out_objs) == ['abc_123', 'abc_456', 'abc_789']

    def test_filter_marker(self):
        in_objs, marker = [], ''
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == []

        in_objs, marker = [], 'abc'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == []

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], ''
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == in_objs

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'ABC'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == in_objs

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'efg'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == []

        # Input not sorted, so we should expect the names as listed
        in_objs, marker = ['abc_123', 'def_101', 'abc_456', 'abc_789'], 'abc_456'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == ['def_101', 'abc_789']

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_456'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == ['abc_789', 'def_101']

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_5'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == ['abc_789', 'def_101']

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc_123'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == ['abc_456', 'abc_789', 'def_101']

        in_objs, marker = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'def_101'
        out_objs = dd.filter_marker(in_objs, marker)
        assert list(out_objs) == []

    def test_filter_prefix(self):
        in_objs, prefix = [], ''
        out_objs = dd.filter_prefix(in_objs, prefix)
        assert list(out_objs) == []

        in_objs, prefix = [], 'abc'
        out_objs = dd.filter_prefix(in_objs, prefix)
        assert list(out_objs) == []

        in_objs, prefix = ['abc_123', 'abc_456', 'abc_789', 'def_101'], ''
        out_objs = dd.filter_prefix(in_objs, prefix)
        assert list(out_objs) == in_objs

        in_objs, prefix = ['abc_123', 'abc_456', 'abc_789', 'def_101'], 'abc'
        out_objs = dd.filter_prefix(in_objs, prefix)
        assert list(out_objs) == ['abc_123', 'abc_456', 'abc_789']

        in_objs, prefix = ['abc_123', 'def_101', 'abc_456', 'abc_789'], 'abc'
        out_objs = dd.filter_prefix(in_objs, prefix)
        assert list(out_objs) == ['abc_123',]

    def test_filter_delimiter(self):
        in_objs, delimiter, prefix = [], None, ''
        try:
            out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        except AssertionError:
            pass
        except Exception:
            raise SkipTest
            self.fail("Failed to raise assertion")

        in_objs, delimiter, prefix = [], '', ''
        try:
            out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        except AssertionError:
            pass
        except Exception:
            self.fail("Failed to raise assertion")

        in_objs, delimiter, prefix = [], str(255), ''
        try:
            out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        except AssertionError:
            pass
        except Exception:
            self.fail("Failed to raise assertion")

        in_objs, delimiter, prefix = [], '_', ''
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        assert list(out_objs) == []

        in_objs, delimiter, prefix = ['abc_'], '_', ''
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        assert list(out_objs) == in_objs

        in_objs, delimiter, prefix = ['abc_123', 'abc_456'], '_', ''
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        assert list(out_objs) == ['abc_']

        in_objs, delimiter, prefix = ['abc_123', 'abc_456', 'def_123', 'def_456'], '_', ''
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        assert list(out_objs) == ['abc_', 'def_']

        in_objs, delimiter, prefix = ['abc_123', 'abc_456', 'abc_789', 'def_101'], '_', 'abc_'
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        l = list(out_objs)
        assert l == ['abc_123', 'abc_456', 'abc_789'], repr(l)

        in_objs, delimiter, prefix = ['abc_123_a', 'abc_456', 'abc_789_', 'def_101'], '_', 'abc_'
        out_objs = dd.filter_delimiter(in_objs, delimiter, prefix)
        assert list(out_objs) == ['abc_123_a', 'abc_789_']

    def test_filter_limit(self):
        try:
            l = list(dd.filter_limit([], 0))
        except AssertionError:
            pass
        else:
            self.fail("Accepted a zero limit")

        l = list(dd.filter_limit([], 1))
        assert l == []
        l = list(dd.filter_limit([1,], 1))
        assert l == [1,]
        l = list(dd.filter_limit([1,], 10))
        assert l == [1,]
        l = list(dd.filter_limit([1,2,3], 1))
        assert l == [1,]
        l = list(dd.filter_limit([1,2,3], 2))
        assert l == [1,2]
        l = list(dd.filter_limit([1,2,3], 3))
        assert l == [1,2,3]
        l = list(dd.filter_limit([1,2,3], 4))
        assert l == [1,2,3]


class TestDiskCommon(unittest.TestCase):
    """ Tests for gluster.swift.common.DiskDir.DiskCommon """

    def setUp(self):
        raise SkipTest
        _initxattr()
        self.fake_logger = FakeLogger()
        self.td = tempfile.mkdtemp()
        self.fake_drives = []
        self.fake_accounts = []
        for i in range(0,3):
            self.fake_drives.append("drv%d" % i)
            os.makedirs(os.path.join(self.td, self.fake_drives[i]))
            self.fake_accounts.append(self.fake_drives[i])

    def tearDown(self):
        _destroyxattr()
        shutil.rmtree(self.td)

    def test_constructor(self):
        dc = dd.DiskCommon(self.td, self.fake_drives[0],
                           self.fake_accounts[0], self.fake_logger)
        assert dc.metadata == {}
        assert dc.db_file == dd._db_file
        assert dc.pending_timeout == 0
        assert dc.stale_reads_ok == False
        assert dc.root == self.td
        assert dc.logger == self.fake_logger
        assert dc.account == self.fake_accounts[0]
        assert dc.datadir == os.path.join(self.td, self.fake_drives[0])
        assert dc._dir_exists is None

    def test__dir_exists_read_metadata_exists(self):
        datadir = os.path.join(self.td, self.fake_drives[0])
        fake_md = { "fake": (True,0) }
        fake_md_p = pickle.dumps(fake_md, utils.PICKLE_PROTOCOL)
        _setxattr(datadir, utils.METADATA_KEY, fake_md_p)
        dc = dd.DiskCommon(self.td, self.fake_drives[0],
                           self.fake_accounts[0], self.fake_logger)
        dc._dir_exists_read_metadata()
        assert dc.metadata == fake_md, repr(dc.metadata)
        assert dc.db_file == dd._db_file
        assert dc.pending_timeout == 0
        assert dc.stale_reads_ok == False
        assert dc.root == self.td
        assert dc.logger == self.fake_logger
        assert dc.account == self.fake_accounts[0]
        assert dc.datadir == datadir
        assert dc._dir_exists is True

    def test__dir_exists_read_metadata_does_not_exist(self):
        dc = dd.DiskCommon(self.td, "dne0", "dne0", self.fake_logger)
        dc._dir_exists_read_metadata()
        assert dc.metadata == {}
        assert dc.db_file == dd._db_file
        assert dc.pending_timeout == 0
        assert dc.stale_reads_ok == False
        assert dc.root == self.td
        assert dc.logger == self.fake_logger
        assert dc.account == "dne0"
        assert dc.datadir == os.path.join(self.td, "dne0")
        assert dc._dir_exists is False

    def test_initialize(self):
        dc = dd.DiskCommon(self.td, self.fake_drives[0],
                           self.fake_accounts[0], self.fake_logger)
        dc.initialize('12345')
        assert dc.metadata == {}
        assert dc.db_file == dd._db_file
        assert dc.pending_timeout == 0
        assert dc.stale_reads_ok == False
        assert dc.root == self.td
        assert dc.logger == self.fake_logger
        assert dc.account == self.fake_accounts[0]
        assert dc.datadir == os.path.join(self.td, self.fake_drives[0])
        assert dc._dir_exists is None

    def test_is_deleted(self):
        dc = dd.DiskCommon(self.td, self.fake_drives[0],
                           self.fake_accounts[0], self.fake_logger)
        assert dc.is_deleted() == False

    def test_update_metadata(self):
        dc = dd.DiskCommon(self.td, self.fake_drives[0],
                           self.fake_accounts[0], self.fake_logger)
        utils.create_container_metadata(dc.datadir)
        dc.metadata = dd._read_metadata(dc.datadir)
        md_copy = dc.metadata.copy()

        def _mock_write_metadata(path, md):
            self.fail("write_metadata should not have been called")

        orig_wm = dd.write_metadata
        dd.write_metadata = _mock_write_metadata
        try:
            dc.update_metadata({})
            assert dc.metadata == md_copy
            dc.update_metadata(md_copy)
            assert dc.metadata == md_copy
        finally:
            dd.write_metadata = orig_wm

        dc.update_metadata({'X-Container-Meta-foo': '42'})
        assert 'X-Container-Meta-foo' in dc.metadata
        assert dc.metadata['X-Container-Meta-foo'] == '42'
        md = pickle.loads(_getxattr(dc.datadir, utils.METADATA_KEY))
        assert dc.metadata == md, "%r != %r" % (dc.metadata, md)
        del dc.metadata['X-Container-Meta-foo']
        assert dc.metadata == md_copy


class TestDiskDir(unittest.TestCase):
    """ Tests for gluster.swift.common.DiskDir.DiskDir """

    def setUp(self):
        _initxattr()
        self.fake_logger = FakeLogger()
        self.td = tempfile.mkdtemp()
        self.fake_drives = []
        self.fake_accounts = []
        for i in range(0,3):
            self.fake_drives.append("drv%d" % i)
            os.makedirs(os.path.join(self.td, self.fake_drives[i]))
            self.fake_accounts.append(self.fake_drives[i])

    def tearDown(self):
        _destroyxattr()
        shutil.rmtree(self.td)

    def test_constructor(self):
        raise SkipTest
        self.fail("Implement me")

    def test_empty(self):
        raise SkipTest
        self.fail("Implement me")

    def test_list_objects_iter(self):
        raise SkipTest
        self.fail("Implement me")

    def test_get_info(self):
        raise SkipTest
        self.fail("Implement me")

    def test_delete_db(self):
        raise SkipTest
        self.fail("Implement me")


class TestDiskAccount(unittest.TestCase):
    """ Tests for gluster.swift.common.DiskDir.DiskAccount """

    def setUp(self):
        _initxattr()
        self.fake_logger = FakeLogger()
        self.td = tempfile.mkdtemp()
        self.fake_drives = []
        self.fake_accounts = []
        self.fake_md = []
        for i in range(0,3):
            self.fake_drives.append("drv%d" % i)
            os.makedirs(os.path.join(self.td, self.fake_drives[i]))
            self.fake_accounts.append(self.fake_drives[i])
            if i == 0:
                # First drive does not have any initial account metadata
                continue
            if i == 1:
                # Second drive has account metadata but it is not valid
                datadir = os.path.join(self.td, self.fake_drives[i])
                fake_md = { "fake-drv-%d" % i: (True,0) }
                self.fake_md.append(fake_md)
                fake_md_p = pickle.dumps(fake_md, utils.PICKLE_PROTOCOL)
                _setxattr(datadir, utils.METADATA_KEY, fake_md_p)
            if i == 2:
                # Third drive has valid account metadata
                utils.create_account_metadata(datadir)

    def tearDown(self):
        _destroyxattr()
        shutil.rmtree(self.td)

    def test_constructor_no_metadata(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        assert da._container_info is None
        assert da._dir_exists is True
        ctime = os.path.getctime(da.datadir)
        mtime = os.path.getmtime(da.datadir)
        exp_md = {
            'X-Bytes-Used': (0, 0),
            'X-Timestamp': (normalize_timestamp(ctime), 0),
            'X-Object-Count': (0, 0),
            'X-Type': ('Account', 0),
            'X-PUT-Timestamp': (normalize_timestamp(mtime), 0),
            'X-Container-Count': (0, 0)}
        assert da.metadata == exp_md, repr(da.metadata)

    def test_constructor_metadata_not_valid(self):
        da = dd.DiskAccount(self.td, self.fake_drives[1],
                            self.fake_accounts[1], self.fake_logger)
        raise SkipTest
        assert da._container_info is None
        assert da._dir_exists is True
        ctime = os.path.getctime(da.datadir)
        mtime = os.path.getmtime(da.datadir)
        exp_md = {
            'X-Bytes-Used': (0, 0),
            'X-Timestamp': (normalize_timestamp(ctime), 0),
            'X-Object-Count': (0, 0),
            'X-Type': ('Account', 0),
            'X-PUT-Timestamp': (normalize_timestamp(mtime), 0),
            'X-Container-Count': (0, 0),
            'fake-drv-1': (True, 0)}
        assert da.metadata == exp_md, repr(da.metadata)

    def test_constructor_metadata_valid(self):
        da = dd.DiskAccount(self.td, self.fake_drives[2],
                            self.fake_accounts[2], self.fake_logger)
        raise SkipTest
        assert da._container_info is None
        assert da._dir_exists is True
        ctime = os.path.getctime(da.datadir)
        mtime = os.path.getmtime(da.datadir)
        exp_md = {
            'X-Bytes-Used': (0, 0),
            'X-Timestamp': (normalize_timestamp(ctime), 0),
            'X-Object-Count': (0, 0),
            'X-Type': ('Account', 0),
            'X-PUT-Timestamp': (normalize_timestamp(mtime), 0),
            'X-Container-Count': (0, 0)}
        assert da.metadata == exp_md, repr(da.metadata)

    def test_list_containers_iter(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        self.fail("Implement me")

    get_info_keys = set(['account', 'created_at', 'put_timestamp',
                         'delete_timestamp', 'container_count',
                         'object_count', 'bytes_used', 'hash', 'id'])

    def test_get_info_empty(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        data = da.get_info()
        assert set(data.keys()) == self.get_info_keys
        assert data['account'] == self.fake_accounts[0]
        assert data['created_at'] == '1'
        assert data['put_timestamp'] == '1'
        assert data['delete_timestamp'] == '1'
        assert data['container_count'] == 0
        assert data['object_count'] == 0
        assert data['bytes_used'] == 0
        assert data['hash'] == ''
        assert data['id'] == ''

    def test_get_info(self):
        tf = tarfile.open("common/data/account_tree.tar.bz2", "r:bz2")
        orig_cwd = os.getcwd()
        os.chdir(os.path.join(self.td, self.fake_drives[0]))
        try:
            tf.extractall()
        finally:
            os.chdir(orig_cwd)
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        data = da.get_info()
        assert set(data.keys()) == self.get_info_keys
        assert data['account'] == self.fake_accounts[0]
        assert data['created_at'] == '1'
        assert data['put_timestamp'] == '1'
        assert data['delete_timestamp'] == '1'
        assert data['container_count'] == 3
        assert data['object_count'] == 0
        assert data['bytes_used'] == 0
        assert data['hash'] == ''
        assert data['id'] == ''

    def test_get_container_timestamp(self):
        tf = tarfile.open("common/data/account_tree.tar.bz2", "r:bz2")
        orig_cwd = os.getcwd()
        datadir = os.path.join(self.td, self.fake_drives[0])
        os.chdir(datadir)
        try:
            tf.extractall()
        finally:
            os.chdir(orig_cwd)
        md = dd.create_container_metadata(os.path.join(datadir, 'c2'))
        assert 'X-PUT-Timestamp' in md, repr(md)
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        cts = da.get_container_timestamp('c2')
        assert md['X-PUT-Timestamp'][0] == cts, repr(cts)

    def test_update_put_timestamp_not_updated(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        da.update_put_timestamp('12345')
        assert da.metadata['X-PUT-Timestamp'][0] != '12345', repr(da.metadata)

    def test_update_put_timestamp_updated(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        exp_pts = str(float(da.metadata['X-PUT-Timestamp'][0]) + 100)
        da.update_put_timestamp(exp_pts)
        raise SkipTest
        assert da.metadata['X-PUT-Timestamp'][0] == exp_pts, repr(da.metadata)

    def test_delete_db(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        assert da._dir_exists == True
        da.delete_db('12345')
        assert da._dir_exists == True

    def test_put_container(self):
        raise SkipTest
        self.fail("Implement me")

    def test_is_status_deleted(self):
        da = dd.DiskAccount(self.td, self.fake_drives[0],
                            self.fake_accounts[0], self.fake_logger)
        raise SkipTest
        assert da.is_status_deleted() == False
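The skeleton above leans on a simple convention: raising SkipTest in setUp (or at the top of a test body) makes the whole class, or that one test, show up in the nose report as skipped rather than failing, while the self.fail("Implement me") placeholders after each raise mark exactly where real coverage still has to land. A minimal stand-alone illustration of the pattern (hypothetical test class, not part of this patch):

import unittest
from nose import SkipTest


class TestNotYetCovered(unittest.TestCase):

    def setUp(self):
        # Every test in the class reports as skipped until this is removed
        raise SkipTest

    def test_behavior(self):
        # Dead code for now; becomes the failure marker once setUp runs
        self.fail("Implement me")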
@ -57,9 +57,6 @@ class MockException(Exception):
def _mock_rmdirs(p):
    raise MockException("gluster.swift.common.DiskFile.rmdirs() called")


def _mock_do_listdir(p):
    raise MockException("gluster.swift.common.DiskFile.do_listdir() called")


def _mock_do_unlink(f):
    ose = OSError()
    ose.errno = errno.ENOENT
@ -575,16 +572,13 @@ class TestDiskFile(unittest.TestCase):
                               "z", self.lg)
        assert gdf.metadata == {}
        _saved_rmdirs = gluster.swift.common.DiskFile.rmdirs
        _saved_do_listdir = gluster.swift.common.DiskFile.do_listdir
        gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs
        gluster.swift.common.DiskFile.do_listdir = _mock_do_listdir
        try:
            gdf.unlinkold(None)
        except MockException as exp:
            self.fail(str(exp))
        finally:
            gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs
            gluster.swift.common.DiskFile.do_listdir = _saved_do_listdir

    def test_unlinkold_same_timestamp(self):
        assert not os.path.exists("/tmp/foo")
@ -593,16 +587,13 @@ class TestDiskFile(unittest.TestCase):
        assert gdf.metadata == {}
        gdf.metadata['X-Timestamp'] = 1
        _saved_rmdirs = gluster.swift.common.DiskFile.rmdirs
        _saved_do_listdir = gluster.swift.common.DiskFile.do_listdir
        gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs
        gluster.swift.common.DiskFile.do_listdir = _mock_do_listdir
        try:
            gdf.unlinkold(1)
        except MockException as exp:
            self.fail(str(exp))
        finally:
            gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs
            gluster.swift.common.DiskFile.do_listdir = _saved_do_listdir

    def test_unlinkold_file(self):
        td = tempfile.mkdtemp()
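These hunks drop the do_listdir stubbing that is no longer needed, but keep the usual save/patch/restore shape for stubbing a module-level helper without a mocking library: stash the real function, install the stub, and restore it in a finally block so a failing test cannot leak the stub into later tests. A self-contained sketch of the same shape against a real standard-library function:

import os


def _mock_listdir(path):
    raise AssertionError("os.listdir() should not have been called")

_saved_listdir = os.listdir       # stash the real function
os.listdir = _mock_listdir        # install the stub
try:
    pass  # the code under test runs here; any os.listdir() call now trips
finally:
    os.listdir = _saved_listdir   # restore even if the block above raises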
@ -1,3 +1,8 @@
# Install bounded pep8/pyflakes first, then let flake8 install
pep8==1.4.5
pyflakes==0.7.2
flake8==2.0

coverage
nose
nosexcover
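The comment on the pins states the intent: flake8 2.0 declares only loose lower bounds on its pep8 and pyflakes checkers, so installing the bounded versions first appears meant to stop pip from pulling newer checker releases whose added checks could suddenly break the gate.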
tox.ini
@ -1,5 +1,5 @@
[tox]
envlist = py26,py27
envlist = py26,py27,pep8

[testenv]
setenv = VIRTUAL_ENV={envdir}
@ -18,8 +18,19 @@ commands = nosetests -v --exe --with-xunit --with-coverage --cover-package glust
[tox:jenkins]
downloadcache = ~/cache/pip

[testenv:pep8]
changedir = {toxinidir}
commands =
  flake8 gluster test

[testenv:cover]
setenv = NOSE_WITH_COVERAGE=1

[testenv:venv]
commands = {posargs}

[flake8]
ignore = H
builtins = _
exclude = .venv,.tox,dist,doc,test,*egg
show-source = True
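With these additions, tox -e pep8 (or a bare tox run, since pep8 now sits in the default envlist) executes flake8 over the gluster and test trees from the project root. In the [flake8] section, ignore = H mutes the H-prefixed OpenStack hacking checks, builtins = _ keeps pyflakes from flagging the gettext underscore as an undefined name, and show-source = True prints the offending source line with each violation, which is what lets a Jenkins pre-commit job fail fast on style errors.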