Rebase to latest OpenStack Swift DiskFile API

As of October 28th, 2013, we rebase to OpenStack Swift master (commit
4bfe674) to pick up the latest officially supported DiskFile API
changes. We use a snapshot of OpenStack Swift stored in the
gluster-swift launchpad downloads area so that we can deliberately
rebase at our own pace.

With this refactoring, all the code for handling I/O is wholly
contained in the swift tree for object operations. This will allow us
to use a different fs_utils implementation in the future (for example,
one based on yet-to-be-implemented Python bindings over libgfapi).
This also means the "Fake_file" class has been removed entirely.

Change-Id: I767983f88c59786e30b6c64da16d1cb6ab3c3e7f
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/5993
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
Peter Portante 2013-09-20 11:25:04 -04:00 committed by Luis Pabon
parent 70a4cef96c
commit 100d6b01bd
12 changed files with 857 additions and 879 deletions
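
The commit message's claim about I/O handling deserves a concrete illustration. Since every file system call in the object path now funnels through fs_utils-style wrappers, a future backend only has to replace that one seam. A minimal sketch of the idea, with hypothetical names (no libgfapi Python bindings existed at the time of this commit):

import os

class PosixIO(object):
    """Default backend: thin wrappers over POSIX calls, as fs_utils does."""
    @staticmethod
    def do_write(fd, buf):
        return os.write(fd, buf)

    @staticmethod
    def do_read(fd, n):
        return os.read(fd, n)

class GfapiIO(object):
    """Hypothetical future backend over libgfapi Python bindings."""
    @staticmethod
    def do_write(fd, buf):
        raise NotImplementedError("libgfapi bindings not yet available")

    @staticmethod
    def do_read(fd, n):
        raise NotImplementedError("libgfapi bindings not yet available")

# Callers import one module-level seam instead of scattering os.* calls.
io_backend = PosixIO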

.gitignore

@ -1,8 +1,9 @@
.tox
/.tox
gluster_swift.egg-info
test/unit/.coverage
test/unit/nosetests.xml
test/unit/coverage.xml
test/unit/cover
build
/test/unit/.coverage
/test/unit/nosetests.xml
/test/unit/coverage.xml
/test/unit/cover
/build
/swift
*.pyc

gluster/swift/common/fs_utils.py

@ -18,42 +18,39 @@ import os
import errno
import stat
import random
import os.path as os_path # noqa
import ctypes
import os.path as _os_path
from eventlet import sleep
from swift.common.utils import load_libc_function
from gluster.swift.common.exceptions import FileOrDirNotFoundError, \
NotDirectoryError, GlusterFileSystemOSError, GlusterFileSystemIOError
NotDirectoryError, GlusterFileSystemOSError
class Fake_file(object):
def __init__(self, path):
self.path = path
def tell(self):
return 0
def read(self, count):
return None
def fileno(self):
return -1
def close(self):
pass
os_path = _os_path
def do_walk(*args, **kwargs):
return os.walk(*args, **kwargs)
def do_write(fd, msg):
def do_write(fd, buf):
try:
cnt = os.write(fd, msg)
cnt = os.write(fd, buf)
except OSError as err:
raise GlusterFileSystemOSError(
err.errno, '%s, os.write("%s", ...)' % (err.strerror, fd))
return cnt
def do_read(fd, n):
try:
buf = os.read(fd, n)
except OSError as err:
raise GlusterFileSystemOSError(
err.errno, '%s, os.write("%s", ...)' % (err.strerror, fd))
return buf
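# Note (not part of the diff): the error text above names os.write even
# though this wrapper calls os.read; the format string appears to have
# been carried over from do_write().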
def do_ismount(path):
"""
Test whether a path is a mount point.
@ -203,7 +200,6 @@ def do_fstat(fd):
def do_open(path, flags, **kwargs):
if isinstance(flags, int):
try:
fd = os.open(path, flags, **kwargs)
except OSError as err:
@ -211,24 +207,9 @@ def do_open(path, flags, **kwargs):
err.errno, '%s, os.open("%s", %x, %r)' % (
err.strerror, path, flags, kwargs))
return fd
else:
try:
fp = open(path, flags, **kwargs)
except IOError as err:
raise GlusterFileSystemIOError(
err.errno, '%s, open("%s", %s, %r)' % (
err.strerror, path, flags, kwargs))
return fp
def do_close(fd):
if isinstance(fd, file) or isinstance(fd, Fake_file):
try:
fd.close()
except IOError as err:
raise GlusterFileSystemIOError(
err.errno, '%s, os.close(%s)' % (err.strerror, fd))
else:
try:
os.close(fd)
except OSError as err:
@ -268,9 +249,31 @@ def do_fsync(fd):
def do_fdatasync(fd):
try:
os.fdatasync(fd)
except AttributeError:
do_fsync(fd)
except OSError as err:
raise GlusterFileSystemOSError(
err.errno, '%s, os.fdatasync("%s")' % (err.strerror, fd))
err.errno, '%s, os.fsync("%s")' % (err.strerror, fd))
_posix_fadvise = None
def do_fadvise64(fd, offset, length):
global _posix_fadvise
if _posix_fadvise is None:
_posix_fadvise = load_libc_function('posix_fadvise64')
# 4 means "POSIX_FADV_DONTNEED"
_posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
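# Illustrative call (not part of the diff): after a synced range of a
# large PUT, do_fadvise64(fd, last_sync, diff) asks the kernel to drop
# those now-clean pages (POSIX_FADV_DONTNEED), taking over the job of
# the drop_buffer_cache() helper previously imported from Swift.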
def do_lseek(fd, pos, how):
try:
os.lseek(fd, pos, how)
except OSError as err:
raise GlusterFileSystemOSError(
err.errno, '%s, os.fsync("%s")' % (err.strerror, fd))
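# Note (not part of the diff): as in do_read() above, the message
# strings in do_fdatasync() and do_lseek() name os.fsync rather than
# the calls they actually wrap.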
def mkdirs(path):

gluster/swift/common/utils.py

@ -17,15 +17,13 @@ import os
import stat
import errno
import xattr
import random
import logging
from hashlib import md5
from eventlet import sleep
import cPickle as pickle
from swift.common.utils import normalize_timestamp
from gluster.swift.common.exceptions import GlusterFileSystemIOError
from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \
do_stat, do_fstat, do_listdir, do_walk, do_rmdir
from gluster.swift.common.fs_utils import os_path, do_stat, do_listdir, \
do_walk, do_rmdir, do_fstat
from gluster.swift.common import Glusterfs
X_CONTENT_TYPE = 'Content-Type'
@ -56,6 +54,21 @@ PICKLE_PROTOCOL = 2
CHUNK_SIZE = 65536
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return "%016.05f" % (float(timestamp))
def read_metadata(path_or_fd):
"""
Helper function to read the pickled metadata from a File/Directory.
@ -207,7 +220,6 @@ def validate_account(metadata):
def validate_object(metadata):
if not metadata:
logging.warn('validate_object: No metadata')
return False
if X_TIMESTAMP not in metadata.keys() or \
@ -451,38 +463,6 @@ def create_account_metadata(acc_path):
return rmd
def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
"""
Ensure that a pickle file gets written to disk. The file is first written
to a tmp file location in the destination directory path, ensured it is
synced to disk, then moved to its final destination name.
This version takes advantage of Gluster's dot-prefix-dot-suffix naming
where a file named ".thefile.name.9a7aasv" is hashed to the same
Gluster node as "thefile.name". This ensures the renaming of a temp file
once written does not move it to another Gluster node.
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None (ignored)
:param pickle_protocol: protocol to pickle the obj with, defaults to 0
"""
dirname = os.path.dirname(dest)
basename = os.path.basename(dest)
tmpname = '.' + basename + '.' + \
md5(basename + str(random.random())).hexdigest()
tmppath = os.path.join(dirname, tmpname)
with open(tmppath, 'wb') as fo:
pickle.dump(obj, fo, pickle_protocol)
# TODO: This flush() method call turns into a flush() system call
# We'll need to wrap this as well, but we would do this by writing
# a context manager for our own open() method which returns an object
# in fo which makes the gluster API call.
fo.flush()
do_fsync(fo)
do_rename(tmppath, dest)
# The following dir_xxx calls should definitely be replaced
# with a Metadata class to encapsulate their implementation.
# :FIXME: For now we have them as functions, but we should
@ -557,11 +537,3 @@ def rmobjdir(dir_path):
raise
else:
return True
# Over-ride Swift's utils.write_pickle with ours
#
# FIXME: Is this even invoked anymore given we don't perform container or
# account updates?
import swift.common.utils
swift.common.utils.write_pickle = write_pickle

gluster/swift/obj/diskfile.py

@ -23,37 +23,37 @@ try:
except ImportError:
import random
import logging
from collections import defaultdict
from socket import gethostname
from hashlib import md5
from eventlet import sleep
from greenlet import getcurrent
from contextlib import contextmanager
from swift.common.utils import TRUE_VALUES, drop_buffer_cache, ThreadPool
from swift.common.utils import TRUE_VALUES, ThreadPool, config_true_value
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileNoSpace, DiskFileDeviceUnavailable
DiskFileNoSpace, DiskFileDeviceUnavailable, DiskFileNotOpen
from swift.common.swob import multi_range_iterator
from gluster.swift.common.exceptions import GlusterFileSystemOSError, \
GlusterFileSystemIOError
from gluster.swift.common.exceptions import GlusterFileSystemOSError
from gluster.swift.common.Glusterfs import mount
from gluster.swift.common.fs_utils import do_fstat, do_open, do_close, \
do_unlink, do_chown, os_path, do_fsync, do_fchown, do_stat, do_write, \
do_fdatasync, do_rename, Fake_file
do_unlink, do_chown, do_fsync, do_fchown, do_stat, do_write, do_read, \
do_fadvise64, do_rename, do_fdatasync, do_lseek
from gluster.swift.common.utils import read_metadata, write_metadata, \
validate_object, create_object_metadata, rmobjdir, dir_is_object, \
get_object_metadata
from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \
from gluster.swift.common.utils import X_CONTENT_TYPE, \
X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.obj.diskfile import DiskFile as SwiftDiskFile
# FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
# be back ported. See http://www.python.org/dev/peps/pep-0433/
O_CLOEXEC = 02000000
DEFAULT_DISK_CHUNK_SIZE = 65536
DEFAULT_BYTES_PER_SYNC = (512 * 1024 * 1024)
DEFAULT_KEEP_CACHE_SIZE = (5 * 1024 * 1024)
DEFAULT_MB_PER_SYNC = 512
# keep these lower-case
DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
@ -279,70 +279,130 @@ def _adjust_metadata(metadata):
return metadata
class DiskWriter(object):
class OnDiskManager(object):
"""
Management class for devices, providing common place for shared parameters
and methods not provided by the DiskFile class (which primarily services
the object server REST API layer).
The `get_diskfile()` method is how this implementation creates a `DiskFile`
object.
.. note::
This class is reference implementation specific and not part of the
pluggable on-disk backend API.
:param conf: caller provided configuration object
:param logger: caller provided logger
"""
def __init__(self, conf, logger):
self.logger = logger
self.disk_chunk_size = int(conf.get('disk_chunk_size',
DEFAULT_DISK_CHUNK_SIZE))
self.keep_cache_size = int(conf.get('keep_cache_size',
DEFAULT_KEEP_CACHE_SIZE))
self.bytes_per_sync = int(conf.get('mb_per_sync',
DEFAULT_MB_PER_SYNC)) * 1024 * 1024
self.devices = conf.get('devices', '/srv/node/')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
threads_per_disk = int(conf.get('threads_per_disk', '0'))
self.threadpools = defaultdict(
lambda: ThreadPool(nthreads=threads_per_disk))
def _get_dev_path(self, device):
"""
Return the path to a device, checking to see that it is a proper mount
point based on a configuration parameter.
:param device: name of target device
:returns: full path to the device, None if the path to the device is
not a proper mount point.
"""
if self.mount_check and not mount(self.devices, device):
dev_path = None
else:
dev_path = os.path.join(self.devices, device)
return dev_path
def get_diskfile(self, device, account, container, obj,
**kwargs):
dev_path = self._get_dev_path(device)
if not dev_path:
raise DiskFileDeviceUnavailable()
return DiskFile(self, dev_path, self.threadpools[device],
account, container, obj, **kwargs)
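# Illustrative usage (not part of the diff); the device path is a
# placeholder, and mount_check is disabled as in the unit tests below:
#     mgr = OnDiskManager({'devices': '/mnt/gluster-object',
#                          'mount_check': 'false'}, logger)
#     df = mgr.get_diskfile('vol0', 'ufo47', 'bar', 'z')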
class DiskFileWriter(object):
"""
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for DiskFile's writer()
requests. Serves as the context manager object for DiskFile's create()
method.
We just override the put() method for Gluster.
"""
def __init__(self, disk_file, fd, tmppath, threadpool):
self.disk_file = disk_file
self.fd = fd
self.tmppath = tmppath
self.upload_size = 0
self.last_sync = 0
self.threadpool = threadpool
def __init__(self, fd, tmppath, disk_file):
# Parameter tracking
self._fd = fd
self._tmppath = tmppath
self._disk_file = disk_file
# Internal attributes
self._upload_size = 0
self._last_sync = 0
def _write_entire_chunk(self, chunk):
bytes_per_sync = self._disk_file._mgr.bytes_per_sync
while chunk:
written = do_write(self._fd, chunk)
chunk = chunk[written:]
self._upload_size += written
# For large files sync every 512MB (by default) written
diff = self._upload_size - self._last_sync
if diff >= bytes_per_sync:
do_fdatasync(self._fd)
do_fadvise64(self._fd, self._last_sync, diff)
self._last_sync = self._upload_size
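# Illustrative cadence (not part of the diff): with the default
# mb_per_sync of 512, a 1.5 GB PUT crosses the threshold near the
# 512 MB and 1024 MB marks, each time issuing do_fdatasync() followed
# by do_fadvise64() over just the newly synced byte range.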
def write(self, chunk):
"""
Write a chunk of data into the temporary file.
Write a chunk of data to disk.
For this implementation, the data is written into a temporary file.
:param chunk: the chunk of data to write as a string object
:returns: the total number of bytes written to an object
"""
def _write_entire_chunk(chunk):
while chunk:
written = do_write(self.fd, chunk)
self.upload_size += written
chunk = chunk[written:]
self.threadpool.run_in_thread(_write_entire_chunk, chunk)
# For large files sync every 512MB (by default) written
diff = self.upload_size - self.last_sync
if diff >= self.disk_file.bytes_per_sync:
self.threadpool.force_run_in_thread(do_fdatasync, self.fd)
drop_buffer_cache(self.fd, self.last_sync, diff)
self.last_sync = self.upload_size
df = self._disk_file
df._threadpool.run_in_thread(self._write_entire_chunk, chunk)
return self._upload_size
def _finalize_put(self, metadata):
# Write out metadata before fsync() to ensure it is also forced to
# disk.
write_metadata(self.fd, metadata)
write_metadata(self._fd, metadata)
# We call fsync() before calling drop_cache() to lower the
# amount of redundant work the drop cache code will perform on
# the pages (now that after fsync the pages will be all
# clean).
do_fsync(self.fd)
do_fsync(self._fd)
# From the Department of the Redundancy Department, make sure
# we call drop_cache() after fsync() to avoid redundant work
# (pages all clean).
drop_buffer_cache(self.fd, 0, self.upload_size)
do_fadvise64(self._fd, self._last_sync, self._upload_size)
# At this point we know that the object's full directory path
# exists, so we can just rename it directly without using Swift's
# swift.common.utils.renamer(), which makes the directory path and
# adds extra stat() calls.
df = self.disk_file
data_file = os.path.join(df.put_datadir, df._obj)
df = self._disk_file
attempts = 1
while True:
try:
do_rename(self.tmppath, data_file)
do_rename(self._tmppath, df._data_file)
except OSError as err:
if err.errno in (errno.ENOENT, errno.EIO) \
and attempts < MAX_RENAME_ATTEMPTS:
@ -357,33 +417,32 @@ class DiskWriter(object):
# "The link named by tmppath does not exist; or, a
# directory component in data_file does not exist;
# or, tmppath or data_file is an empty string."
assert len(self.tmppath) > 0 and len(data_file) > 0
tpstats = do_stat(self.tmppath)
tfstats = do_fstat(self.fd)
assert len(self._tmppath) > 0 and len(df._data_file) > 0
tpstats = do_stat(self._tmppath)
tfstats = do_fstat(self._fd)
assert tfstats
if not tpstats or tfstats.st_ino != tpstats.st_ino:
# Temporary file name conflict
raise DiskFileError(
'DiskFile.put(): temporary file, %s, was'
' already renamed (targeted for %s)' % (
self.tmppath, data_file))
self._tmppath, df._data_file))
else:
# Data file target name now has a bad path!
dfstats = do_stat(df.put_datadir)
dfstats = do_stat(df._put_datadir)
if not dfstats:
raise DiskFileError(
'DiskFile.put(): path to object, %s, no'
' longer exists (targeted for %s)' % (
df.put_datadir,
data_file))
df._put_datadir, df._data_file))
else:
is_dir = stat.S_ISDIR(dfstats.st_mode)
if not is_dir:
raise DiskFileError(
'DiskFile.put(): path to object, %s,'
' no longer a directory (targeted for'
' %s)' % (df.put_datadir,
data_file))
' %s)' % (self._put_datadir,
df._data_file))
else:
# Let's retry since everything looks okay
logging.warn(
@ -391,77 +450,178 @@ class DiskWriter(object):
" initially failed (%s) but a"
" stat('%s') following that succeeded:"
" %r" % (
self.tmppath, data_file,
str(err), df.put_datadir,
dfstats))
self._tmppath, df._data_file, str(err),
df._put_datadir, dfstats))
attempts += 1
continue
else:
raise GlusterFileSystemOSError(
err.errno, "%s, os.rename('%s', '%s')" % (
err.strerror, self.tmppath, data_file))
err.strerror, self._tmppath, df._data_file))
else:
# Success!
break
# Close here so the calling context does not have to perform this
# in a thread.
do_close(self.fd)
# Close here so the calling context does not have to perform this,
# which keeps all file system operations in the threadpool context.
do_close(self._fd)
self._fd = None
def put(self, metadata, extension='.data'):
def put(self, metadata):
"""
Finalize writing the file on disk, and renames it from the temp file
to the real location. This should be called after the data has been
written to the temp file.
:param metadata: dictionary of metadata to be written
:param extension: extension to be used when making the file
"""
# Our caller will use '.data' here; we just ignore it since we map the
# URL directly to the file system.
assert self.tmppath is not None
assert self._tmppath is not None
metadata = _adjust_metadata(metadata)
df = self.disk_file
df = self._disk_file
if dir_is_object(metadata):
if not df.data_file:
# Does not exist, create it
data_file = os.path.join(df._obj_path, df._obj)
_, df._metadata = self.threadpool.force_run_in_thread(
df._create_dir_object, data_file, metadata)
df.data_file = os.path.join(df._container_path, data_file)
elif not df._is_dir:
# Exists, but as a file
raise DiskFileError('DiskFile.put(): directory creation failed'
' since the target, %s, already exists as'
' a file' % df.data_file)
df._threadpool.force_run_in_thread(
df._create_dir_object, df._data_file, metadata)
return
try:
self.threadpool.force_run_in_thread(self._finalize_put, metadata)
except GlusterFileSystemOSError as err:
if err.errno == errno.EISDIR:
if df._is_dir:
# A pre-existing directory already exists on the file
# system, perhaps gratuitously created when another
# object was created, or created externally to Swift
# REST API servicing (UFO use case).
raise DiskFileError('DiskFile.put(): file creation failed'
' since the target, %s, already exists as'
' a directory' % df.data_file)
raise
raise DiskFileError('DiskFile.put(): file creation failed since'
' the target, %s, already exists as a'
' directory' % df._data_file)
df._threadpool.force_run_in_thread(self._finalize_put, metadata)
# Avoid the unlink() system call as part of the mkstemp context
# cleanup
self.tmppath = None
df._metadata = metadata
df._filter_metadata()
# Mark that it actually exists now
df.data_file = os.path.join(df.datadir, df._obj)
class DiskFileReader(object):
"""
Encapsulation of the WSGI read context for servicing GET REST API
requests. Serves as the context manager object for the
:class:`swift.obj.diskfile.DiskFile` class's
:func:`swift.obj.diskfile.DiskFile.reader` method.
.. note::
The quarantining behavior of this method is considered implementation
specific, and is not required of the API.
.. note::
The arguments to the constructor are considered implementation
specific. The API does not define the constructor arguments.
:param fd: open file descriptor, -1 for a directory object
:param threadpool: thread pool to use for read operations
:param disk_chunk_size: size of reads from disk in bytes
:param obj_size: size of object on disk
:param keep_cache_size: maximum object size that will be kept in cache
:param iter_hook: called when __iter__ returns a chunk
:param keep_cache: should resulting reads be kept in the buffer cache
"""
def __init__(self, fd, threadpool, disk_chunk_size, obj_size,
keep_cache_size, iter_hook=None, keep_cache=False):
# Parameter tracking
self._fd = fd
self._threadpool = threadpool
self._disk_chunk_size = disk_chunk_size
self._iter_hook = iter_hook
if keep_cache:
# Caller suggests we keep this in cache, only do it if the
# object's size is less than the maximum.
self._keep_cache = obj_size < keep_cache_size
else:
self._keep_cache = False
# Internal Attributes
self._suppress_file_closing = False
def __iter__(self):
"""Returns an iterator over the data file."""
try:
dropped_cache = 0
bytes_read = 0
while True:
if self._fd != -1:
chunk = self._threadpool.run_in_thread(
do_read, self._fd, self._disk_chunk_size)
else:
chunk = None
if chunk:
bytes_read += len(chunk)
diff = bytes_read - dropped_cache
if diff > (1024 * 1024):
self._drop_cache(self._fd, dropped_cache, diff)
dropped_cache = bytes_read
yield chunk
if self._iter_hook:
self._iter_hook()
else:
diff = bytes_read - dropped_cache
if diff > 0:
self._drop_cache(dropped_cache, diff)
break
finally:
if not self._suppress_file_closing:
self.close()
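# Note (not part of the diff): the first _drop_cache() call above passes
# self._fd as an extra leading argument to the two-parameter
# _drop_cache(self, offset, length) defined below, so dropping cache
# mid-stream would raise a TypeError.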
def app_iter_range(self, start, stop):
"""Returns an iterator over the data file for range (start, stop)"""
if start or start == 0:
do_lseek(self._fd, start, os.SEEK_SET)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
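# Illustrative mapping (not part of the diff): an HTTP request with
# "Range: bytes=100-199" becomes app_iter_range(100, 200); the reader
# seeks to offset 100 and yields chunks until 100 bytes are out,
# trimming the final chunk if it overshoots.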
def app_iter_ranges(self, ranges, content_type, boundary, size):
"""Returns an iterator over the data file for a set of ranges"""
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
self.close()
def _drop_cache(self, offset, length):
"""Method for no-oping buffer cache drop method."""
if not self._keep_cache and self._fd > -1:
do_fadvise64(self._fd, offset, length)
def close(self):
"""
Close the open file handle if present.
"""
if self._fd is not None:
fd, self._fd = self._fd, None
if fd > -1:
do_close(fd)
class DiskFile(SwiftDiskFile):
class DiskFile(object):
"""
Manage object files on disk.
@ -471,152 +631,182 @@ class DiskFile(SwiftDiskFile):
gluster.common.constrains.gluster_check_object_creation() should
reject such requests.
:param path: path to devices on the node/mount path for UFO.
:param device: device name/account_name for UFO.
:param partition: partition on the device the object lives in
:param mgr: associated on-disk manager instance
:param dev_path: device name/account_name for UFO.
:param threadpool: thread pool in which to do blocking operations
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param logger: logger object for writing out log file messages
:param disk_chunk_size: size of chunks on file reads
:param bytes_per_sync: number of bytes between fdatasync calls
:param iter_hook: called when __iter__ returns a chunk
:param threadpool: thread pool in which to do blocking operations
:param obj_dir: ignored
:param mount_check: check the target device is a mount point and not on the
root volume
:param uid: user ID disk object should assume (file or directory)
:param gid: group ID disk object should assume (file or directory)
"""
def __init__(self, path, device, partition, account, container, obj,
logger, disk_chunk_size=DEFAULT_DISK_CHUNK_SIZE,
bytes_per_sync=DEFAULT_BYTES_PER_SYNC, iter_hook=None,
threadpool=None, obj_dir='objects', mount_check=False,
def __init__(self, mgr, dev_path, threadpool, account, container, obj,
uid=DEFAULT_UID, gid=DEFAULT_GID):
if mount_check and not mount(path, device):
raise DiskFileDeviceUnavailable()
self.disk_chunk_size = disk_chunk_size
self.bytes_per_sync = bytes_per_sync
self.iter_hook = iter_hook
obj = obj.strip(os.path.sep)
self._mgr = mgr
self._device_path = dev_path
self._threadpool = threadpool or ThreadPool(nthreads=0)
self._uid = int(uid)
self._gid = int(gid)
self._is_dir = False
self._logger = mgr.logger
self._metadata = None
self._fd = None
# Don't store a value for data_file until we know it exists.
self._data_file = None
if os.path.sep in obj:
self._obj_path, self._obj = os.path.split(obj)
self._container_path = os.path.join(self._device_path, container)
obj = obj.strip(os.path.sep)
obj_path, self._obj = os.path.split(obj)
if obj_path:
self._obj_path = obj_path.strip(os.path.sep)
self._datadir = os.path.join(self._container_path, self._obj_path)
else:
self._obj_path = ''
self._obj = obj
self._datadir = self._container_path
if self._obj_path:
self.name = os.path.join(container, self._obj_path)
else:
self.name = container
# Absolute path for object directory.
self.datadir = os.path.join(path, device, self.name)
self.device_path = os.path.join(path, device)
self._container_path = os.path.join(path, device, container)
if _use_put_mount:
self.put_datadir = os.path.join(self.device_path + '_PUT',
self.name)
self._put_datadir = os.path.join(
self._device_path + '_PUT', container, self._obj_path)
else:
self.put_datadir = self.datadir
self._is_dir = False
self.logger = logger
self._metadata = None
# Don't store a value for data_file until we know it exists.
self.data_file = None
self._data_file_size = None
self.fp = None
self.iter_etag = None
self.started_at_0 = False
self.read_to_eof = False
self.quarantined_dir = None
self.suppress_file_closing = False
self._verify_close = False
self.threadpool = threadpool or ThreadPool(nthreads=0)
# FIXME(portante): this attribute is set after open and affects the
# behavior of the class (i.e. public interface)
self.keep_cache = False
self.uid = int(uid)
self.gid = int(gid)
self._put_datadir = self._datadir
self._data_file = os.path.join(self._put_datadir, self._obj)
def open(self, verify_close=False):
def open(self):
"""
Open the file and read the metadata.
Open the object.
This method must populate the _metadata attribute.
This implementation opens the data file representing the object, reads
the associated metadata in the extended attributes, additionally
combining metadata from fast-POST `.meta` files.
:param verify_close: force implicit close to verify_file, no effect on
explicit close.
.. note::
:raises DiskFileCollision: on md5 collision
An implementation is allowed to raise any of the following
exceptions, but is only required to raise `DiskFileNotExist` when
the object representation does not exist.
:raises DiskFileNotExist: if the object does not exist
:returns: itself for use as a context manager
"""
data_file = os.path.join(self.put_datadir, self._obj)
# Writes are always performed to a temporary file
try:
fd = do_open(data_file, os.O_RDONLY | os.O_EXCL)
fd = do_open(self._data_file, os.O_RDONLY | O_CLOEXEC)
except GlusterFileSystemOSError as err:
self.logger.exception(
"Error opening file, %s :: %s", data_file, err)
if err.errno in (errno.ENOENT, errno.ENOTDIR):
# If the file does not exist, or some part of the path does not
# exist, raise the expected DiskFileNotExist
raise DiskFileNotExist
raise
else:
try:
stats = do_fstat(fd)
except GlusterFileSystemOSError as err:
self.logger.exception(
"Error stat'ing open file, %s :: %s", data_file, err)
else:
if not stats:
return
self._is_dir = stat.S_ISDIR(stats.st_mode)
self.data_file = data_file
obj_size = stats.st_size
self._metadata = read_metadata(fd)
if not self._metadata:
create_object_metadata(fd)
self._metadata = read_metadata(fd)
if not validate_object(self._metadata):
create_object_metadata(fd)
self._metadata = read_metadata(fd)
assert self._metadata is not None
self._filter_metadata()
if self._is_dir:
# Use a fake file handle to satisfy the super class's
# __iter__ method requirement when dealing with
# directories as objects.
os.close(fd)
self.fp = Fake_file(data_file)
do_close(fd)
obj_size = 0
self._fd = -1
else:
self.fp = os.fdopen(fd, 'rb')
self._verify_close = verify_close
self._metadata = self._metadata or {}
self._fd = fd
self._obj_size = obj_size
return self
def _drop_cache(self, fd, offset, length):
if fd >= 0:
super(DiskFile, self)._drop_cache(fd, offset, length)
def close(self, verify_file=True):
"""
Close the file. Will handle quarantining file if necessary.
:param verify_file: Defaults to True. If false, will not check
file to see if it needs quarantining.
"""
if self.fp:
do_close(self.fp)
self.fp = None
self._metadata = None
self._data_file_size = None
self._verify_close = False
def _filter_metadata(self):
if self._metadata is None:
return
if X_TYPE in self._metadata:
self._metadata.pop(X_TYPE)
if X_OBJECT_TYPE in self._metadata:
self._metadata.pop(X_OBJECT_TYPE)
def __enter__(self):
"""
Context enter.
.. note::
An implementation shall raise `DiskFileNotOpen` when the caller has not
previously invoked the :func:`swift.obj.diskfile.DiskFile.open`
method.
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
"""
Context exit.
.. note::
This method will be invoked by the object server while servicing
the REST API *before* the object has actually been read. It is the
responsibility of the implementation to properly handle that.
"""
self._metadata = None
if self._fd is not None:
fd, self._fd = self._fd, None
if self._fd > -1:
do_close(fd)
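# Note (not part of the diff): after the swap above, self._fd is None,
# so this comparison never succeeds and __exit__ never reaches
# do_close(); DiskFileReader.close() above tests the local fd instead.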
def get_metadata(self):
"""
Provide the metadata for a previously opened object as a dictionary.
:returns: object's metadata dictionary
:raises DiskFileNotOpen: if the
:func:`swift.obj.diskfile.DiskFile.open` method was not previously
invoked
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object without requiring the caller to open
the object first.
:returns: metadata dictionary for an object
:raises DiskFileError: this implementation will raise the same
errors as the `open()` method.
"""
with self.open():
return self.get_metadata()
def reader(self, iter_hook=None, keep_cache=False):
"""
Return a :class:`swift.common.swob.Response` class compatible
"`app_iter`" object as defined by
:class:`swift.obj.diskfile.DiskFileReader`.
For this implementation, the responsibility of closing the open file
is passed to the :class:`swift.obj.diskfile.DiskFileReader` object.
:param iter_hook: called when __iter__ returns a chunk
:param keep_cache: caller's preference for keeping data read in the
OS buffer cache
:returns: a :class:`swift.obj.diskfile.DiskFileReader` object
"""
if self._metadata is None:
raise DiskFileNotOpen()
dr = DiskFileReader(
self._fd, self._threadpool, self._mgr.disk_chunk_size,
self._obj_size, self._mgr.keep_cache_size,
iter_hook=iter_hook, keep_cache=keep_cache)
# At this point the reader object is now responsible for closing
# the file pointer.
self._fd = None
return dr
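# Illustrative GET flow (not part of the diff): once reader() returns,
# the DiskFile no longer owns the descriptor, so iteration may safely
# outlive the open() context:
#     with df.open():
#         reader = df.reader()
#     for chunk in reader:      # reader closes the fd when exhausted
#         handle(chunk)         # hypothetical consumer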
def _create_dir_object(self, dir_path, metadata=None):
"""
Create a directory object at the specified path. No check is made to
@ -648,7 +838,7 @@ class DiskFile(SwiftDiskFile):
stack = []
while True:
md = None if cur_path != full_path else metadata
ret, newmd = make_directory(cur_path, self.uid, self.gid, md)
ret, newmd = make_directory(cur_path, self._uid, self._gid, md)
if ret:
break
# Some path of the parent did not exist, so loop around and
@ -665,27 +855,41 @@ class DiskFile(SwiftDiskFile):
while child:
cur_path = os.path.join(cur_path, child)
md = None if cur_path != full_path else metadata
ret, newmd = make_directory(cur_path, self.uid, self.gid, md)
ret, newmd = make_directory(cur_path, self._uid, self._gid, md)
if not ret:
raise DiskFileError("DiskFile._create_dir_object(): failed to"
" create directory path to target, %s,"
" on subpath: %s" % (full_path, cur_path))
child = stack.pop() if stack else None
return True, newmd
# Exists, but as a file
#raise DiskFileError('DiskFile.put(): directory creation failed'
# ' since the target, %s, already exists as'
# ' a file' % df._data_file)
@contextmanager
def create(self, size=None):
"""
Contextmanager to make a temporary file, optionally of a specified
initial size.
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
For Gluster, we first optimistically create the temporary file using
the "rsync-friendly" .NAME.random naming. If we find that some path to
the file does not exist, we then create that path and then create the
temporary file again. If we get a file name conflict, we'll retry using
different random suffixes 1,000 times before giving up.
.. note::
An implementation is not required to perform on-disk
preallocations even if the parameter is specified. But if it does
and it fails, it must raise a `DiskFileNoSpace` exception.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
data_file = os.path.join(self.put_datadir, self._obj)
data_file = os.path.join(self._put_datadir, self._obj)
# Assume the full directory path exists to the file already, and
# construct the proper name for the temporary file.
@ -695,7 +899,7 @@ class DiskFile(SwiftDiskFile):
postfix = md5(self._obj + _cur_host + _cur_pid + cur_thread
+ str(random.random())).hexdigest()
tmpfile = '.' + self._obj + '.' + postfix
tmppath = os.path.join(self.put_datadir, tmpfile)
tmppath = os.path.join(self._put_datadir, tmpfile)
try:
fd = do_open(tmppath,
os.O_WRONLY | os.O_CREAT | os.O_EXCL | O_CLOEXEC)
@ -752,35 +956,39 @@ class DiskFile(SwiftDiskFile):
dw = None
try:
# Ensure it is properly owned before we make it available.
do_fchown(fd, self.uid, self.gid)
do_fchown(fd, self._uid, self._gid)
# NOTE: we do not perform the fallocate() call at all. We ignore
# it completely.
dw = DiskWriter(self, fd, tmppath, self.threadpool)
# it completely since at the time of this writing FUSE does not
# support it.
dw = DiskFileWriter(fd, tmppath, self)
yield dw
finally:
if dw is not None:
try:
if dw.fd:
do_close(dw.fd)
if dw._fd:
do_close(dw._fd)
except OSError:
pass
if dw.tmppath:
do_unlink(dw.tmppath)
if dw._tmppath:
do_unlink(dw._tmppath)
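# Illustrative naming (not part of the diff; digest made up): an object
# "z" is first created as ".z.9a7aasv..." in its final directory, and
# Gluster's dot-prefix-dot-suffix hashing places that temporary name on
# the same node as "z", so the rename in put() never crosses nodes.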
def put_metadata(self, metadata, tombstone=False):
def write_metadata(self, metadata):
"""
Short hand for putting metadata to .meta and .ts files.
Write a block of metadata to an object without requiring the caller to
open the object first.
:param metadata: dictionary of metadata to be written
:param tombstone: whether or not we are writing a tombstone
:param metadata: dictionary of metadata to be associated with the
object
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
if tombstone:
# We don't write tombstone files. So do nothing.
return
# FIXME: we need to validate system metadata is preserved
metadata = _adjust_metadata(metadata)
data_file = os.path.join(self.put_datadir, self._obj)
self.threadpool.run_in_thread(write_metadata, data_file, metadata)
data_file = os.path.join(self._put_datadir, self._obj)
self._threadpool.run_in_thread(
write_metadata, data_file, metadata)
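# Illustrative usage (not part of the diff), matching the unit tests
# below:
#     df.write_metadata({'Content-Type': 'application/octet-stream',
#                        'a': 'b'})
# The metadata xattrs are written against the data file path directly;
# no prior open() is required.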
def _delete(self):
def _unlinkold(self):
if self._is_dir:
# Marker, or object, directory.
#
@ -789,19 +997,21 @@ class DiskFile(SwiftDiskFile):
# metadata tag which will make this directory a
# fake-filesystem-only directory and will be deleted when the
# container or parent directory is deleted.
metadata = read_metadata(self.data_file)
#
# FIXME: Ideally we should use an atomic metadata update operation
metadata = read_metadata(self._data_file)
if dir_is_object(metadata):
metadata[X_OBJECT_TYPE] = DIR_NON_OBJECT
write_metadata(self.data_file, metadata)
rmobjdir(self.data_file)
write_metadata(self._data_file, metadata)
rmobjdir(self._data_file)
else:
# Delete file object
do_unlink(self.data_file)
do_unlink(self._data_file)
# Garbage collection of non-object directories. Now that we
# deleted the file, determine if the current directory and any
# parent directory may be deleted.
dirname = os.path.dirname(self.data_file)
dirname = os.path.dirname(self._data_file)
while dirname and dirname != self._container_path:
# Try to remove any directories that are not objects.
if not rmobjdir(dirname):
@ -813,58 +1023,31 @@ class DiskFile(SwiftDiskFile):
def delete(self, timestamp):
"""
Remove any older versions of the object file. Any file that has an
older timestamp than timestamp will be deleted.
Delete the object.
This implementation creates a tombstone file using the given
timestamp, and removes any older versions of the object file. Any
file that has an older timestamp than timestamp will be deleted.
.. note::
An implementation is free to use or ignore the timestamp
parameter.
:param timestamp: timestamp to compare with each file
:raises DiskFileError: this implementation will raise the same
errors as the `create()` method.
"""
timestamp_fl = float(timestamp)
data_file = os.path.join(self.put_datadir, self._obj)
try:
metadata = read_metadata(data_file)
except (GlusterFileSystemIOError, GlusterFileSystemOSError) as err:
metadata = read_metadata(self._data_file)
except (IOError, OSError) as err:
if err.errno != errno.ENOENT:
raise
else:
try:
old_ts = float(metadata[X_TIMESTAMP]) >= timestamp_fl
except (KeyError, ValueError):
# If no X-Timestamp to compare against, or the timestamp is
# not a valid float, we'll just delete the object anyways.
old_ts = False
if not old_ts:
self.threadpool.run_in_thread(self._delete)
self._metadata = {}
self.data_file = None
if metadata[X_TIMESTAMP] >= timestamp:
return
def _get_data_file_size(self):
"""
Returns the os_path.getsize for the file. Raises an exception if this
file does not match the Content-Length stored in the metadata, or if
self.data_file does not exist.
self._threadpool.run_in_thread(self._unlinkold)
:returns: file size as an int
:raises DiskFileError: on file size mismatch.
:raises DiskFileNotExist: on file not existing (including deleted)
"""
if self._is_dir:
# Directories have no size.
return 0
try:
file_size = 0
if self.data_file:
def _old_getsize():
file_size = os_path.getsize(self.data_file)
if X_CONTENT_LENGTH in self._metadata:
metadata_size = int(self._metadata[X_CONTENT_LENGTH])
if file_size != metadata_size:
# FIXME - bit rot detection?
self._metadata[X_CONTENT_LENGTH] = file_size
write_metadata(self.data_file, self._metadata)
return file_size
file_size = self.threadpool.run_in_thread(_old_getsize)
return file_size
except OSError as err:
if err.errno != errno.ENOENT:
raise
raise DiskFileNotExist('Data File does not exist.')
self._metadata = None
self._data_file = None

gluster/swift/obj/server.py

@ -21,7 +21,7 @@ import gluster.swift.common.constraints # noqa
from swift.obj import server
from gluster.swift.obj.diskfile import DiskFile
from gluster.swift.obj.diskfile import OnDiskManager
class ObjectController(server.ObjectController):
@ -31,33 +31,52 @@ class ObjectController(server.ObjectController):
stored on disk and already updated by virtue of performing the file system
operations directly).
"""
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
def _diskfile(self, device, partition, account, container, obj, **kwargs):
"""Utility method for instantiating a DiskFile."""
kwargs.setdefault('mount_check', self.mount_check)
kwargs.setdefault('bytes_per_sync', self.bytes_per_sync)
kwargs.setdefault('disk_chunk_size', self.disk_chunk_size)
kwargs.setdefault('threadpool', self.threadpools[device])
kwargs.setdefault('obj_dir', server.DATADIR)
return DiskFile(self.devices, device, partition, account,
container, obj, self.logger, **kwargs)
:param conf: WSGI configuration parameter
"""
# FIXME: Gluster currently does not support x-delete-at, as there is
# no mechanism in GlusterFS itself to expire an object, or an external
# process that will cull expired objects.
try:
self.allowed_headers.remove('x-delete-at')
except KeyError:
pass
# Common on-disk hierarchy shared across account, container and object
# servers.
self._ondisk_mgr = OnDiskManager(conf, self.logger)
def container_update(self, op, account, container, obj, request,
headers_out, objdevice):
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._ondisk_mgr.get_diskfile(device, account, container, obj,
**kwargs)
def container_update(self, *args, **kwargs):
"""
Update the container when objects are updated.
For Gluster, this is just a no-op, since a container is just the
directory holding all the objects (sub-directory hierarchy of files).
"""
return
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param request: the original request object driving the update
:param headers_out: dictionary of headers to send in the container
request(s)
:param objdevice: device name that the object is in
def delete_at_update(self, *args, **kwargs):
"""
Update the expiring objects container when objects are updated.
FIXME: Gluster currently does not support delete_at headers.
"""
return

swift (submodule)
@ -1 +1 @@
Subproject commit 7accddf1c3f54f67cf29d6eb69e416f798af6e23
Subproject commit 4bfe6748fd8e746460f5428f3dd161c82b1443a2

test/unit/common/test_fs_utils.py

@ -34,19 +34,6 @@ def mock_os_fdatasync(fd):
return True
class TestFakefile(unittest.TestCase):
""" Tests for common.fs_utils.Fake_file """
def test_Fake_file(self):
path = "/tmp/bar"
ff = fs.Fake_file(path)
self.assertEqual(path, ff.path)
self.assertEqual(0, ff.tell())
self.assertEqual(None, ff.read(50))
self.assertEqual(-1, ff.fileno())
self.assertEqual(None, ff.close())
class TestFsUtils(unittest.TestCase):
""" Tests for common.fs_utils """
@ -199,16 +186,6 @@ class TestFsUtils(unittest.TestCase):
def test_do_open(self):
_fd, tmpfile = mkstemp()
try:
f = fs.do_open(tmpfile, 'r')
try:
f.write('test')
except IOError as err:
pass
else:
self.fail("IOError expected")
finally:
f.close()
fd = fs.do_open(tmpfile, os.O_RDONLY)
try:
os.write(fd, 'test')
@ -222,14 +199,6 @@ class TestFsUtils(unittest.TestCase):
os.close(_fd)
os.remove(tmpfile)
def test_do_open_err(self):
try:
fs.do_open(os.path.join('/tmp', str(random.random())), 'r')
except GlusterFileSystemIOError:
pass
else:
self.fail("GlusterFileSystemIOError expected")
def test_do_open_err_int_mode(self):
try:
fs.do_open(os.path.join('/tmp', str(random.random())),
@ -463,8 +432,6 @@ class TestFsUtils(unittest.TestCase):
pass
else:
self.fail("OSError expected")
fp = open(tmpfile)
fs.do_close(fp)
finally:
os.remove(tmpfile)
@ -482,22 +449,6 @@ class TestFsUtils(unittest.TestCase):
finally:
os.remove(tmpfile)
def test_do_close_err_fp(self):
fd, tmpfile = mkstemp()
os.close(fd)
fp = open(tmpfile, 'w')
try:
fd = fp.fileno()
os.close(fd)
try:
fs.do_close(fp)
except GlusterFileSystemIOError:
pass
else:
self.fail("GlusterFileSystemIOError expected")
finally:
os.remove(tmpfile)
def test_do_unlink(self):
fd, tmpfile = mkstemp()
try:

test/unit/common/test_utils.py

@ -26,7 +26,6 @@ import tarfile
import shutil
from collections import defaultdict
from mock import patch
from swift.common.utils import normalize_timestamp
from gluster.swift.common import utils, Glusterfs
from gluster.swift.common.exceptions import GlusterFileSystemOSError
@ -368,7 +367,7 @@ class TestUtils(unittest.TestCase):
assert md[utils.X_OBJECT_TYPE] == utils.FILE
assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_ETAG] == utils._get_etag(tf.name)
def test_get_object_metadata_dir(self):
@ -381,7 +380,7 @@ class TestUtils(unittest.TestCase):
assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(td))
assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
finally:
os.rmdir(td)
@ -406,7 +405,7 @@ class TestUtils(unittest.TestCase):
assert md[utils.X_OBJECT_TYPE] == utils.FILE
assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_ETAG] == utils._get_etag(tf.name)
def test_create_object_metadata_dir(self):
@ -428,7 +427,7 @@ class TestUtils(unittest.TestCase):
assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(td))
assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
finally:
os.rmdir(td)
@ -445,8 +444,8 @@ class TestUtils(unittest.TestCase):
try:
exp_md = {
utils.X_TYPE: (utils.CONTAINER, 0),
utils.X_TIMESTAMP: (normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_TIMESTAMP: (utils.normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (utils.normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_OBJECTS_COUNT: (3, 0),
utils.X_BYTES_USED: (47, 0),
}
@ -467,8 +466,8 @@ class TestUtils(unittest.TestCase):
try:
exp_md = {
utils.X_TYPE: (utils.ACCOUNT, 0),
utils.X_TIMESTAMP: (normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_TIMESTAMP: (utils.normalize_timestamp(os.path.getctime(td)), 0),
utils.X_PUT_TIMESTAMP: (utils.normalize_timestamp(os.path.getmtime(td)), 0),
utils.X_OBJECTS_COUNT: (0, 0),
utils.X_BYTES_USED: (0, 0),
utils.X_CONTAINER_COUNT: (2, 0),
@ -498,8 +497,8 @@ class TestUtils(unittest.TestCase):
for key in self.cont_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == (utils.CONTAINER, 0)
assert md[utils.X_TIMESTAMP] == (normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_TIMESTAMP] == (utils.normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (utils.normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_OBJECTS_COUNT] == (0, 0)
assert md[utils.X_BYTES_USED] == (0, 0)
finally:
@ -524,8 +523,8 @@ class TestUtils(unittest.TestCase):
for key in self.acct_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == (utils.ACCOUNT, 0)
assert md[utils.X_TIMESTAMP] == (normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_TIMESTAMP] == (utils.normalize_timestamp(os.path.getctime(td)), 0)
assert md[utils.X_PUT_TIMESTAMP] == (utils.normalize_timestamp(os.path.getmtime(td)), 0)
assert md[utils.X_OBJECTS_COUNT] == (0, 0)
assert md[utils.X_BYTES_USED] == (0, 0)
assert md[utils.X_CONTAINER_COUNT] == (0, 0)
@ -608,40 +607,6 @@ class TestUtils(unittest.TestCase):
os.chdir(orig_cwd)
shutil.rmtree(td)
def test_write_pickle(self):
td = tempfile.mkdtemp()
try:
fpp = os.path.join(td, 'pp')
# FIXME: Remove this patch when coverage.py can handle eventlet
with patch("os.fsync", _mock_os_fsync):
utils.write_pickle('pickled peppers', fpp)
with open(fpp, "rb") as f:
contents = f.read()
s = pickle.loads(contents)
assert s == 'pickled peppers', repr(s)
finally:
shutil.rmtree(td)
def test_write_pickle_ignore_tmp(self):
tf = tempfile.NamedTemporaryFile()
td = tempfile.mkdtemp()
try:
fpp = os.path.join(td, 'pp')
# Also test an explicit pickle protocol
# FIXME: Remove this patch when coverage.py can handle eventlet
with patch("os.fsync", _mock_os_fsync):
utils.write_pickle('pickled peppers', fpp, tmp=tf.name,
pickle_protocol=2)
with open(fpp, "rb") as f:
contents = f.read()
s = pickle.loads(contents)
assert s == 'pickled peppers', repr(s)
with open(tf.name, "rb") as f:
contents = f.read()
assert contents == ''
finally:
shutil.rmtree(td)
def test_check_user_xattr_bad_path(self):
assert False == utils.check_user_xattr("/tmp/foo/bar/check/user/xattr")

test/unit/obj/test_diskfile.py

@ -22,20 +22,22 @@ import unittest
import tempfile
import shutil
import mock
from eventlet import tpool
from mock import patch
from hashlib import md5
from copy import deepcopy
from swift.common.utils import normalize_timestamp
from swift.common.exceptions import DiskFileNotExist, DiskFileError, \
DiskFileNoSpace
DiskFileNoSpace, DiskFileNotOpen
from swift.common.utils import ThreadPool
from gluster.swift.common.exceptions import GlusterFileSystemOSError
import gluster.swift.common.utils
from gluster.swift.common.utils import normalize_timestamp
import gluster.swift.obj.diskfile
from gluster.swift.obj.diskfile import DiskFile
from gluster.swift.obj.diskfile import DiskFile, OnDiskManager
from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \
X_OBJECT_TYPE, DIR_OBJECT
from gluster.swift.common.fs_utils import Fake_file
from test.unit.common.test_utils import _initxattr, _destroyxattr
from test.unit import FakeLogger
@ -48,13 +50,7 @@ def _mapit(filename_or_fd):
statmeth = os.fstat
else:
statmeth = os.lstat
try:
stats = statmeth(filename_or_fd)
except OSError as err:
if err.errno == errno.ENOENT:
raise GlusterFileSystemOSError(
err.errno, '%s, os.fstat(%s)' % (err.strerror, filename_or_fd))
raise
return stats.st_ino
@ -62,7 +58,7 @@ def _mock_read_metadata(filename_or_fd):
global _metadata
ino = _mapit(filename_or_fd)
if ino in _metadata:
md = _metadata[ino].copy()
md = _metadata[ino]
else:
md = {}
return md
@ -71,7 +67,7 @@ def _mock_read_metadata(filename_or_fd):
def _mock_write_metadata(filename_or_fd, metadata):
global _metadata
ino = _mapit(filename_or_fd)
_metadata[ino] = metadata.copy()
_metadata[ino] = metadata
def _mock_clear_metadata():
@ -103,6 +99,8 @@ class TestDiskFile(unittest.TestCase):
""" Tests for gluster.swift.obj.diskfile """
def setUp(self):
self._orig_tpool_exc = tpool.execute
tpool.execute = lambda f, *args, **kwargs: f(*args, **kwargs)
self.lg = FakeLogger()
_initxattr()
_mock_clear_metadata()
@ -117,8 +115,12 @@ class TestDiskFile(unittest.TestCase):
self._saved_do_fsync = gluster.swift.obj.diskfile.do_fsync
gluster.swift.obj.diskfile.do_fsync = _mock_do_fsync
self.td = tempfile.mkdtemp()
self.conf = dict(devices=self.td, mb_per_sync=2,
keep_cache_size=(1024 * 1024), mount_check=False)
self.mgr = OnDiskManager(self.conf, self.lg)
def tearDown(self):
tpool.execute = self._orig_tpool_exc
self.lg = None
_destroyxattr()
gluster.swift.obj.diskfile.write_metadata = self._saved_df_wm
@ -129,38 +131,29 @@ class TestDiskFile(unittest.TestCase):
shutil.rmtree(self.td)
def _get_diskfile(self, d, p, a, c, o, **kwargs):
return DiskFile(self.td, d, p, a, c, o, self.lg, **kwargs)
return self.mgr.get_diskfile(d, a, c, o, **kwargs)
def test_constructor_no_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._mgr is self.mgr
assert gdf._device_path == os.path.join(self.td, "vol0")
assert isinstance(gdf._threadpool, ThreadPool)
assert gdf._uid == DEFAULT_UID
assert gdf._gid == DEFAULT_GID
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf.name == "bar"
assert gdf.datadir == os.path.join(self.td, "vol0", "bar")
assert gdf.device_path == os.path.join(self.td, "vol0")
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
assert gdf.disk_chunk_size == 65536
assert gdf.iter_hook is None
assert gdf.logger == self.lg
assert gdf.uid == DEFAULT_UID
assert gdf.gid == DEFAULT_GID
assert gdf._metadata == None
assert gdf.data_file is None
assert gdf.fp is None
assert gdf.iter_etag is None
assert not gdf.started_at_0
assert not gdf.read_to_eof
assert gdf.quarantined_dir is None
assert not gdf.keep_cache
assert not gdf._is_dir
assert gdf._datadir == os.path.join(self.td, "vol0", "bar"), gdf._datadir
assert gdf._datadir == gdf._put_datadir
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
assert gdf._is_dir is False
assert gdf._logger == self.lg
assert gdf._fd is None
def test_constructor_leadtrail_slash(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "/b/a/z/")
assert gdf._obj == "z"
assert gdf._obj_path == os.path.join("b", "a")
assert gdf.name == os.path.join("bar", "b", "a")
assert gdf.datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
assert gdf.device_path == os.path.join(self.td, "vol0")
assert gdf._obj_path == "b/a"
assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a"), gdf._datadir
def test_open_no_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
@ -180,11 +173,17 @@ class TestDiskFile(unittest.TestCase):
'Content-Type': 'application/octet-stream'}
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert gdf._fd is None
assert gdf._metadata is None
assert not gdf._is_dir
assert gdf.fp is not None
with gdf.open():
assert gdf._data_file == the_file
assert not gdf._is_dir
assert gdf._fd is not None
assert gdf._metadata == exp_md
self.assertRaises(DiskFileNotOpen, gdf.get_metadata)
self.assertRaises(DiskFileNotOpen, gdf.reader)
self.assertRaises(DiskFileNotOpen, gdf.__enter__)
def test_open_existing_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
@ -205,11 +204,14 @@ class TestDiskFile(unittest.TestCase):
del exp_md['X-Object-Type']
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert gdf._fd is None
assert gdf._metadata is None
assert not gdf._is_dir
assert gdf.fp is not None
assert gdf._metadata == exp_md
with gdf.open():
assert not gdf._is_dir
assert gdf._data_file == the_file
assert gdf._fd is not None
assert gdf._metadata == exp_md, "%r != %r" % (gdf._metadata, exp_md)
def test_open_invalid_existing_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
@ -225,10 +227,10 @@ class TestDiskFile(unittest.TestCase):
_metadata[_mapit(the_file)] = inv_md
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert not gdf._is_dir
assert gdf.fp is not None
assert gdf._fd is None
with gdf.open():
assert gdf._data_file == the_file
assert gdf._metadata != inv_md
def test_open_isdir(self):
@ -248,90 +250,101 @@ class TestDiskFile(unittest.TestCase):
del exp_md['X-Object-Type']
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "d")
assert gdf._obj == "d"
assert gdf._is_dir is False
with gdf.open():
assert gdf.data_file == the_dir
assert gdf._is_dir
assert gdf._data_file == the_dir
assert gdf._metadata == exp_md
def test_constructor_chunk_size(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z",
disk_chunk_size=8192)
assert gdf.disk_chunk_size == 8192
def _create_and_get_diskfile(self, dev, par, acc, con, obj):
# FIXME: assumes account === volume
the_path = os.path.join(self.td, dev, con)
the_file = os.path.join(the_path, obj)
base_obj = os.path.basename(the_file)
base_dir = os.path.dirname(the_file)
os.makedirs(base_dir)
with open(the_file, "wb") as fd:
fd.write("y" * 256)
gdf = self._get_diskfile(dev, par, acc, con, obj)
assert gdf._obj == base_obj
assert not gdf._is_dir
assert gdf._fd is None
return gdf
def test_constructor_iter_hook(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z",
iter_hook='hook')
assert gdf.iter_hook == 'hook'
def test_reader(self):
closed = [False]
fd = [-1]
def test_close_no_open_fp(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
gdf._is_dir = False
self.called = False
def mock_close(*args, **kwargs):
closed[0] = True
os.close(fd[0])
def our_do_close(fp):
self.called = True
with mock.patch("gluster.swift.obj.diskfile.do_close", mock_close):
gdf = self._create_and_get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
assert gdf._fd is not None
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
reader = gdf.reader()
assert reader._fd is not None
fd[0] = reader._fd
chunks = [ck for ck in reader]
assert reader._fd is None
assert closed[0]
assert len(chunks) == 1, repr(chunks)
with mock.patch("gluster.swift.obj.diskfile.do_close", our_do_close):
gdf.close()
assert not self.called
assert gdf.fp is None
def test_reader_disk_chunk_size(self):
conf = dict(disk_chunk_size=64)
conf.update(self.conf)
self.mgr = OnDiskManager(conf, self.lg)
gdf = self._create_and_get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
reader = gdf.reader()
try:
assert reader._disk_chunk_size == 64
chunks = [ck for ck in reader]
finally:
reader.close()
assert len(chunks) == 4, repr(chunks)
for chunk in chunks:
assert len(chunk) == 64, repr(chunks)
def test_reader_iter_hook(self):
called = [0]
def mock_sleep(*args, **kwargs):
called[0] += 1
gdf = self._create_and_get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
reader = gdf.reader(iter_hook=mock_sleep)
try:
chunks = [ck for ck in reader]
finally:
reader.close()
assert len(chunks) == 1, repr(chunks)
assert called[0] == 1, called
def test_reader_dir_object(self):
called = [False]
def our_do_close(fd):
called[0] = True
os.close(fd)
def test_all_dir_object(self):
the_cont = os.path.join(self.td, "vol0", "bar")
the_dir = "dir"
self.called = False
os.makedirs(os.path.join(the_cont, the_dir))
os.makedirs(os.path.join(the_cont, "dir"))
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir")
with gdf.open():
ret = isinstance(gdf.fp, Fake_file)
self.assertTrue(ret)
# Get a "Fake_file" pointer
ffp = gdf.fp
# This is expected to call the Fake_file interfaces
ret = ffp.tell()
self.assertEqual(ret, 0)
ret = ffp.read(1)
self.assertEqual(ret, None)
ret = ffp.fileno()
self.assertEqual(ret, -1)
def our_do_close(ffp):
self.called = True
reader = gdf.reader()
try:
chunks = [ck for ck in reader]
assert len(chunks) == 0, repr(chunks)
with mock.patch("gluster.swift.obj.diskfile.do_close",
our_do_close):
ret = ffp.close()
self.assertEqual(ret, None)
self.assertFalse(self.called)
def test_close_file_object(self):
the_cont = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_cont, "z")
self.called = False
os.makedirs(the_cont)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
def our_do_close(fp):
self.called = True
with mock.patch("gluster.swift.obj.diskfile.do_close",
our_do_close):
with gdf.open():
assert not self.called
assert self.called
def test_is_deleted(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
assert gdf.is_deleted()
gdf.data_file = os.path.join(self.td, "bar")
assert not gdf.is_deleted()
reader.close()
assert not called[0]
finally:
reader.close()
def test_create_dir_object_no_md(self):
the_cont = os.path.join(self.td, "vol0", "bar")
@@ -405,100 +418,79 @@ class TestDiskFile(unittest.TestCase):
self.assertFalse(os.path.isdir(the_dir))
self.assertFalse(_mapit(the_dir) in _metadata)
def test_put_metadata(self):
the_dir = os.path.join(self.td, "vol0", "bar", "z")
def test_write_metadata(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_path, "z")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
md = {'Content-Type': 'application/octet-stream', 'a': 'b'}
gdf.put_metadata(md.copy())
assert gdf._metadata is None
fmd = _metadata[_mapit(the_dir)]
md.update({'X-Object-Type': 'file', 'X-Type': 'Object'})
assert fmd == md, "on-disk md = %r, md = %r" % (fmd, md)
gdf.write_metadata(md.copy())
on_disk_md = _metadata[_mapit(the_dir)]
del on_disk_md['X-Type']
del on_disk_md['X-Object-Type']
assert on_disk_md == md, "on_disk_md = %r, md = %r" % (
on_disk_md, md)
def test_put_w_tombstone(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._metadata == None
gdf.put_metadata({'x': '1'}, tombstone=True)
assert gdf._metadata is None
assert _metadata == {}
def test_put_w_meta_file(self):
def test_write_metadata_w_meta_file(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
newmd = gdf.get_metadata().copy()
newmd = deepcopy(gdf.read_metadata())
newmd['X-Object-Meta-test'] = '1234'
gdf.put_metadata(newmd)
assert gdf._metadata is None
fmd = _metadata[_mapit(the_file)]
assert fmd == newmd, "on-disk md = %r, newmd = %r" % (fmd, newmd)
gdf.write_metadata(newmd)
assert _metadata[_mapit(the_file)] == newmd
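# --- Illustrative sketch (not part of this diff): the renamed metadata
# round-trip. The tests call read_metadata() and write_metadata() outside
# any open(), so both handle their own open/close; deepcopy comes from the
# copy module and the meta key is an example.
from copy import deepcopy
md = deepcopy(gdf.read_metadata())
md['X-Object-Meta-test'] = '1234'
gdf.write_metadata(md)
assert gdf.read_metadata()['X-Object-Meta-test'] == '1234'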
def test_put_w_meta_file_no_content_type(self):
def test_write_metadata_w_meta_file_no_content_type(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
with gdf.open():
newmd = gdf.get_metadata().copy()
newmd = deepcopy(gdf.read_metadata())
newmd['Content-Type'] = ''
newmd['X-Object-Meta-test'] = '1234'
gdf.put_metadata(newmd)
assert gdf._metadata is None
fmd = _metadata[_mapit(the_file)]
assert fmd == newmd, "on-disk md = %r, newmd = %r" % (fmd, newmd)
gdf.write_metadata(newmd)
assert _metadata[_mapit(the_file)] == newmd
def test_put_w_meta_dir(self):
def test_write_metadata_w_meta_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_path, "dir")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir")
with gdf.open():
newmd = gdf.get_metadata().copy()
newmd = deepcopy(gdf.read_metadata())
newmd['X-Object-Meta-test'] = '1234'
gdf.put_metadata(newmd)
assert gdf._metadata is None
fmd = _metadata[_mapit(the_dir)]
assert fmd == newmd, "on-disk md = %r, newmd = %r" % (fmd, newmd)
gdf.write_metadata(newmd)
assert _metadata[_mapit(the_dir)] == newmd
def test_put_w_marker_dir(self):
def test_write_metadata_w_marker_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_path, "dir")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir")
with gdf.open():
newmd = gdf.get_metadata().copy()
newmd = deepcopy(gdf.read_metadata())
newmd['X-Object-Meta-test'] = '1234'
gdf.put_metadata(newmd)
assert gdf._metadata is None
fmd = _metadata[_mapit(the_dir)]
assert fmd == newmd, "on-disk md = %r, newmd = %r" % (fmd, newmd)
gdf.write_metadata(newmd)
assert _metadata[_mapit(the_dir)] == newmd
def test_put_w_marker_dir_create(self):
the_cont = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_cont, "dir")
os.makedirs(the_cont)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir")
assert gdf._metadata == None
assert gdf._metadata is None
newmd = {
'ETag': 'etag',
'X-Timestamp': 'ts',
'Content-Type': 'application/directory'}
with gdf.create() as dw:
dw.put(newmd.copy(), extension='.dir')
with gdf.open():
assert gdf.data_file == the_dir
dw.put(newmd)
assert gdf._data_file == the_dir
for key, val in newmd.items():
assert gdf._metadata[key] == val
assert _metadata[_mapit(the_dir)][key] == val
assert X_OBJECT_TYPE not in gdf._metadata, "md = %r" % gdf._metadata
assert _metadata[_mapit(the_dir)][X_OBJECT_TYPE] == DIR_OBJECT
def test_put_is_dir(self):
@@ -506,19 +498,31 @@ class TestDiskFile(unittest.TestCase):
the_dir = os.path.join(the_path, "dir")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir")
with gdf.open():
origmd = gdf.get_metadata()
origfmd = _metadata[_mapit(the_dir)]
newmd = deepcopy(origmd)
# FIXME: This is a hack to get to the code-path; it is not clear
# how this can happen normally.
newmd = {
'Content-Type': '',
'X-Object-Meta-test': '1234'}
newmd['Content-Type'] = ''
newmd['X-Object-Meta-test'] = '1234'
with gdf.create() as dw:
try:
dw.put(newmd, extension='.data')
# FIXME: We should probably be able to detect in .create()
# when the target file name already exists as a directory, to
# avoid reading the data off the wire only to fail because the
# target is a directory.
dw.write('12345\n')
dw.put(newmd)
except DiskFileError:
pass
else:
self.fail("Expected to encounter"
" 'already-exists-as-dir' exception")
with gdf.open():
assert gdf.get_metadata() == origmd
assert _metadata[_mapit(the_dir)] == origfmd, "was: %r, is: %r" % (
origfmd, _metadata[_mapit(the_dir)])
def test_put(self):
the_cont = os.path.join(self.td, "vol0", "bar")
@@ -526,9 +530,9 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf.name == "bar"
assert gdf.datadir == the_cont
assert gdf.data_file is None
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
assert gdf._datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
etag = md5()
@@ -542,13 +546,12 @@ class TestDiskFile(unittest.TestCase):
}
with gdf.create() as dw:
assert dw.tmppath is not None
tmppath = dw.tmppath
assert dw._tmppath is not None
tmppath = dw._tmppath
dw.write(body)
dw.put(metadata)
assert gdf.data_file == os.path.join(self.td, "vol0", "bar", "z")
assert os.path.exists(gdf.data_file)
assert os.path.exists(gdf._data_file)
assert not os.path.exists(tmppath)
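# --- Illustrative sketch (not part of this diff): the writer path that
# test_put drives. The DiskFileWriter only exists inside create(); put()
# moves the temp file (dw._tmppath) into the final _data_file location.
from hashlib import md5
body = '1234\n'
metadata = {
    'X-Timestamp': '1234',                     # example values only
    'Content-Length': str(len(body)),
    'ETag': md5(body).hexdigest(),
    'Content-Type': 'application/octet-stream',
}
with gdf.create() as dw:
    dw.write(body)     # data lands in the temp file first
    dw.put(metadata)   # then it is renamed into place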
def test_put_ENOSPC(self):
@@ -557,9 +560,9 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf.name == "bar"
assert gdf.datadir == the_cont
assert gdf.data_file is None
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
assert gdf._datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
etag = md5()
@@ -578,7 +581,7 @@ class TestDiskFile(unittest.TestCase):
with mock.patch("os.open", mock_open):
try:
with gdf.create() as dw:
assert dw.tmppath is not None
assert dw._tmppath is not None
dw.write(body)
dw.put(metadata)
except DiskFileNoSpace:
@@ -592,9 +595,9 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
assert gdf._obj_path == ""
assert gdf.name == "bar"
assert gdf.datadir == the_cont
assert gdf.data_file is None
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
assert gdf._datadir == the_cont
assert gdf._data_file == os.path.join(self.td, "vol0", "bar", "z")
body = '1234\n'
etag = md5()
@@ -618,7 +621,8 @@ class TestDiskFile(unittest.TestCase):
with mock.patch("os.rename", mock_rename):
try:
with gdf.create() as dw:
assert dw.tmppath is not None
assert dw._tmppath is not None
tmppath = dw._tmppath
dw.write(body)
dw.put(metadata)
except GlusterFileSystemOSError:
@@ -632,9 +636,10 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", the_file)
assert gdf._obj == "z"
assert gdf._obj_path == the_obj_path
assert gdf.name == os.path.join("bar", "b", "a")
assert gdf.datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
assert gdf.data_file is None
assert gdf._container_path == os.path.join(self.td, "vol0", "bar")
assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "b", "a")
assert gdf._data_file == os.path.join(
self.td, "vol0", "bar", "b", "a", "z")
body = '1234\n'
etag = md5()
@@ -648,42 +653,15 @@ class TestDiskFile(unittest.TestCase):
}
with gdf.create() as dw:
assert dw.tmppath is not None
tmppath = dw.tmppath
assert dw._tmppath is not None
tmppath = dw._tmppath
dw.write(body)
dw.put(metadata)
assert gdf.data_file == os.path.join(
self.td, "vol0", "bar", "b", "a", "z")
assert os.path.exists(gdf.data_file)
assert os.path.exists(gdf._data_file)
assert not os.path.exists(tmppath)
def test_delete_no_metadata(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._metadata == None
_saved_rmobjdir = gluster.swift.obj.diskfile.rmobjdir
gluster.swift.obj.diskfile.rmobjdir = _mock_rmobjdir
try:
gdf.delete(1.0)
except MockException as exp:
self.fail(str(exp))
finally:
gluster.swift.obj.diskfile.rmobjdir = _saved_rmobjdir
def test_delete_same_timestamp(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._metadata == None
gdf._metadata = {'X-Timestamp': 1}
_saved_rmobjdir = gluster.swift.obj.diskfile.rmobjdir
gluster.swift.obj.diskfile.rmobjdir = _mock_rmobjdir
try:
gdf.delete(1)
except MockException as exp:
self.fail(str(exp))
finally:
gluster.swift.obj.diskfile.rmobjdir = _saved_rmobjdir
def test_delete_file(self):
def test_delete(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
@@ -691,12 +669,27 @@ class TestDiskFile(unittest.TestCase):
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
later = float(gdf.get_metadata()['X-Timestamp']) + 1
assert gdf.data_file == the_file
assert gdf._data_file == the_file
assert not gdf._is_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
assert os.path.isdir(gdf.datadir)
assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
assert os.path.isdir(gdf._datadir)
assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_delete_same_timestamp(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
assert gdf._data_file == the_file
assert not gdf._is_dir
now = float(gdf.read_metadata()['X-Timestamp'])
gdf.delete(normalize_timestamp(now))
assert os.path.isdir(gdf._datadir)
assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
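# --- Illustrative sketch (not part of this diff): the delete() semantics
# the two tests above pin down -- only objects strictly older than the
# given timestamp are removed. normalize_timestamp is the swift.common
# helper the tests already import.
ts = float(gdf.read_metadata()['X-Timestamp'])
gdf.delete(normalize_timestamp(ts))       # equal timestamp: object kept
gdf.delete(normalize_timestamp(ts + 1))   # newer timestamp: object gone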
def test_delete_file_not_found(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -706,17 +699,16 @@ class TestDiskFile(unittest.TestCase):
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
later = float(gdf._metadata['X-Timestamp']) + 1
assert gdf.data_file == the_file
assert gdf._data_file == the_file
assert not gdf._is_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
# Handle the case where the file is not in the directory listing.
os.unlink(the_file)
gdf.delete(normalize_timestamp(later))
assert os.path.isdir(gdf.datadir)
assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
assert os.path.isdir(gdf._datadir)
assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_delete_file_unlink_error(self):
the_path = os.path.join(self.td, "vol0", "bar")
@@ -726,10 +718,10 @@ class TestDiskFile(unittest.TestCase):
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert gdf._data_file == the_file
assert not gdf._is_dir
later = float(gdf._metadata['X-Timestamp']) + 1
later = float(gdf.read_metadata()['X-Timestamp']) + 1
def _mock_os_unlink_eacces_err(f):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
@@ -749,148 +741,33 @@ class TestDiskFile(unittest.TestCase):
finally:
os.chmod(the_path, stats.st_mode)
assert os.path.isdir(gdf.datadir)
assert os.path.exists(os.path.join(gdf.datadir, gdf._obj))
assert os.path.isdir(gdf._datadir)
assert os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_delete_is_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_path, "d")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "d")
assert gdf._obj == "d"
with gdf.open():
assert gdf.data_file == the_dir
assert gdf._is_dir
later = float(gdf._metadata['X-Timestamp']) + 1
assert gdf._data_file == the_dir
later = float(gdf.read_metadata()['X-Timestamp']) + 1
gdf.delete(normalize_timestamp(later))
assert os.path.isdir(gdf.datadir)
assert not os.path.exists(os.path.join(gdf.datadir, gdf._obj))
def test_get_data_file_size(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert not gdf._is_dir
assert 4 == gdf.get_data_file_size()
def test_get_data_file_size_md_restored(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert not gdf._is_dir
assert 4 == gdf._metadata['Content-Length']
gdf._metadata['Content-Length'] = 3
assert 4 == gdf.get_data_file_size()
assert 4 == gdf._metadata['Content-Length']
def test_get_data_file_size_dne(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar",
"/b/a/z/")
try:
gdf.get_data_file_size()
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
def test_get_data_file_size_dne_os_err(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert not gdf._is_dir
gdf.data_file = gdf.data_file + ".dne"
try:
gdf.get_data_file_size()
except DiskFileNotExist:
pass
else:
self.fail("Expected DiskFileNotExist exception")
def test_get_data_file_size_os_err(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_file = os.path.join(the_path, "z")
os.makedirs(the_path)
with open(the_file, "wb") as fd:
fd.write("1234")
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
with gdf.open():
assert gdf.data_file == the_file
assert not gdf._is_dir
stats = os.stat(the_path)
try:
os.chmod(the_path, 0)
def _mock_getsize_eaccess_err(f):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
with patch("os.path.getsize", _mock_getsize_eaccess_err):
try:
gdf.get_data_file_size()
except OSError as err:
assert err.errno == errno.EACCES
else:
self.fail("Expected OSError exception")
finally:
os.chmod(the_path, stats.st_mode)
def test_get_data_file_size_dir(self):
the_path = os.path.join(self.td, "vol0", "bar")
the_dir = os.path.join(the_path, "d")
os.makedirs(the_dir)
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "d")
assert gdf._obj == "d"
with gdf.open():
assert gdf.data_file == the_dir
assert gdf._is_dir
assert 0 == gdf.get_data_file_size()
def test_filter_metadata(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._metadata == None
gdf._filter_metadata()
assert gdf._metadata == None
gdf._metadata = {}
gdf._metadata[X_TYPE] = 'a'
gdf._metadata[X_OBJECT_TYPE] = 'b'
gdf._metadata['foobar'] = 'c'
gdf._filter_metadata()
assert X_TYPE not in gdf._metadata
assert X_OBJECT_TYPE not in gdf._metadata
assert 'foobar' in gdf._metadata
assert os.path.isdir(gdf._datadir)
assert not os.path.exists(os.path.join(gdf._datadir, gdf._obj))
def test_create(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
saved_fd = None
with gdf.create() as dw:
assert gdf.datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf.datadir)
saved_tmppath = dw.tmppath
assert os.path.dirname(saved_tmppath) == gdf.datadir
assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf._datadir)
saved_tmppath = dw._tmppath
assert os.path.dirname(saved_tmppath) == gdf._datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
saved_fd = dw.fd
saved_fd = dw._fd
# At the end of the previous with block a close on the fd is
# called. Calling os.close on the same fd again will raise an
# OSError exception, which we must catch.
@@ -906,25 +783,25 @@ class TestDiskFile(unittest.TestCase):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
assert gdf.datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf.datadir)
saved_tmppath = dw.tmppath
assert os.path.dirname(saved_tmppath) == gdf.datadir
assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf._datadir)
saved_tmppath = dw._tmppath
assert os.path.dirname(saved_tmppath) == gdf._datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")
# Closing the fd prematurely should not raise any exceptions.
os.close(dw.fd)
os.close(dw._fd)
assert not os.path.exists(saved_tmppath)
def test_create_err_on_unlink(self):
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "dir/z")
saved_tmppath = ''
with gdf.create() as dw:
assert gdf.datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf.datadir)
saved_tmppath = dw.tmppath
assert os.path.dirname(saved_tmppath) == gdf.datadir
assert gdf._datadir == os.path.join(self.td, "vol0", "bar", "dir")
assert os.path.isdir(gdf._datadir)
saved_tmppath = dw._tmppath
assert os.path.dirname(saved_tmppath) == gdf._datadir
assert os.path.basename(saved_tmppath)[:3] == '.z.'
assert os.path.exists(saved_tmppath)
dw.write("123")

View File

@@ -75,7 +75,7 @@ def request_init(self, *args, **kwargs):
_request_instances[self] = None
def setup():
def do_setup(the_object_server):
utils.HASH_PATH_SUFFIX = 'endcap'
global _testdir, _test_servers, _test_sockets, \
_orig_container_listing_limit, _test_coros
@@ -147,8 +147,8 @@ def setup():
acc2srv = account_server.AccountController(conf)
con1srv = container_server.ContainerController(conf)
con2srv = container_server.ContainerController(conf)
obj1srv = object_server.ObjectController(conf)
obj2srv = object_server.ObjectController(conf)
obj1srv = the_object_server.ObjectController(conf)
obj2srv = the_object_server.ObjectController(conf)
_test_servers = \
(prosrv, acc1srv, acc2srv, con1srv, con2srv, obj1srv, obj2srv)
nl = DebugLogger()
@@ -190,6 +190,10 @@ def setup():
exp, headers[:len(exp)])
def setup():
do_setup(object_server)
def teardown():
for server in _test_coros:
server.kill()

View File

@@ -10,7 +10,7 @@ setenv = VIRTUAL_ENV={envdir}
NOSE_OPENSTACK_SHOW_ELAPSED=1
NOSE_OPENSTACK_STDOUT=1
deps =
https://launchpad.net/swift/havana/1.10.0/+download/swift-1.10.0.tar.gz
https://launchpad.net/gluster-swift/icehouse/1.10.1/+download/swift-1.10.0.33.g4bfe674.tar.gz
--download-cache={homedir}/.pipcache
-r{toxinidir}/tools/test-requires
changedir = {toxinidir}/test/unit
@@ -25,6 +25,9 @@ whitelist_externals=bash
commands = bash tools/functional_tests.sh
[testenv:pep8]
deps =
--download-cache={homedir}/.pipcache
-r{toxinidir}/tools/test-requires
changedir = {toxinidir}
commands =
flake8