Final forward port of PDQ performance patches

Change-Id: I4ef131b3cc7648d4571a4d854029efb1aff8b901
Signed-off-by: Peter Portante <peter.portante@redhat.com>
Reviewed-on: http://review.gluster.org/5305
Reviewed-by: Luis Pabon <lpabon@redhat.com>
Tested-by: Luis Pabon <lpabon@redhat.com>
This commit is contained in:
Peter Portante 2013-07-08 14:10:45 -04:00 committed by Luis Pabon
parent f8d876248d
commit 677d307169
23 changed files with 1768 additions and 562 deletions

View File

@ -1,17 +1,32 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true mount_check = true
bind_port = 6012 bind_port = 6012
user = root #
log_facility = LOG_LOCAL2
# Override swift's default behaviour for fallocate. # Override swift's default behaviour for fallocate.
disable_fallocate = true disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main] [pipeline:main]
pipeline = account-server pipeline = account-server
[app:account-server] [app:account-server]
use = egg:gluster_swift#account use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = on
[account-replicator] [account-replicator]
vm_test_mode = yes vm_test_mode = yes

View File

@ -1,17 +1,32 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
mount_check = true mount_check = true
bind_port = 6011 bind_port = 6011
user = root #
log_facility = LOG_LOCAL2
# Override swift's default behaviour for fallocate. # Override swift's default behaviour for fallocate.
disable_fallocate = true disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main] [pipeline:main]
pipeline = container-server pipeline = container-server
[app:container-server] [app:container-server]
use = egg:gluster_swift#container use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = on
[container-replicator] [container-replicator]
vm_test_mode = yes vm_test_mode = yes

View File

@ -1,6 +1,7 @@
[DEFAULT] [DEFAULT]
# IP address of a GlusterFS volume server member. By default, we assume the #
# local host. # IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost mount_ip = localhost
# By default it is assumed the Gluster volumes can be accessed using other # By default it is assumed the Gluster volumes can be accessed using other

View File

@ -1,7 +1,20 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
mount_check = true mount_check = true
bind_port = 6010 bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of # If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value. # CPUs is a good starting point for determining the right value.
workers = 1 workers = 1
@ -15,19 +28,20 @@ pipeline = object-server
use = egg:gluster_swift#object use = egg:gluster_swift#object
user = root user = root
log_facility = LOG_LOCAL2 log_facility = LOG_LOCAL2
# Timeout clients that don't read or write to the proxy server after 5 #
# seconds. # For performance, after ensuring things are running in a stable manner, you
conn_timeout = 5 # can turn off normal request logging for the object server to reduce the
# For high load situations, once connected to a container server, allow for # per-request overhead and unclutter the log files. Warnings and errors will
# delays communicating with it. # still be logged.
node_timeout = 60 log_requests = on
#
# Adjust this value to match the stripe width of the underlying storage array # Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point # (not the stripe element size). This will provide a reasonable starting point
# for tuning this value. # for tuning this value.
disk_chunk_size = 65536 disk_chunk_size = 65536
# Adjust this value match whatever is set for the disk_chunk_size #
# initially. This will provide a reasonable starting point for tuning this # Adjust this value match whatever is set for the disk_chunk_size initially.
# value. # This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556 network_chunk_size = 65556
[object-replicator] [object-replicator]

View File

@ -1,7 +1,6 @@
[DEFAULT] [DEFAULT]
bind_port = 8080 bind_port = 8080
user = root user = root
log_facility = LOG_LOCAL1
# Consider using 1 worker per CPU # Consider using 1 worker per CPU
workers = 1 workers = 1

View File

@ -483,12 +483,6 @@ class DiskDir(DiskCommon):
# within a directory implicitly. # within a directory implicitly.
return return
def empty(self):
try:
return dir_empty(self.datadir)
except FileOrDirNotFoundError:
return True
def delete_db(self, timestamp): def delete_db(self, timestamp):
""" """
Delete the container (directory) if empty. Delete the container (directory) if empty.

View File

@ -1,4 +1,4 @@
# Copyright (c) 2012 Red Hat, Inc. # Copyright (c) 2013 Red Hat, Inc.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
@ -15,29 +15,241 @@
import os import os
import stat import stat
import fcntl
import errno import errno
import random import random
import logging
from hashlib import md5 from hashlib import md5
from eventlet import sleep
from contextlib import contextmanager from contextlib import contextmanager
from swift.common.utils import renamer from swift.common.utils import TRUE_VALUES, fallocate
from swift.common.exceptions import DiskFileNotExist, DiskFileError from swift.common.exceptions import DiskFileNotExist, DiskFileError
from gluster.swift.common.exceptions import AlreadyExistsAsDir from gluster.swift.common.exceptions import GlusterFileSystemOSError
from gluster.swift.common.fs_utils import mkdirs, do_open, do_close, \ from gluster.swift.common.fs_utils import do_fstat, do_open, do_close, \
do_unlink, do_chown, os_path, do_fsync, do_fchown, do_stat do_unlink, do_chown, os_path, do_fsync, do_fchown, do_stat
from gluster.swift.common.utils import read_metadata, write_metadata, \ from gluster.swift.common.utils import read_metadata, write_metadata, \
validate_object, create_object_metadata, rmobjdir, dir_is_object validate_object, create_object_metadata, rmobjdir, dir_is_object, \
get_object_metadata
from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \ from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \
X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \ X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT
from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.obj.server import DiskFile from swift.obj.server import DiskFile
# FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
# be back ported. See http://www.python.org/dev/peps/pep-0433/
O_CLOEXEC = 02000000
DEFAULT_DISK_CHUNK_SIZE = 65536 DEFAULT_DISK_CHUNK_SIZE = 65536
# keep these lower-case # keep these lower-case
DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split()) DISALLOWED_HEADERS = set('content-length content-type deleted etag'.split())
def _random_sleep():
sleep(random.uniform(0.5, 0.15))
def _lock_parent(full_path):
parent_path, _ = full_path.rsplit(os.path.sep, 1)
try:
fd = os.open(parent_path, os.O_RDONLY | O_CLOEXEC)
except OSError as err:
if err.errno == errno.ENOENT:
# Cannot lock the parent because it does not exist, let the caller
# handle this situation.
return False
raise
else:
while True:
# Spin sleeping for 1/10th of a second until we get the lock.
# FIXME: Consider adding a final timeout just abort the operation.
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as err:
if err.errno == errno.EAGAIN:
_random_sleep()
else:
# Don't leak an open file on an exception
os.close(fd)
raise
except Exception:
# Don't leak an open file for any other exception
os.close(fd)
raise
else:
break
return fd
def _make_directory_locked(full_path, uid, gid, metadata=None):
fd = _lock_parent(full_path)
if fd is False:
# Parent does not exist either, pass this situation on to the caller
# to handle.
return False, metadata
try:
# Check for directory existence
stats = do_stat(full_path)
if stats:
# It now exists, having acquired the lock of its parent directory,
# but verify it is actually a directory
is_dir = stat.S_ISDIR(stats.st_mode)
if not is_dir:
# It is not a directory!
raise DiskFileError("_make_directory_locked: non-directory"
" found at path %s when expecting a"
" directory", full_path)
return True, metadata
# We know the parent directory exists, and we have it locked, attempt
# the creation of the target directory.
return _make_directory_unlocked(full_path, uid, gid, metadata=metadata)
finally:
# We're done here, be sure to remove our lock and close our open FD.
try:
fcntl.flock(fd, fcntl.LOCK_UN)
except:
pass
os.close(fd)
def _make_directory_unlocked(full_path, uid, gid, metadata=None):
"""
Make a directory and change the owner ship as specified, and potentially
creating the object metadata if requested.
"""
try:
os.mkdir(full_path)
except OSError as err:
if err.errno == errno.ENOENT:
# Tell the caller some directory of the parent path does not
# exist.
return False, metadata
elif err.errno == errno.EEXIST:
# Possible race, in that the caller invoked this method when it
# had previously determined the file did not exist.
#
# FIXME: When we are confident, remove this stat() call as it is
# not necessary.
try:
stats = os.stat(full_path)
except OSError as serr:
# FIXME: Ideally we'd want to return an appropriate error
# message and code in the PUT Object REST API response.
raise DiskFileError("_make_directory_unlocked: os.mkdir failed"
" because path %s already exists, and"
" a subsequent os.stat on that same"
" path failed (%s)" % (full_path,
str(serr)))
else:
is_dir = stat.S_ISDIR(stats.st_mode)
if not is_dir:
# FIXME: Ideally we'd want to return an appropriate error
# message and code in the PUT Object REST API response.
raise DiskFileError("_make_directory_unlocked: os.mkdir"
" failed on path %s because it already"
" exists but not as a directory" % (
full_path))
return True, metadata
elif err.errno == errno.ENOTDIR:
# FIXME: Ideally we'd want to return an appropriate error
# message and code in the PUT Object REST API response.
raise DiskFileError("_make_directory_unlocked: os.mkdir failed"
" because some part of path %s is not in fact"
" a directory" % (full_path))
elif err.errno == errno.EIO:
# Sometimes Fuse will return an EIO error when it does not know
# how to handle an unexpected, but transient situation. It is
# possible the directory now exists, stat() it to find out after a
# short period of time.
_random_sleep()
try:
stats = os.stat(full_path)
except OSError as serr:
if serr.errno == errno.ENOENT:
errmsg = "_make_directory_unlocked: os.mkdir failed on" \
" path %s (EIO), and a subsequent os.stat on" \
" that same path did not find the file." % (
full_path,)
else:
errmsg = "_make_directory_unlocked: os.mkdir failed on" \
" path %s (%s), and a subsequent os.stat on" \
" that same path failed as well (%s)" % (
full_path, str(err), str(serr))
raise DiskFileError(errmsg)
else:
# The directory at least exists now
is_dir = stat.S_ISDIR(stats.st_mode)
if is_dir:
# Dump the stats to the log with the original exception.
logging.warn("_make_directory_unlocked: os.mkdir initially"
" failed on path %s (%s) but a stat()"
" following that succeeded: %r" % (full_path,
str(err),
stats))
# Assume another entity took care of the proper setup.
return True, metadata
else:
raise DiskFileError("_make_directory_unlocked: os.mkdir"
" initially failed on path %s (%s) but"
" now we see that it exists but is not"
" a directory (%r)" % (full_path,
str(err),
stats))
else:
# Some other potentially rare exception occurred that does not
# currently warrant a special log entry to help diagnose.
raise DiskFileError("_make_directory_unlocked: os.mkdir failed on"
" path %s (%s)" % (full_path, str(err)))
else:
if metadata:
# We were asked to set the initial metadata for this object.
metadata_orig = get_object_metadata(full_path)
metadata_orig.update(metadata)
write_metadata(full_path, metadata_orig)
metadata = metadata_orig
# We created it, so we are reponsible for always setting the proper
# ownership.
do_chown(full_path, uid, gid)
return True, metadata
_fs_conf = ConfigParser()
if _fs_conf.read(os.path.join('/etc/swift', 'fs.conf')):
try:
_mkdir_locking = _fs_conf.get('DEFAULT', 'mkdir_locking', "no") \
in TRUE_VALUES
except (NoSectionError, NoOptionError):
_mkdir_locking = False
try:
_use_put_mount = _fs_conf.get('DEFAULT', 'use_put_mount', "no") \
in TRUE_VALUES
except (NoSectionError, NoOptionError):
_use_put_mount = False
try:
_relaxed_writes = _fs_conf.get('DEFAULT', 'relaxed_writes', "no") \
in TRUE_VALUES
except (NoSectionError, NoOptionError):
_relaxed_writes = False
try:
_preallocate = _fs_conf.get('DEFAULT', 'preallocate', "no") \
in TRUE_VALUES
except (NoSectionError, NoOptionError):
_preallocate = False
else:
_mkdir_locking = False
_use_put_mount = False
_relaxed_writes = False
_preallocate = False
if _mkdir_locking:
make_directory = _make_directory_locked
else:
make_directory = _make_directory_unlocked
def _adjust_metadata(metadata): def _adjust_metadata(metadata):
# Fix up the metadata to ensure it has a proper value for the # Fix up the metadata to ensure it has a proper value for the
# Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE # Content-Type metadata, as well as an X_TYPE and X_OBJECT_TYPE
@ -106,6 +318,11 @@ class Gluster_DiskFile(DiskFile):
self.datadir = os.path.join(path, device, self.name) self.datadir = os.path.join(path, device, self.name)
self.device_path = os.path.join(path, device) self.device_path = os.path.join(path, device)
self._container_path = os.path.join(path, device, container) self._container_path = os.path.join(path, device, container)
if _use_put_mount:
self.put_datadir = os.path.join(self.device_path + '_PUT',
self.name)
else:
self.put_datadir = self.datadir
self._is_dir = False self._is_dir = False
self.tmppath = None self.tmppath = None
self.logger = logger self.logger = logger
@ -123,15 +340,16 @@ class Gluster_DiskFile(DiskFile):
# Don't store a value for data_file until we know it exists. # Don't store a value for data_file until we know it exists.
self.data_file = None self.data_file = None
data_file = os.path.join(self.datadir, self._obj) data_file = os.path.join(self.put_datadir, self._obj)
try: try:
stats = do_stat(data_file) stats = do_stat(data_file)
except OSError as ose: except OSError as err:
if ose.errno == errno.ENOENT or ose.errno == errno.ENOTDIR: if err.errno == errno.ENOTDIR:
return return
else: else:
raise if not stats:
return
self.data_file = data_file self.data_file = data_file
self._is_dir = stat.S_ISDIR(stats.st_mode) self._is_dir = stat.S_ISDIR(stats.st_mode)
@ -164,6 +382,7 @@ class Gluster_DiskFile(DiskFile):
""" """
# Marker directory # Marker directory
if self._is_dir: if self._is_dir:
assert not self.fp
return return
if self.fp: if self.fp:
do_close(self.fp) do_close(self.fp)
@ -178,20 +397,61 @@ class Gluster_DiskFile(DiskFile):
""" """
return not self.data_file return not self.data_file
def _create_dir_object(self, dir_path): def _create_dir_object(self, dir_path, metadata=None):
stats = None """
try: Create a directory object at the specified path. No check is made to
stats = do_stat(dir_path) see if the directory object already exists, that is left to the
except OSError: caller (this avoids a potentially duplicate stat() system call).
pass
if not stats: The "dir_path" must be relative to its container, self._container_path.
mkdirs(dir_path)
do_chown(dir_path, self.uid, self.gid) The "metadata" object is an optional set of metadata to apply to the
create_object_metadata(dir_path) newly created directory object. If not present, no initial metadata is
elif not stat.S_ISDIR(stats.st_mode): applied.
raise DiskFileError("Cannot overwrite "
"file %s with a directory" % dir_path) The algorithm used is as follows:
1. An attempt is made to create the directory, assuming the parent
directory already exists
* Directory creation races are detected, returning success in
those cases
2. If the directory creation fails because some part of the path to
the directory does not exist, then a search back up the path is
performed to find the first existing ancestor directory, and then
the missing parents are successively created, finally creating
the target directory
"""
full_path = os.path.join(self._container_path, dir_path)
cur_path = full_path
stack = []
while True:
md = None if cur_path != full_path else metadata
ret, newmd = make_directory(cur_path, self.uid, self.gid, md)
if ret:
break
# Some path of the parent did not exist, so loop around and
# create that, pushing this parent on the stack.
if os.path.sep not in cur_path:
raise DiskFileError("DiskFile._create_dir_object(): failed to"
" create directory path while exhausting"
" path elements to create: %s" % full_path)
cur_path, child = cur_path.rsplit(os.path.sep, 1)
assert child
stack.append(child)
child = stack.pop() if stack else None
while child:
cur_path = os.path.join(cur_path, child)
md = None if cur_path != full_path else metadata
ret, newmd = make_directory(cur_path, self.uid, self.gid, md)
if not ret:
raise DiskFileError("DiskFile._create_dir_object(): failed to"
" create directory path to target, %s,"
" on subpath: %s" % (full_path, cur_path))
child = stack.pop() if stack else None
return True, newmd
def put_metadata(self, metadata, tombstone=False): def put_metadata(self, metadata, tombstone=False):
""" """
@ -227,39 +487,112 @@ class Gluster_DiskFile(DiskFile):
if dir_is_object(metadata): if dir_is_object(metadata):
if not self.data_file: if not self.data_file:
self.data_file = os.path.join(self.datadir, self._obj) # Does not exist, create it
self._create_dir_object(self.data_file) data_file = os.path.join(self._obj_path, self._obj)
self.put_metadata(metadata) _, self.metadata = self._create_dir_object(data_file, metadata)
self.data_file = os.path.join(self._container_path, data_file)
elif not self.is_dir:
# Exists, but as a file
raise DiskFileError('DiskFile.put(): directory creation failed'
' since the target, %s, already exists as'
' a file' % self.data_file)
return return
# Check if directory already exists.
if self._is_dir: if self._is_dir:
# A pre-existing directory already exists on the file # A pre-existing directory already exists on the file
# system, perhaps gratuitously created when another # system, perhaps gratuitously created when another
# object was created, or created externally to Swift # object was created, or created externally to Swift
# REST API servicing (UFO use case). # REST API servicing (UFO use case).
msg = 'File object exists as a directory: %s' % self.data_file raise DiskFileError('DiskFile.put(): file creation failed since'
raise AlreadyExistsAsDir(msg) ' the target, %s, already exists as a'
' directory' % self.data_file)
write_metadata(self.tmppath, metadata) # Write out metadata before fsync() to ensure it is also forced to
if X_CONTENT_LENGTH in metadata: # disk.
self.drop_cache(fd, 0, int(metadata[X_CONTENT_LENGTH])) write_metadata(fd, metadata)
if not _relaxed_writes:
do_fsync(fd) do_fsync(fd)
if self._obj_path: if X_CONTENT_LENGTH in metadata:
dir_objs = self._obj_path.split('/') # Don't bother doing this before fsync in case the OS gets any
assert len(dir_objs) >= 1 # ideas to issue partial writes.
tmp_path = self._container_path fsize = int(metadata[X_CONTENT_LENGTH])
for dir_name in dir_objs: self.drop_cache(fd, 0, fsize)
tmp_path = os.path.join(tmp_path, dir_name)
self._create_dir_object(tmp_path) # At this point we know that the object's full directory path exists,
# so we can just rename it directly without using Swift's
# swift.common.utils.renamer(), which makes the directory path and
# adds extra stat() calls.
data_file = os.path.join(self.put_datadir, self._obj)
while True:
try:
os.rename(self.tmppath, data_file)
except OSError as err:
if err.errno in (errno.ENOENT, errno.EIO):
# FIXME: Why either of these two error conditions is
# happening is unknown at this point. This might be a FUSE
# issue of some sort or a possible race condition. So
# let's sleep on it, and double check the environment
# after a good nap.
_random_sleep()
# Tease out why this error occurred. The man page for
# rename reads:
# "The link named by tmppath does not exist; or, a
# directory component in data_file does not exist;
# or, tmppath or data_file is an empty string."
assert len(self.tmppath) > 0 and len(data_file) > 0
tpstats = do_stat(self.tmppath)
tfstats = do_fstat(fd)
assert tfstats
if not tpstats or tfstats.st_ino != tpstats.st_ino:
# Temporary file name conflict
raise DiskFileError('DiskFile.put(): temporary file,'
' %s, was already renamed'
' (targeted for %s)' % (
self.tmppath, data_file))
else:
# Data file target name now has a bad path!
dfstats = do_stat(self.put_datadir)
if not dfstats:
raise DiskFileError('DiskFile.put(): path to'
' object, %s, no longer exists'
' (targeted for %s)' % (
self.put_datadir,
data_file))
else:
is_dir = stat.S_ISDIR(dfstats.st_mode)
if not is_dir:
raise DiskFileError('DiskFile.put(): path to'
' object, %s, no longer a'
' directory (targeted for'
' %s)' % (self.put_datadir,
data_file))
else:
# Let's retry since everything looks okay
logging.warn("DiskFile.put(): os.rename('%s',"
"'%s') initially failed (%s) but"
" a stat('%s') following that"
" succeeded: %r" % (
self.tmppath, data_file,
str(err), self.put_datadir,
dfstats))
continue
else:
raise GlusterFileSystemOSError(
err.errno, "%s, os.rename('%s', '%s')" % (
err.strerror, self.tmppath, data_file))
else:
# Success!
break
# Avoid the unlink() system call as part of the mkstemp context cleanup
self.tmppath = None
do_fchown(fd, self.uid, self.gid)
newpath = os.path.join(self.datadir, self._obj)
renamer(self.tmppath, newpath)
self.metadata = metadata self.metadata = metadata
self.data_file = newpath
self.filter_metadata() self.filter_metadata()
return
# Mark that it actually exists now
self.data_file = os.path.join(self.datadir, self._obj)
def unlinkold(self, timestamp): def unlinkold(self, timestamp):
""" """
@ -313,7 +646,7 @@ class Gluster_DiskFile(DiskFile):
def get_data_file_size(self): def get_data_file_size(self):
""" """
Returns the os_path.getsize for the file. Raises an exception if this Returns the os_path.getsize for the file. Raises an exception if this
file does not match the Content-Length stored in the metadata. Or if file does not match the Content-Length stored in the metadata, or if
self.data_file does not exist. self.data_file does not exist.
:returns: file size as an int :returns: file size as an int
@ -346,33 +679,94 @@ class Gluster_DiskFile(DiskFile):
self.metadata.pop(X_OBJECT_TYPE) self.metadata.pop(X_OBJECT_TYPE)
@contextmanager @contextmanager
def mkstemp(self): def mkstemp(self, size=None):
"""Contextmanager to make a temporary file.""" """
Contextmanager to make a temporary file, optionally of a specified
initial size.
# Creating intermidiate directories and corresponding metadata. For Gluster, we first optimistically create the temporary file using
# For optimization, check if the subdirectory already exists, the "rsync-friendly" .NAME.random naming. If we find that some path to
# if exists, then it means that it also has its metadata. the file does not exist, we then create that path and then create the
# Not checking for container, since the container should already temporary file again. If we get file name conflict, we'll retry using
# exist for the call to come here. different random suffixes 1,000 times before giving up.
if not os_path.exists(self.datadir): """
path = self._container_path data_file = os.path.join(self.put_datadir, self._obj)
subdir_list = self._obj_path.split(os.path.sep)
for i in range(len(subdir_list)):
path = os.path.join(path, subdir_list[i])
if not os_path.exists(path):
self._create_dir_object(path)
# Assume the full directory path exists to the file already, and
# construct the proper name for the temporary file.
for i in range(0, 1000):
tmpfile = '.' + self._obj + '.' + md5(self._obj + tmpfile = '.' + self._obj + '.' + md5(self._obj +
str(random.random())).hexdigest() str(random.random())).hexdigest()
tmppath = os.path.join(self.put_datadir, tmpfile)
self.tmppath = os.path.join(self.datadir, tmpfile)
fd = do_open(self.tmppath, os.O_RDWR | os.O_CREAT | os.O_EXCL)
try: try:
fd = do_open(tmppath,
os.O_WRONLY | os.O_CREAT | os.O_EXCL | O_CLOEXEC)
except GlusterFileSystemOSError as gerr:
if gerr.errno == errno.EEXIST:
# Retry with a different random number.
continue
if gerr.errno == errno.EIO:
# FIXME: Possible FUSE issue or race condition, let's
# sleep on it and retry the operation.
_random_sleep()
logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs", gerr)
continue
if gerr.errno != errno.ENOENT:
# FIXME: Other cases we should handle?
raise
if not self._obj_path:
# No directory hierarchy and the create failed telling us
# the container or volume directory does not exist. This
# could be a FUSE issue or some race condition, so let's
# sleep a bit and retry.
_random_sleep()
logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs", gerr)
continue
if i != 0:
# Got ENOENT after previously making the path. This could
# also be a FUSE issue or some race condition, nap and
# retry.
_random_sleep()
logging.warn("DiskFile.mkstemp(): %s ... retrying in"
" 0.1 secs" % gerr)
continue
# It looks like the path to the object does not already exist
self._create_dir_object(self._obj_path)
continue
else:
break
else:
# We failed after 1,000 attempts to create the temporary file.
raise DiskFileError('DiskFile.mkstemp(): failed to successfully'
' create a temporary file without running'
' into a name conflict after 1,000 attempts'
' for: %s' % (data_file,))
self.tmppath = tmppath
try:
# Ensure it is properly owned before we make it available.
do_fchown(fd, self.uid, self.gid)
if _preallocate and size:
# For XFS, fallocate() turns off speculative pre-allocation
# until a write is issued either to the last block of the file
# before the EOF or beyond the EOF. This means that we are
# less likely to fragment free space with pre-allocated
# extents that get truncated back to the known file size.
# However, this call also turns holes into allocated but
# unwritten extents, so that allocation occurs before the
# write, not during XFS writeback. This effectively defeats
# any allocation optimizations the filesystem can make at
# writeback time.
fallocate(fd, size)
yield fd yield fd
finally: finally:
try: try:
do_close(fd) do_close(fd)
except OSError: except OSError:
pass pass
if self.tmppath:
tmppath, self.tmppath = self.tmppath, None tmppath, self.tmppath = self.tmppath, None
do_unlink(tmppath) do_unlink(tmppath)

View File

@ -18,10 +18,13 @@ import fcntl
import time import time
import errno import errno
import logging import logging
import urllib
from ConfigParser import ConfigParser, NoSectionError, NoOptionError from ConfigParser import ConfigParser, NoSectionError, NoOptionError
from swift.common.utils import TRUE_VALUES, search_tree from swift.common.utils import TRUE_VALUES, search_tree
from gluster.swift.common.fs_utils import mkdirs from gluster.swift.common.fs_utils import do_ismount
from gluster.swift.common.exceptions import GlusterfsException, \
FailureToMountError
# #
# Read the fs.conf file once at startup (module load) # Read the fs.conf file once at startup (module load)
@ -32,6 +35,8 @@ OBJECT_ONLY = True
RUN_DIR = '/var/run/swift' RUN_DIR = '/var/run/swift'
SWIFT_DIR = '/etc/swift' SWIFT_DIR = '/etc/swift'
_do_getsize = False _do_getsize = False
_allow_mount_per_server = False
if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')): if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
try: try:
MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', MOUNT_IP) MOUNT_IP = _fs_conf.get('DEFAULT', 'mount_ip', MOUNT_IP)
@ -55,6 +60,14 @@ if _fs_conf.read(os.path.join(SWIFT_DIR, 'fs.conf')):
except (NoSectionError, NoOptionError): except (NoSectionError, NoOptionError):
pass pass
try:
_allow_mount_per_server = _fs_conf.get('DEFAULT',
'allow_mount_per_server',
_allow_mount_per_server
) in TRUE_VALUES
except (NoSectionError, NoOptionError):
pass
NAME = 'glusterfs' NAME = 'glusterfs'
@ -70,7 +83,97 @@ def _busy_wait(full_mount_path):
return False return False
def _get_unique_id():
# Each individual server will attempt to get a free lock file
# sequentially numbered, storing the pid of the holder of that
# file, That number represents the numbered mount point to use
# for its operations.
if not _allow_mount_per_server:
return 0
try:
os.mkdir(RUN_DIR)
except OSError as err:
if err.errno == errno.EEXIST:
pass
unique_id = 0
lock_file_template = os.path.join(RUN_DIR,
'swift.object-server-%03d.lock')
for i in range(1, 201):
lock_file = lock_file_template % i
fd = os.open(lock_file, os.O_CREAT | os.O_RDWR)
try:
fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError as ex:
os.close(fd)
if ex.errno in (errno.EACCES, errno.EAGAIN):
# This means that some other process has it locked, so they
# own the lock.
continue
raise
except:
os.close(fd)
raise
else:
# We got the lock, write our PID into it, but don't close the
# file, it will be closed when our process exists
os.lseek(fd, 0, os.SEEK_SET)
pid = str(os.getpid()) + '\n'
os.write(fd, pid)
unique_id = i
break
return unique_id
_unique_id = None
def _get_drive_mount_point_name(drive):
"""
Get the GlusterFS mount point name to use for this worker for the target
drive name.
If unique is False, then we just map the drive directly to the mount point
name. If unique is True, then we determine a unique mount point name that
maps to our server PID.
"""
if not _allow_mount_per_server:
# One-to-one mapping of drive to mount point name
mount_point = drive
else:
global _unique_id
if _unique_id is None:
_unique_id = _get_unique_id()
mount_point = ("%s_%03d" % (drive, _unique_id)) \
if _unique_id else drive
return mount_point
def mount(root, drive): def mount(root, drive):
"""
Verify that the path to the device is a mount point and mounted. This
allows us to fast fail on drives that have been unmounted because of
issues, and also prevents us for accidentally filling up the root
partition.
This method effectively replaces the swift.common.constraints.check_mount
method in behavior, adding the ability to auto-mount the volume, which is
dubious (FIXME).
:param root: base path where the devices are mounted
:param drive: drive name to be checked
:returns: True if it is a valid mounted device, False otherwise
"""
if not (urllib.quote_plus(drive) == drive):
return False
mount_point = _get_drive_mount_point_name(drive)
full_mount_path = os.path.join(root, mount_point)
if do_ismount(full_mount_path):
# Don't bother checking volume if it is already a mount point. Allows
# us to use local file systems for unit tests and some functional test
# environments to isolate behaviors from GlusterFS itself.
return True
# FIXME: Possible thundering herd problem here # FIXME: Possible thundering herd problem here
el = _get_export_list() el = _get_export_list()
@ -81,15 +184,34 @@ def mount(root, drive):
logging.error('No export found in %r matching drive, %s', el, drive) logging.error('No export found in %r matching drive, %s', el, drive)
return False return False
# NOTE: root is typically the default value of /mnt/gluster-object try:
full_mount_path = os.path.join(root, drive) os.makedirs(full_mount_path)
if not os.path.isdir(full_mount_path): except OSError as err:
mkdirs(full_mount_path) if err.errno == errno.EEXIST:
pass
else:
logging.exception('Could not create mount path hierarchy:'
' %s' % full_mount_path)
return False
lck_file = os.path.join(RUN_DIR, '%s.lock' % drive) mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export,
full_mount_path)
if not os.path.exists(RUN_DIR): if _allow_mount_per_server:
mkdirs(RUN_DIR) if os.system(mnt_cmd):
logging.exception('Mount failed %s' % (mnt_cmd))
return True
lck_file = os.path.join(RUN_DIR, '%s.lock' % mount_point)
try:
os.mkdir(RUN_DIR)
except OSError as err:
if err.errno == errno.EEXIST:
pass
else:
logging.exception('Could not create RUN_DIR: %s' % full_mount_path)
return False
fd = os.open(lck_file, os.O_CREAT | os.O_RDWR) fd = os.open(lck_file, os.O_CREAT | os.O_RDWR)
with os.fdopen(fd, 'r+b') as f: with os.fdopen(fd, 'r+b') as f:
@ -100,12 +222,8 @@ def mount(root, drive):
# This means that some other process is mounting the # This means that some other process is mounting the
# filesystem, so wait for the mount process to complete # filesystem, so wait for the mount process to complete
return _busy_wait(full_mount_path) return _busy_wait(full_mount_path)
else:
raise ex
mnt_cmd = 'mount -t glusterfs %s:%s %s' % (MOUNT_IP, export,
full_mount_path)
if os.system(mnt_cmd) or not _busy_wait(full_mount_path): if os.system(mnt_cmd) or not _busy_wait(full_mount_path):
logging.error('Mount failed %s: %s', NAME, mnt_cmd) logging.error('Mount failed %s', mnt_cmd)
return False return False
return True return True
@ -115,7 +233,8 @@ def unmount(full_mount_path):
umnt_cmd = 'umount %s 2>> /dev/null' % full_mount_path umnt_cmd = 'umount %s 2>> /dev/null' % full_mount_path
if os.system(umnt_cmd): if os.system(umnt_cmd):
logging.error('Unable to unmount %s %s' % (full_mount_path, NAME)) raise FailureToMountError(
'Unable to unmount %s' % (full_mount_path))
def _get_export_list(): def _get_export_list():
@ -124,7 +243,8 @@ def _get_export_list():
export_list = [] export_list = []
if os.system(cmnd + ' >> /dev/null'): if os.system(cmnd + ' >> /dev/null'):
logging.error('Getting volume info failed for %s', NAME) logging.error('Getting volume info failed, make sure to have'
' passwordless ssh on %s', MOUNT_IP)
else: else:
fp = os.popen(cmnd) fp = os.popen(cmnd)
while True: while True:
@ -139,18 +259,25 @@ def _get_export_list():
def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"): def get_mnt_point(vol_name, conf_dir=SWIFT_DIR, conf_file="object-server*"):
"""Read the object-server's configuration file and return """
the device value""" Read the object-server's configuration file and return
the device value.
:param vol_name: target GlusterFS volume name
:param conf_dir: Swift configuration directory root
:param conf_file: configuration file name for which to search
:returns full path to given target volume name
:raises GlusterfsException if unable to fetch mount point root from
configuration files
"""
mnt_dir = '' mnt_dir = ''
conf_files = search_tree(conf_dir, conf_file, '.conf') conf_files = search_tree(conf_dir, conf_file, '.conf')
if not conf_files: if not conf_files:
raise Exception("Config file not found") raise GlusterfsException("Config file, %s, in directory, %s, "
"not found" % (conf_file, conf_dir))
_conf = ConfigParser() _conf = ConfigParser()
if _conf.read(conf_files[0]): if _conf.read(conf_files[0]):
try:
mnt_dir = _conf.get('DEFAULT', 'devices', '') mnt_dir = _conf.get('DEFAULT', 'devices', '')
except (NoSectionError, NoOptionError):
raise
return os.path.join(mnt_dir, vol_name) return os.path.join(mnt_dir, vol_name)
else:
raise GlusterfsException("Config file, %s, is empty" % conf_files[0])

View File

@ -91,23 +91,8 @@ def gluster_check_object_creation(req, object_name):
# Replace the original check object creation with ours # Replace the original check object creation with ours
swift.common.constraints.check_object_creation = gluster_check_object_creation swift.common.constraints.check_object_creation = gluster_check_object_creation
# Save the original check mount
__check_mount = swift.common.constraints.check_mount
# Define our new one which invokes the original
def gluster_check_mount(root, drive):
# FIXME: Potential performance optimization here to not call the original
# check mount which makes two stat calls. We could do what they do with
# just one.
if __check_mount(root, drive):
return True
return Glusterfs.mount(root, drive)
# Replace the original check mount with ours # Replace the original check mount with ours
swift.common.constraints.check_mount = gluster_check_mount swift.common.constraints.check_mount = Glusterfs.mount
# Save the original Ring class # Save the original Ring class
__Ring = _ring.Ring __Ring = _ring.Ring

View File

@ -14,10 +14,22 @@
# limitations under the License. # limitations under the License.
class GlusterFileSystemOSError(OSError):
pass
class GlusterFileSystemIOError(IOError):
pass
class GlusterfsException(Exception): class GlusterfsException(Exception):
pass pass
class FailureToMountError(GlusterfsException):
pass
class FileOrDirNotFoundError(GlusterfsException): class FileOrDirNotFoundError(GlusterfsException):
pass pass
@ -28,3 +40,7 @@ class NotDirectoryError(GlusterfsException):
class AlreadyExistsAsDir(GlusterfsException): class AlreadyExistsAsDir(GlusterfsException):
pass pass
class AlreadyExistsAsFile(GlusterfsException):
pass

View File

@ -16,10 +16,13 @@
import logging import logging
import os import os
import errno import errno
import stat
import random
import os.path as os_path # noqa import os.path as os_path # noqa
from eventlet import tpool from eventlet import tpool
from eventlet import sleep
from gluster.swift.common.exceptions import FileOrDirNotFoundError, \ from gluster.swift.common.exceptions import FileOrDirNotFoundError, \
NotDirectoryError NotDirectoryError, GlusterFileSystemOSError, GlusterFileSystemIOError
def do_walk(*args, **kwargs): def do_walk(*args, **kwargs):
@ -30,78 +33,191 @@ def do_write(fd, msg):
try: try:
cnt = os.write(fd, msg) cnt = os.write(fd, msg)
except OSError as err: except OSError as err:
logging.exception("Write failed, err: %s", str(err)) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.write("%s", ...)' % (err.strerror, fd))
return cnt return cnt
def do_ismount(path):
    """
    Return True if ``path`` is a mount point, False otherwise.

    Adapted from C Python 2.6.8's posixpath.ismount(), reworked to drop
    the redundant lstat() system call and to surface unexpected stat
    failures as GlusterFileSystemOSError.
    """
    try:
        path_stat = os.lstat(path)
    except os.error as err:
        if err.errno == errno.ENOENT:
            # A nonexistent path cannot be a mount point :-)
            return False
        raise GlusterFileSystemOSError(
            err.errno, '%s, os.lstat("%s")' % (err.strerror, path))

    if stat.S_ISLNK(path_stat.st_mode):
        # A symlink can never be a mount point
        return False

    parent = os.path.join(path, '..')
    try:
        parent_stat = os.lstat(parent)
    except os.error as err:
        raise GlusterFileSystemOSError(
            err.errno, '%s, os.lstat("%s")' % (err.strerror, parent))

    if path_stat.st_dev != parent_stat.st_dev:
        # path/.. lives on a different device than path
        return True
    if path_stat.st_ino == parent_stat.st_ino:
        # path/.. is the very same i-node as path (filesystem root)
        return True
    return False
def do_mkdir(path):
    """
    Create a single directory level at ``path``.

    An already-existing directory is tolerated (a warning is logged);
    any other failure is re-raised as a GlusterFileSystemOSError that
    records the exact syscall attempted.
    """
    try:
        os.mkdir(path)
    except OSError as ose:
        if ose.errno != errno.EEXIST:
            raise GlusterFileSystemOSError(
                ose.errno, '%s, os.mkdir("%s")' % (ose.strerror, path))
        logging.warn("fs_utils: os.mkdir - path %s already exists", path)
def do_listdir(path): def do_listdir(path):
try: try:
buf = os.listdir(path) buf = os.listdir(path)
except OSError as err: except OSError as err:
logging.exception("Listdir failed on %s err: %s", path, err.strerror) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.listdir("%s")' % (err.strerror, path))
return buf return buf
def dir_empty(path):
    """
    Return True if the directory at ``path`` is empty, False otherwise.

    :param path: Directory path
    :returns: True if the directory contains no entries, False otherwise
    :raises FileOrDirNotFoundError: if ``path`` does not exist
    :raises NotDirectoryError: if ``path`` exists but is not a directory
    """
    try:
        files = do_listdir(path)
        return not files
    except GlusterFileSystemOSError as err:
        if err.errno == errno.ENOENT:
            raise FileOrDirNotFoundError()
        if err.errno == errno.ENOTDIR:
            raise NotDirectoryError()
        # Any other listdir failure is propagated unchanged.
        raise
def do_rmdir(path):
    """
    Remove the directory entry at ``path``, translating any failure into
    a GlusterFileSystemOSError that names the exact syscall attempted.
    """
    try:
        os.rmdir(path)
    except OSError as ose:
        msg = '%s, os.rmdir("%s")' % (ose.strerror, path)
        raise GlusterFileSystemOSError(ose.errno, msg)
def do_chown(path, uid, gid): def do_chown(path, uid, gid):
try: try:
os.chown(path, uid, gid) os.chown(path, uid, gid)
except OSError as err: except OSError as err:
logging.exception("Chown failed on %s err: %s", path, err.strerror) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.chown("%s", %s, %s)' % (
return True err.strerror, path, uid, gid))
def do_fchown(fd, uid, gid): def do_fchown(fd, uid, gid):
try: try:
os.fchown(fd, uid, gid) os.fchown(fd, uid, gid)
except OSError as err: except OSError as err:
logging.exception("fchown failed on %d err: %s", fd, err.strerror) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.fchown(%s, %s, %s)' % (
return True err.strerror, fd, uid, gid))
_STAT_ATTEMPTS = 10
def do_stat(path): def do_stat(path):
serr = None
for i in range(0, _STAT_ATTEMPTS):
try: try:
#Check for fd. stats = os.stat(path)
if isinstance(path, int):
buf = os.fstat(path)
else:
buf = os.stat(path)
except OSError as err: except OSError as err:
logging.exception("Stat failed on %s err: %s", path, err.strerror) if err.errno == errno.EIO:
raise # Retry EIO assuming it is a transient error from FUSE after a
return buf # short random sleep
serr = err
sleep(random.uniform(0.001, 0.005))
continue
if err.errno == errno.ENOENT:
stats = None
else:
raise GlusterFileSystemOSError(
err.errno, '%s, os.stat("%s")[%d attempts]' % (
err.strerror, path, i))
if i > 0:
logging.warn("fs_utils.do_stat():"
" os.stat('%s') retried %d times (%s)",
path, i, 'success' if stats else 'failure')
return stats
else:
raise GlusterFileSystemOSError(
serr.errno, '%s, os.stat("%s")[%d attempts]' % (
serr.strerror, path, _STAT_ATTEMPTS))
def do_open(path, mode): def do_fstat(fd):
if isinstance(mode, int):
try: try:
fd = os.open(path, mode) stats = os.fstat(fd)
except OSError as err: except OSError as err:
logging.exception("Open failed on %s err: %s", path, str(err)) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.fstat(%s)' % (err.strerror, fd))
else: return stats
def do_open(path, flags, **kwargs):
if isinstance(flags, int):
try: try:
fd = open(path, mode) fd = os.open(path, flags, **kwargs)
except IOError as err: except OSError as err:
logging.exception("Open failed on %s err: %s", path, str(err)) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.open("%s", %x, %r)' % (
err.strerror, path, flags, kwargs))
return fd return fd
else:
try:
fp = open(path, flags, **kwargs)
except IOError as err:
raise GlusterFileSystemIOError(
err.errno, '%s, open("%s", %s, %r)' % (
err.strerror, path, flags, kwargs))
return fp
def do_close(fd): def do_close(fd):
#fd could be file or int type. if isinstance(fd, file):
try: try:
if isinstance(fd, int):
os.close(fd)
else:
fd.close() fd.close()
except IOError as err:
raise GlusterFileSystemIOError(
err.errno, '%s, os.close(%s)' % (err.strerror, fd))
else:
try:
os.close(fd)
except OSError as err: except OSError as err:
logging.exception("Close failed on %s err: %s", fd, err.strerror) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.close(%s)' % (err.strerror, fd))
return True
def do_unlink(path, log=True): def do_unlink(path, log=True):
@ -109,21 +225,28 @@ def do_unlink(path, log=True):
os.unlink(path) os.unlink(path)
except OSError as err: except OSError as err:
if err.errno != errno.ENOENT: if err.errno != errno.ENOENT:
if log: raise GlusterFileSystemOSError(
logging.exception("Unlink failed on %s err: %s", err.errno, '%s, os.unlink("%s")' % (err.strerror, path))
path, err.strerror) elif log:
raise logging.warn("fs_utils: os.unlink failed on non-existent path: %s",
return True path)
def do_rename(old_path, new_path): def do_rename(old_path, new_path):
try: try:
os.rename(old_path, new_path) os.rename(old_path, new_path)
except OSError as err: except OSError as err:
logging.exception("Rename failed on %s to %s err: %s", raise GlusterFileSystemOSError(
old_path, new_path, err.strerror) err.errno, '%s, os.rename("%s", "%s")' % (
raise err.strerror, old_path, new_path))
return True
def do_fsync(fd):
try:
tpool.execute(os.fsync, fd)
except OSError as err:
raise GlusterFileSystemOSError(
err.errno, '%s, os.fsync("%s")' % (err.strerror, fd))
def mkdirs(path): def mkdirs(path):
@ -133,47 +256,10 @@ def mkdirs(path):
:param path: path to create :param path: path to create
""" """
if not os.path.isdir(path):
try: try:
os.makedirs(path) os.makedirs(path)
except OSError as err: except OSError as err:
if err.errno != errno.EEXIST: if err.errno == errno.EEXIST and os.path.isdir(path):
logging.exception("Makedirs failed on %s err: %s", return
path, err.strerror) raise GlusterFileSystemOSError(
raise err.errno, '%s, os.makedirs("%s")' % (err.strerror, path))
return True
def dir_empty(path):
"""
Return true if directory/container is empty.
:param path: Directory path.
:returns: True/False.
"""
if os.path.isdir(path):
files = do_listdir(path)
return not files
elif not os.path.exists(path):
raise FileOrDirNotFoundError()
raise NotDirectoryError()
def rmdirs(path):
if not os.path.isdir(path):
return False
try:
os.rmdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
logging.error("rmdirs failed on %s, err: %s", path, err.strerror)
return False
return True
def do_fsync(fd):
try:
tpool.execute(os.fsync, fd)
except OSError as err:
logging.exception("fsync failed with err: %s", err.strerror)
raise
return True

View File

@ -24,9 +24,8 @@ from eventlet import sleep
import cPickle as pickle import cPickle as pickle
from swift.common.utils import normalize_timestamp from swift.common.utils import normalize_timestamp
from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \ from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \
do_stat, do_listdir, do_walk, dir_empty, rmdirs do_stat, do_listdir, do_walk, do_rmdir
from gluster.swift.common import Glusterfs from gluster.swift.common import Glusterfs
from gluster.swift.common.exceptions import FileOrDirNotFoundError
X_CONTENT_TYPE = 'Content-Type' X_CONTENT_TYPE = 'Content-Type'
X_CONTENT_LENGTH = 'Content-Length' X_CONTENT_LENGTH = 'Content-Length'
@ -56,11 +55,23 @@ PICKLE_PROTOCOL = 2
CHUNK_SIZE = 65536 CHUNK_SIZE = 65536
def read_metadata(path): class GlusterFileSystemOSError(OSError):
# Having our own class means the name will show up in the stack traces
# recorded in the log files.
pass
class GlusterFileSystemIOError(IOError):
# Having our own class means the name will show up in the stack traces
# recorded in the log files.
pass
def read_metadata(path_or_fd):
""" """
Helper function to read the pickled metadata from a File/Directory. Helper function to read the pickled metadata from a File/Directory.
:param path: File/Directory to read metadata from. :param path_or_fd: File/Directory path or fd from which to read metadata.
:returns: dictionary of metadata :returns: dictionary of metadata
""" """
@ -69,7 +80,7 @@ def read_metadata(path):
key = 0 key = 0
while metadata is None: while metadata is None:
try: try:
metadata_s += xattr.getxattr(path, metadata_s += xattr.getxattr(path_or_fd,
'%s%s' % (METADATA_KEY, (key or ''))) '%s%s' % (METADATA_KEY, (key or '')))
except IOError as err: except IOError as err:
if err.errno == errno.ENODATA: if err.errno == errno.ENODATA:
@ -79,18 +90,17 @@ def read_metadata(path):
# unpickle operation, we consider the metadata lost, and # unpickle operation, we consider the metadata lost, and
# drop the existing data so that the internal state can be # drop the existing data so that the internal state can be
# recreated. # recreated.
clean_metadata(path) clean_metadata(path_or_fd)
# We either could not find any metadata key, or we could find # We either could not find any metadata key, or we could find
# some keys, but were not successful in performing the # some keys, but were not successful in performing the
# unpickling (missing keys perhaps)? Either way, just report # unpickling (missing keys perhaps)? Either way, just report
# to the caller we have no metadata. # to the caller we have no metadata.
metadata = {} metadata = {}
else: else:
logging.exception("xattr.getxattr failed on %s key %s err: %s",
path, key, str(err))
# Note that we don't touch the keys on errors fetching the # Note that we don't touch the keys on errors fetching the
# data since it could be a transient state. # data since it could be a transient state.
raise raise GlusterFileSystemIOError(
err.errno, 'xattr.getxattr("%s", %s)' % (path_or_fd, key))
else: else:
try: try:
# If this key provides all or the remaining part of the pickle # If this key provides all or the remaining part of the pickle
@ -109,38 +119,39 @@ def read_metadata(path):
return metadata return metadata
def write_metadata(path, metadata): def write_metadata(path_or_fd, metadata):
""" """
Helper function to write pickled metadata for a File/Directory. Helper function to write pickled metadata for a File/Directory.
:param path: File/Directory path to write the metadata :param path_or_fd: File/Directory path or fd to write the metadata
:param metadata: dictionary to metadata write :param metadata: dictionary of metadata write
""" """
assert isinstance(metadata, dict) assert isinstance(metadata, dict)
metastr = pickle.dumps(metadata, PICKLE_PROTOCOL) metastr = pickle.dumps(metadata, PICKLE_PROTOCOL)
key = 0 key = 0
while metastr: while metastr:
try: try:
xattr.setxattr(path, xattr.setxattr(path_or_fd,
'%s%s' % (METADATA_KEY, key or ''), '%s%s' % (METADATA_KEY, key or ''),
metastr[:MAX_XATTR_SIZE]) metastr[:MAX_XATTR_SIZE])
except IOError as err: except IOError as err:
logging.exception("setxattr failed on %s key %s err: %s", raise GlusterFileSystemIOError(
path, key, str(err)) err.errno,
raise 'xattr.setxattr("%s", %s, metastr)' % (path_or_fd, key))
metastr = metastr[MAX_XATTR_SIZE:] metastr = metastr[MAX_XATTR_SIZE:]
key += 1 key += 1
def clean_metadata(path): def clean_metadata(path_or_fd):
key = 0 key = 0
while True: while True:
try: try:
xattr.removexattr(path, '%s%s' % (METADATA_KEY, (key or ''))) xattr.removexattr(path_or_fd, '%s%s' % (METADATA_KEY, (key or '')))
except IOError as err: except IOError as err:
if err.errno == errno.ENODATA: if err.errno == errno.ENODATA:
break break
raise raise GlusterFileSystemIOError(
err.errno, 'xattr.removexattr("%s", %s)' % (path_or_fd, key))
key += 1 key += 1
@ -150,9 +161,9 @@ def check_user_xattr(path):
try: try:
xattr.setxattr(path, 'user.test.key1', 'value1') xattr.setxattr(path, 'user.test.key1', 'value1')
except IOError as err: except IOError as err:
logging.exception("check_user_xattr: set failed on %s err: %s", raise GlusterFileSystemIOError(
path, str(err)) err.errno,
raise 'xattr.setxattr("%s", "user.test.key1", "value1")' % (path,))
try: try:
xattr.removexattr(path, 'user.test.key1') xattr.removexattr(path, 'user.test.key1')
except IOError as err: except IOError as err:
@ -245,7 +256,7 @@ def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
object_count += 1 object_count += 1
if Glusterfs._do_getsize and reg_file: if reg_file and Glusterfs._do_getsize:
bytes_used += os_path.getsize(os.path.join(path, obj_name)) bytes_used += os_path.getsize(os.path.join(path, obj_name))
sleep() sleep()
@ -254,7 +265,6 @@ def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
def update_list(path, cont_path, dirs=[], files=[], object_count=0, def update_list(path, cont_path, dirs=[], files=[], object_count=0,
bytes_used=0, obj_list=[]): bytes_used=0, obj_list=[]):
if files: if files:
object_count, bytes_used = _update_list(path, cont_path, files, True, object_count, bytes_used = _update_list(path, cont_path, files, True,
object_count, bytes_used, object_count, bytes_used,
@ -266,22 +276,13 @@ def update_list(path, cont_path, dirs=[], files=[], object_count=0,
return object_count, bytes_used return object_count, bytes_used
class ContainerDetails(object): def get_container_details(cont_path):
def __init__(self, bytes_used, object_count, obj_list, dir_list):
self.bytes_used = bytes_used
self.object_count = object_count
self.obj_list = obj_list
self.dir_list = dir_list
def _get_container_details_from_fs(cont_path):
""" """
get container details by traversing the filesystem get container details by traversing the filesystem
""" """
bytes_used = 0 bytes_used = 0
object_count = 0 object_count = 0
obj_list = [] obj_list = []
dir_list = []
if os_path.isdir(cont_path): if os_path.isdir(cont_path):
for (path, dirs, files) in do_walk(cont_path): for (path, dirs, files) in do_walk(cont_path):
@ -289,36 +290,12 @@ def _get_container_details_from_fs(cont_path):
files, object_count, files, object_count,
bytes_used, obj_list) bytes_used, obj_list)
dir_list.append((path, do_stat(path).st_mtime))
sleep() sleep()
return ContainerDetails(bytes_used, object_count, obj_list, dir_list) return obj_list, object_count, bytes_used
def get_container_details(cont_path): def get_account_details(acc_path):
"""
Return object_list, object_count and bytes_used.
"""
cd = _get_container_details_from_fs(cont_path)
return cd.obj_list, cd.object_count, cd.bytes_used
class AccountDetails(object):
""" A simple class to store the three pieces of information associated
with an account:
1. The last known modification time
2. The count of containers in the following list
3. The list of containers
"""
def __init__(self, mtime, container_count, container_list):
self.mtime = mtime
self.container_count = container_count
self.container_list = container_list
def _get_account_details_from_fs(acc_path):
""" """
Return container_list and container_count. Return container_list and container_count.
""" """
@ -326,6 +303,7 @@ def _get_account_details_from_fs(acc_path):
container_count = 0 container_count = 0
acc_stats = do_stat(acc_path) acc_stats = do_stat(acc_path)
if acc_stats:
is_dir = stat.S_ISDIR(acc_stats.st_mode) is_dir = stat.S_ISDIR(acc_stats.st_mode)
if is_dir: if is_dir:
for name in do_listdir(acc_path): for name in do_listdir(acc_path):
@ -336,25 +314,29 @@ def _get_account_details_from_fs(acc_path):
container_count += 1 container_count += 1
container_list.append(name) container_list.append(name)
return AccountDetails(acc_stats.st_mtime, container_count, container_list) return container_list, container_count
def get_account_details(acc_path):
"""
Return container_list and container_count.
"""
ad = _get_account_details_from_fs(acc_path)
return ad.container_list, ad.container_count
def _get_etag(path): def _get_etag(path):
"""
FIXME: It would be great to have a translator that returns the md5sum() of
the file as an xattr that can be simply fetched.
Since we don't have that we should yield after each chunk read and
computed so that we don't consume the worker thread.
"""
etag = md5() etag = md5()
with open(path, 'rb') as fp: with open(path, 'rb') as fp:
while True: while True:
chunk = fp.read(CHUNK_SIZE) chunk = fp.read(CHUNK_SIZE)
if chunk: if chunk:
etag.update(chunk) etag.update(chunk)
if len(chunk) >= CHUNK_SIZE:
# It is likely that we have more data to be read from the
# file. Yield the co-routine cooperatively to avoid
# consuming the worker during md5sum() calculations on
# large files.
sleep()
else: else:
break break
return etag.hexdigest() return etag.hexdigest()
@ -364,14 +346,11 @@ def get_object_metadata(obj_path):
""" """
Return metadata of object. Return metadata of object.
""" """
try:
stats = do_stat(obj_path) stats = do_stat(obj_path)
except OSError as e: if not stats:
if e.errno != errno.ENOENT:
raise
metadata = {} metadata = {}
else: else:
is_dir = (stats.st_mode & 0040000) != 0 is_dir = stat.S_ISDIR(stats.st_mode)
metadata = { metadata = {
X_TYPE: OBJECT, X_TYPE: OBJECT,
X_TIMESTAMP: normalize_timestamp(stats.st_ctime), X_TIMESTAMP: normalize_timestamp(stats.st_ctime),
@ -444,12 +423,14 @@ def create_object_metadata(obj_path):
def create_container_metadata(cont_path): def create_container_metadata(cont_path):
metadata = get_container_metadata(cont_path) metadata = get_container_metadata(cont_path)
return restore_metadata(cont_path, metadata) rmd = restore_metadata(cont_path, metadata)
return rmd
def create_account_metadata(acc_path): def create_account_metadata(acc_path):
metadata = get_account_metadata(acc_path) metadata = get_account_metadata(acc_path)
return restore_metadata(acc_path, metadata) rmd = restore_metadata(acc_path, metadata)
return rmd
def write_pickle(obj, dest, tmp=None, pickle_protocol=0): def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
@ -498,42 +479,71 @@ def dir_is_object(metadata):
def rmobjdir(dir_path): def rmobjdir(dir_path):
""" """
Removes the directory as long as there are Removes the directory as long as there are no objects stored in it. This
no objects stored in it. This works for containers also. works for containers also.
""" """
try: try:
if dir_empty(dir_path): do_rmdir(dir_path)
rmdirs(dir_path) except OSError as err:
return True if err.errno == errno.ENOENT:
except FileOrDirNotFoundError:
# No such directory exists # No such directory exists
return False return False
if err.errno != errno.ENOTEMPTY:
raise
# Handle this non-empty directories below.
else:
return True
# We have a directory that is not empty, walk it to see if it is filled
# with empty sub-directories that are not user created objects
# (gratuitously created as a result of other object creations).
for (path, dirs, files) in do_walk(dir_path, topdown=False): for (path, dirs, files) in do_walk(dir_path, topdown=False):
for directory in dirs: for directory in dirs:
fullpath = os.path.join(path, directory) fullpath = os.path.join(path, directory)
metadata = read_metadata(fullpath)
if not dir_is_object(metadata):
# Directory is not an object created by the caller
# so we can go ahead and delete it.
try: try:
if dir_empty(fullpath): metadata = read_metadata(fullpath)
rmdirs(fullpath) except OSError as err:
else: if err.errno == errno.ENOENT:
# Non-object dir is not empty! # Ignore removal from another entity.
return False continue
except FileOrDirNotFoundError: raise
# No such dir!
return False
else: else:
if dir_is_object(metadata):
# Wait, this is an object created by the caller # Wait, this is an object created by the caller
# We cannot delete # We cannot delete
return False return False
rmdirs(dir_path)
# Directory is not an object created by the caller
# so we can go ahead and delete it.
try:
do_rmdir(fullpath)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
# Directory is not empty, it might have objects in it
return False
if err.errno == errno.ENOENT:
# No such directory exists, already removed, ignore
continue
raise
try:
do_rmdir(dir_path)
except OSError as err:
if err.errno == errno.ENOTEMPTY:
# Directory is not empty, race with object creation
return False
if err.errno == errno.ENOENT:
# No such directory exists, already removed, ignore
return True
raise
else:
return True return True
# Over-ride Swift's utils.write_pickle with ours # Over-ride Swift's utils.write_pickle with ours
#
# FIXME: Is this even invoked anymore given we don't perform container or
# account updates?
import swift.common.utils import swift.common.utils
swift.common.utils.write_pickle = write_pickle swift.common.utils.write_pickle = write_pickle

View File

@ -1,17 +1,34 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the account-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false mount_check = false
bind_port = 6012 bind_port = 6012
user = root #
log_facility = LOG_LOCAL2
# Override swift's default behaviour for fallocate. # Override swift's default behaviour for fallocate.
disable_fallocate = true disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main] [pipeline:main]
pipeline = account-server pipeline = account-server
[app:account-server] [app:account-server]
use = egg:gluster_swift#account use = egg:gluster_swift#account
user = root
log_facility = LOG_LOCAL2
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the account server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = on
[account-replicator] [account-replicator]
vm_test_mode = yes vm_test_mode = yes

View File

@ -1,17 +1,34 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the container-server workers
# start, you can *consider* setting this value to "false" to reduce the
# per-request overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false mount_check = false
bind_port = 6011 bind_port = 6011
user = root #
log_facility = LOG_LOCAL2
# Override swift's default behaviour for fallocate. # Override swift's default behaviour for fallocate.
disable_fallocate = true disable_fallocate = true
#
# One or two workers should be sufficient for almost any installation of
# Gluster.
workers = 1
[pipeline:main] [pipeline:main]
pipeline = container-server pipeline = container-server
[app:container-server] [app:container-server]
use = egg:gluster_swift#container use = egg:gluster_swift#container
user = root
log_facility = LOG_LOCAL2
#
# After ensuring things are running in a stable manner, you can turn off
# normal request logging for the container server to unclutter the log
# files. Warnings and errors will still be logged.
log_requests = on
[container-replicator] [container-replicator]
vm_test_mode = yes vm_test_mode = yes

View File

@ -1,6 +1,7 @@
[DEFAULT] [DEFAULT]
# IP address of a GlusterFS volume server member. By default, we assume the #
# local host. # IP address of a node in the GlusterFS server cluster hosting the
# volumes to be served via Swift API.
mount_ip = localhost mount_ip = localhost
# By default it is assumed the Gluster volumes can be accessed using other # By default it is assumed the Gluster volumes can be accessed using other
@ -14,4 +15,6 @@ object_only = yes
# numbers of objects, at the expense of an accurate count of combined bytes # numbers of objects, at the expense of an accurate count of combined bytes
# used by all objects in the container. For most installations "off" works # used by all objects in the container. For most installations "off" works
# fine. # fine.
#
# *** Keep on for Functional Tests ***
accurate_size_in_listing = on accurate_size_in_listing = on

View File

@ -1,7 +1,22 @@
[DEFAULT] [DEFAULT]
devices = /mnt/gluster-object devices = /mnt/gluster-object
#
# Once you are confident that your startup processes will always have your
# gluster volumes properly mounted *before* the object-server workers start,
# you can *consider* setting this value to "false" to reduce the per-request
# overhead it can incur.
#
# *** Keep false for Functional Tests ***
mount_check = false mount_check = false
bind_port = 6010 bind_port = 6010
#
# Maximum number of clients one worker can process simultaneously (it will
# actually accept N + 1). Setting this to one (1) will only handle one request
# at a time, without accepting another request concurrently. By increasing the
# number of workers to a much higher value, one can prevent slow file system
# operations for one request from starving other requests.
max_clients = 1024
#
# If not doing the above, setting this value initially to match the number of # If not doing the above, setting this value initially to match the number of
# CPUs is a good starting point for determining the right value. # CPUs is a good starting point for determining the right value.
workers = 1 workers = 1
@ -15,19 +30,20 @@ pipeline = object-server
use = egg:gluster_swift#object use = egg:gluster_swift#object
user = root user = root
log_facility = LOG_LOCAL2 log_facility = LOG_LOCAL2
# Timeout clients that don't read or write to the proxy server after 5 #
# seconds. # For performance, after ensuring things are running in a stable manner, you
conn_timeout = 5 # can turn off normal request logging for the object server to reduce the
# For high load situations, once connected to a container server, allow for # per-request overhead and unclutter the log files. Warnings and errors will
# delays communicating with it. # still be logged.
node_timeout = 60 log_requests = on
#
# Adjust this value to match the stripe width of the underlying storage array # Adjust this value to match the stripe width of the underlying storage array
# (not the stripe element size). This will provide a reasonable starting point # (not the stripe element size). This will provide a reasonable starting point
# for tuning this value. # for tuning this value.
disk_chunk_size = 65536 disk_chunk_size = 65536
# Adjust this value match whatever is set for the disk_chunk_size #
# initially. This will provide a reasonable starting point for tuning this # Adjust this value match whatever is set for the disk_chunk_size initially.
# value. # This will provide a reasonable starting point for tuning this value.
network_chunk_size = 65556 network_chunk_size = 65556
[object-replicator] [object-replicator]

View File

@ -1,7 +1,6 @@
[DEFAULT] [DEFAULT]
bind_port = 8080 bind_port = 8080
user = root user = root
log_facility = LOG_LOCAL1
# Consider using 1 worker per CPU # Consider using 1 worker per CPU
workers = 1 workers = 1

View File

@ -50,7 +50,12 @@ def _init():
def _init_mock_variables(tmpdir): def _init_mock_variables(tmpdir):
os.system = mock_os_system os.system = mock_os_system
os.path.ismount = mock_os_path_ismount os.path.ismount = mock_os_path_ismount
gfs.RUN_DIR = os.path.join(tmpdir, 'var/run/swift') try:
os.makedirs(os.path.join(tmpdir, "var", "run"))
except OSError as err:
if err.errno != errno.EEXIST:
raise
gfs.RUN_DIR = os.path.join(tmpdir, 'var', 'run', 'swift')
gfs._get_export_list = mock_get_export_list gfs._get_export_list = mock_get_export_list
def _reset_mock_variables(): def _reset_mock_variables():
@ -108,8 +113,16 @@ class TestGlusterfs(unittest.TestCase):
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
def test_mount_get_export_list_err(self): def test_mount_get_export_list_err(self):
try:
tmpdir = mkdtemp()
root = os.path.join(tmpdir, 'mnt/gluster-object')
drive = 'test3'
_init_mock_variables(tmpdir)
gfs._get_export_list = mock_get_export_list gfs._get_export_list = mock_get_export_list
assert not gfs.mount(None, 'drive') assert not gfs.mount(root, drive)
finally:
shutil.rmtree(tmpdir)
def tearDown(self): def tearDown(self):
_reset_mock_variables() _reset_mock_variables()

View File

@ -73,6 +73,7 @@ class TestConstraints(unittest.TestCase):
self.assertTrue(cnt.validate_obj_name_component('tests'*(max_obj_len/5+1))) self.assertTrue(cnt.validate_obj_name_component('tests'*(max_obj_len/5+1)))
self.assertTrue(cnt.validate_obj_name_component('.')) self.assertTrue(cnt.validate_obj_name_component('.'))
self.assertTrue(cnt.validate_obj_name_component('..')) self.assertTrue(cnt.validate_obj_name_component('..'))
self.assertTrue(cnt.validate_obj_name_component(''))
def test_gluster_check_object_creation(self): def test_gluster_check_object_creation(self):
with patch('gluster.swift.common.constraints.__check_object_creation', with patch('gluster.swift.common.constraints.__check_object_creation',
@ -83,14 +84,3 @@ class TestConstraints(unittest.TestCase):
with patch('gluster.swift.common.constraints.__check_object_creation', with patch('gluster.swift.common.constraints.__check_object_creation',
mock_check_object_creation): mock_check_object_creation):
self.assertTrue(cnt.gluster_check_object_creation(None, 'dir/.')) self.assertTrue(cnt.gluster_check_object_creation(None, 'dir/.'))
def test_gluster_check_mount(self):
with patch('gluster.swift.common.constraints.__check_mount',
mock_check_mount):
self.assertTrue(cnt.gluster_check_mount('/tmp/drive', 'vol0'))
with patch('gluster.swift.common.constraints.__check_mount',
mock_check_mount_err):
with patch('gluster.swift.common.Glusterfs.mount',
mock_glusterfs_mount):
self.assertTrue(cnt.gluster_check_mount('/tmp/drive', 'vol0'))

View File

@ -21,15 +21,15 @@ import errno
import unittest import unittest
import tempfile import tempfile
import shutil import shutil
from mock import patch
from hashlib import md5 from hashlib import md5
from swift.common.utils import normalize_timestamp from swift.common.utils import normalize_timestamp
from swift.common.exceptions import DiskFileNotExist, DiskFileError from swift.common.exceptions import DiskFileNotExist, DiskFileError
import gluster.swift.common.DiskFile import gluster.swift.common.DiskFile
import gluster.swift.common.utils import gluster.swift.common.utils
from gluster.swift.common.DiskFile import Gluster_DiskFile, \ from gluster.swift.common.DiskFile import Gluster_DiskFile
AlreadyExistsAsDir
from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \ from gluster.swift.common.utils import DEFAULT_UID, DEFAULT_GID, X_TYPE, \
X_OBJECT_TYPE X_OBJECT_TYPE, DIR_OBJECT
from test_utils import _initxattr, _destroyxattr from test_utils import _initxattr, _destroyxattr
from test.unit import FakeLogger from test.unit import FakeLogger
@ -60,21 +60,6 @@ def _mock_rmobjdir(p):
def _mock_do_fsync(fd): def _mock_do_fsync(fd):
return return
def _mock_os_unlink_eacces_err(f):
ose = OSError()
ose.errno = errno.EACCES
raise ose
def _mock_getsize_eaccess_err(f):
ose = OSError()
ose.errno = errno.EACCES
raise ose
def _mock_do_rmdir_eacces_err(f):
ose = OSError()
ose.errno = errno.EACCES
raise ose
class MockRenamerCalled(Exception): class MockRenamerCalled(Exception):
pass pass
@ -293,7 +278,7 @@ class TestDiskFile(unittest.TestCase):
gdf._is_dir = True gdf._is_dir = True
gdf.fp = "123" gdf.fp = "123"
# Should still be a no-op as is_dir is True (marker directory) # Should still be a no-op as is_dir is True (marker directory)
gdf.close() self.assertRaises(AssertionError, gdf.close)
assert gdf.fp == "123" assert gdf.fp == "123"
gdf._is_dir = False gdf._is_dir = False
@ -317,17 +302,39 @@ class TestDiskFile(unittest.TestCase):
gdf.data_file = "/tmp/foo/bar" gdf.data_file = "/tmp/foo/bar"
assert not gdf.is_deleted() assert not gdf.is_deleted()
def test_create_dir_object(self): def test_create_dir_object_no_md(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
the_dir = os.path.join(td, "vol0", "bar", "dir") the_cont = os.path.join(td, "vol0", "bar")
the_dir = "dir"
try: try:
os.makedirs(the_cont)
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar", gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
"dir/z", self.lg) os.path.join(the_dir, "z"), self.lg)
# Not created, dir object path is different, just checking # Not created, dir object path is different, just checking
assert gdf._obj == "z" assert gdf._obj == "z"
gdf._create_dir_object(the_dir) gdf._create_dir_object(the_dir)
assert os.path.isdir(the_dir) full_dir_path = os.path.join(the_cont, the_dir)
assert the_dir in _metadata assert os.path.isdir(full_dir_path)
assert full_dir_path not in _metadata
finally:
shutil.rmtree(td)
def test_create_dir_object_with_md(self):
td = tempfile.mkdtemp()
the_cont = os.path.join(td, "vol0", "bar")
the_dir = "dir"
try:
os.makedirs(the_cont)
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
os.path.join(the_dir, "z"), self.lg)
# Not created, dir object path is different, just checking
assert gdf._obj == "z"
dir_md = {'Content-Type': 'application/directory',
X_OBJECT_TYPE: DIR_OBJECT}
gdf._create_dir_object(the_dir, dir_md)
full_dir_path = os.path.join(the_cont, the_dir)
assert os.path.isdir(full_dir_path)
assert full_dir_path in _metadata
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)
@ -357,6 +364,32 @@ class TestDiskFile(unittest.TestCase):
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)
def test_create_dir_object_do_stat_failure(self):
td = tempfile.mkdtemp()
the_path = os.path.join(td, "vol0", "bar")
the_dir = os.path.join(the_path, "dir")
try:
os.makedirs(the_path)
with open(the_dir, "wb") as fd:
fd.write("1234")
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
"dir/z", self.lg)
# Not created, dir object path is different, just checking
assert gdf._obj == "z"
def _mock_do_chown(p, u, g):
assert u == DEFAULT_UID
assert g == DEFAULT_GID
dc = gluster.swift.common.DiskFile.do_chown
gluster.swift.common.DiskFile.do_chown = _mock_do_chown
self.assertRaises(DiskFileError,
gdf._create_dir_object,
the_dir)
gluster.swift.common.DiskFile.do_chown = dc
self.assertFalse(os.path.isdir(the_dir))
self.assertFalse(the_dir in _metadata)
finally:
shutil.rmtree(td)
def test_put_metadata(self): def test_put_metadata(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
the_path = os.path.join(td, "vol0", "bar") the_path = os.path.join(td, "vol0", "bar")
@ -452,21 +485,24 @@ class TestDiskFile(unittest.TestCase):
def test_put_w_marker_dir_create(self): def test_put_w_marker_dir_create(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
the_path = os.path.join(td, "vol0", "bar") the_cont = os.path.join(td, "vol0", "bar")
the_dir = os.path.join(the_path, "dir") the_dir = os.path.join(the_cont, "dir")
try: try:
os.makedirs(the_cont)
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar", gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
"dir", self.lg) "dir", self.lg)
assert gdf.metadata == {} assert gdf.metadata == {}
newmd = { newmd = {
'Content-Length': 0,
'ETag': 'etag', 'ETag': 'etag',
'X-Timestamp': 'ts', 'X-Timestamp': 'ts',
'Content-Type': 'application/directory'} 'Content-Type': 'application/directory'}
gdf.put(None, newmd, extension='.dir') gdf.put(None, newmd, extension='.dir')
assert gdf.data_file == the_dir assert gdf.data_file == the_dir
assert gdf.metadata == newmd for key,val in newmd.items():
assert _metadata[the_dir] == newmd assert gdf.metadata[key] == val
assert _metadata[the_dir][key] == val
assert gdf.metadata[X_OBJECT_TYPE] == DIR_OBJECT
assert _metadata[the_dir][X_OBJECT_TYPE] == DIR_OBJECT
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)
@ -487,10 +523,11 @@ class TestDiskFile(unittest.TestCase):
newmd['X-Object-Meta-test'] = '1234' newmd['X-Object-Meta-test'] = '1234'
try: try:
gdf.put(None, newmd, extension='.data') gdf.put(None, newmd, extension='.data')
except AlreadyExistsAsDir: except DiskFileError:
pass pass
else: else:
self.fail("Expected to encounter 'already-exists-as-dir' exception") self.fail("Expected to encounter"
" 'already-exists-as-dir' exception")
assert gdf.metadata == origmd assert gdf.metadata == origmd
assert _metadata[the_dir] == origfmd assert _metadata[the_dir] == origfmd
finally: finally:
@ -498,13 +535,15 @@ class TestDiskFile(unittest.TestCase):
def test_put(self): def test_put(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
the_cont = os.path.join(td, "vol0", "bar")
try: try:
os.makedirs(the_cont)
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar", gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
"z", self.lg) "z", self.lg)
assert gdf._obj == "z" assert gdf._obj == "z"
assert gdf._obj_path == "" assert gdf._obj_path == ""
assert gdf.name == "bar" assert gdf.name == "bar"
assert gdf.datadir == os.path.join(td, "vol0", "bar") assert gdf.datadir == the_cont
assert gdf.data_file is None assert gdf.data_file is None
body = '1234\n' body = '1234\n'
@ -656,12 +695,15 @@ class TestDiskFile(unittest.TestCase):
later = float(gdf.metadata['X-Timestamp']) + 1 later = float(gdf.metadata['X-Timestamp']) + 1
def _mock_os_unlink_eacces_err(f):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
stats = os.stat(the_path) stats = os.stat(the_path)
try:
os.chmod(the_path, stats.st_mode & (~stat.S_IWUSR)) os.chmod(the_path, stats.st_mode & (~stat.S_IWUSR))
# Handle the case os_unlink() raises an OSError # Handle the case os_unlink() raises an OSError
__os_unlink = os.unlink with patch("os.unlink", _mock_os_unlink_eacces_err):
os.unlink = _mock_os_unlink_eacces_err
try: try:
gdf.unlinkold(normalize_timestamp(later)) gdf.unlinkold(normalize_timestamp(later))
except OSError as e: except OSError as e:
@ -669,7 +711,6 @@ class TestDiskFile(unittest.TestCase):
else: else:
self.fail("Excepted an OSError when unlinking file") self.fail("Excepted an OSError when unlinking file")
finally: finally:
os.unlink = __os_unlink
os.chmod(the_path, stats.st_mode) os.chmod(the_path, stats.st_mode)
assert os.path.isdir(gdf.datadir) assert os.path.isdir(gdf.datadir)
@ -695,32 +736,6 @@ class TestDiskFile(unittest.TestCase):
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)
def test_unlinkold_is_dir_failure(self):
td = tempfile.mkdtemp()
the_path = os.path.join(td, "vol0", "bar")
the_dir = os.path.join(the_path, "d")
try:
os.makedirs(the_dir)
gdf = Gluster_DiskFile(td, "vol0", "p57", "ufo47", "bar",
"d", self.lg, keep_data_fp=True)
assert gdf.data_file == the_dir
assert gdf._is_dir
stats = os.stat(gdf.datadir)
os.chmod(gdf.datadir, 0)
__os_rmdir = os.rmdir
os.rmdir = _mock_do_rmdir_eacces_err
try:
later = float(gdf.metadata['X-Timestamp']) + 1
gdf.unlinkold(normalize_timestamp(later))
finally:
os.chmod(gdf.datadir, stats.st_mode)
os.rmdir = __os_rmdir
assert os.path.isdir(gdf.datadir)
self.assertTrue(gdf.data_file is None)
finally:
shutil.rmtree(td)
def test_get_data_file_size(self): def test_get_data_file_size(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
the_path = os.path.join(td, "vol0", "bar") the_path = os.path.join(td, "vol0", "bar")
@ -806,9 +821,13 @@ class TestDiskFile(unittest.TestCase):
assert gdf.data_file == the_file assert gdf.data_file == the_file
assert not gdf._is_dir assert not gdf._is_dir
stats = os.stat(the_path) stats = os.stat(the_path)
try:
os.chmod(the_path, 0) os.chmod(the_path, 0)
__os_path_getsize = os.path.getsize
os.path.getsize = _mock_getsize_eaccess_err def _mock_getsize_eaccess_err(f):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
with patch("os.path.getsize", _mock_getsize_eaccess_err):
try: try:
s = gdf.get_data_file_size() s = gdf.get_data_file_size()
except OSError as err: except OSError as err:
@ -816,7 +835,6 @@ class TestDiskFile(unittest.TestCase):
else: else:
self.fail("Expected OSError exception") self.fail("Expected OSError exception")
finally: finally:
os.path.getsize = __os_path_getsize
os.chmod(the_path, stats.st_mode) os.chmod(the_path, stats.st_mode)
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)

View File

@ -24,7 +24,8 @@ from mock import patch
from tempfile import mkdtemp, mkstemp from tempfile import mkdtemp, mkstemp
from gluster.swift.common import fs_utils as fs from gluster.swift.common import fs_utils as fs
from gluster.swift.common.exceptions import NotDirectoryError, \ from gluster.swift.common.exceptions import NotDirectoryError, \
FileOrDirNotFoundError FileOrDirNotFoundError, GlusterFileSystemOSError, \
GlusterFileSystemIOError
def mock_os_fsync(fd): def mock_os_fsync(fd):
return True return True
@ -54,9 +55,135 @@ class TestFsUtils(unittest.TestCase):
finally: finally:
shutil.rmtree(tmpparent) shutil.rmtree(tmpparent)
def test_do_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
assert False == fs.do_ismount(os.path.join(tmpdir, 'bar'))
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
assert False == fs.do_ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
try:
fs.do_ismount(tmpdir)
except GlusterFileSystemOSError as err:
pass
else:
self.fail("Expected GlusterFileSystemOSError")
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
assert False == fs.do_ismount(link)
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_path_is_root(self):
assert True == fs.do_ismount('/')
def test_do_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
try:
fs.do_ismount(tmpdir)
except GlusterFileSystemOSError as err:
pass
else:
self.fail("Expected GlusterFileSystemOSError")
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
try:
fs.do_ismount(tmpdir)
except GlusterFileSystemOSError as err:
self.fail("Unexpected exception")
else:
pass
finally:
shutil.rmtree(tmpdir)
def test_do_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
try:
fs.do_ismount(tmpdir)
except GlusterFileSystemOSError as err:
self.fail("Unexpected exception")
else:
pass
finally:
shutil.rmtree(tmpdir)
def test_do_open(self): def test_do_open(self):
fd, tmpfile = mkstemp() _fd, tmpfile = mkstemp()
try: try:
f = fs.do_open(tmpfile, 'r') f = fs.do_open(tmpfile, 'r')
try: try:
@ -67,27 +194,36 @@ class TestFsUtils(unittest.TestCase):
self.fail("IOError expected") self.fail("IOError expected")
finally: finally:
f.close() f.close()
fd = fs.do_open(tmpfile, os.O_RDONLY)
try:
os.write(fd, 'test')
except OSError as err:
pass
else:
self.fail("OSError expected")
finally: finally:
os.close(fd) os.close(fd)
finally:
os.close(_fd)
os.remove(tmpfile) os.remove(tmpfile)
def test_do_open_err(self): def test_do_open_err(self):
try: try:
fs.do_open(os.path.join('/tmp', str(random.random())), 'r') fs.do_open(os.path.join('/tmp', str(random.random())), 'r')
except IOError: except GlusterFileSystemIOError:
pass pass
else: else:
self.fail("IOError expected") self.fail("GlusterFileSystemIOError expected")
def test_do_open_err_int_mode(self): def test_do_open_err_int_mode(self):
try: try:
fs.do_open(os.path.join('/tmp', str(random.random())), fs.do_open(os.path.join('/tmp', str(random.random())),
os.O_RDONLY) os.O_RDONLY)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
def test_do_write(self): def test_do_write(self):
fd, tmpfile = mkstemp() fd, tmpfile = mkstemp()
@ -104,13 +240,13 @@ class TestFsUtils(unittest.TestCase):
fd1 = os.open(tmpfile, os.O_RDONLY) fd1 = os.open(tmpfile, os.O_RDONLY)
try: try:
fs.do_write(fd1, "test") fs.do_write(fd1, "test")
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
finally: finally:
os.close(fd1) os.close(fd1)
except OSError as ose: except GlusterFileSystemOSError as ose:
self.fail("Open failed with %s" %ose.strerror) self.fail("Open failed with %s" %ose.strerror)
finally: finally:
os.close(fd) os.close(fd)
@ -126,6 +262,72 @@ class TestFsUtils(unittest.TestCase):
finally: finally:
shutil.rmtree(subdir) shutil.rmtree(subdir)
def test_mkdirs_already_dir(self):
tmpdir = mkdtemp()
try:
fs.mkdirs(tmpdir)
except (GlusterFileSystemOSError, OSError):
self.fail("Unexpected exception")
else:
pass
finally:
shutil.rmtree(tmpdir)
def test_mkdirs(self):
tmpdir = mkdtemp()
try:
fs.mkdirs(os.path.join(tmpdir, "a", "b", "c"))
except OSError:
self.fail("Unexpected exception")
else:
pass
finally:
shutil.rmtree(tmpdir)
def test_mkdirs_existing_file(self):
tmpdir = mkdtemp()
fd, tmpfile = mkstemp(dir=tmpdir)
try:
fs.mkdirs(tmpfile)
except OSError:
pass
else:
self.fail("Expected GlusterFileSystemOSError exception")
finally:
os.close(fd)
shutil.rmtree(tmpdir)
def test_mkdirs_existing_file_on_path(self):
tmpdir = mkdtemp()
fd, tmpfile = mkstemp(dir=tmpdir)
try:
fs.mkdirs(os.path.join(tmpfile, 'b'))
except OSError:
pass
else:
self.fail("Expected GlusterFileSystemOSError exception")
finally:
os.close(fd)
shutil.rmtree(tmpdir)
def test_do_mkdir(self):
try:
path = os.path.join('/tmp', str(random.random()))
fs.do_mkdir(path)
assert os.path.exists(path)
assert fs.do_mkdir(path) is None
finally:
os.rmdir(path)
def test_do_mkdir_err(self):
try:
path = os.path.join('/tmp', str(random.random()), str(random.random()))
fs.do_mkdir(path)
except GlusterFileSystemOSError:
pass
else:
self.fail("GlusterFileSystemOSError expected")
def test_do_listdir(self): def test_do_listdir(self):
tmpdir = mkdtemp() tmpdir = mkdtemp()
try: try:
@ -141,38 +343,106 @@ class TestFsUtils(unittest.TestCase):
try: try:
path = os.path.join('/tmp', str(random.random())) path = os.path.join('/tmp', str(random.random()))
fs.do_listdir(path) fs.do_listdir(path)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
def test_do_fstat(self):
tmpdir = mkdtemp()
try:
fd, tmpfile = mkstemp(dir=tmpdir)
buf1 = os.stat(tmpfile)
buf2 = fs.do_fstat(fd)
assert buf1 == buf2
finally:
os.close(fd)
os.remove(tmpfile)
os.rmdir(tmpdir)
def test_do_fstat_err(self):
try:
fs.do_fstat(1000)
except GlusterFileSystemOSError:
pass
else:
self.fail("Expected GlusterFileSystemOSError")
def test_do_stat(self): def test_do_stat(self):
tmpdir = mkdtemp() tmpdir = mkdtemp()
try: try:
fd, tmpfile = mkstemp(dir=tmpdir) fd, tmpfile = mkstemp(dir=tmpdir)
buf1 = os.stat(tmpfile) buf1 = os.stat(tmpfile)
buf2 = fs.do_stat(fd) buf2 = fs.do_stat(tmpfile)
buf3 = fs.do_stat(tmpfile)
assert buf1 == buf2 assert buf1 == buf2
assert buf1 == buf3
finally: finally:
os.close(fd) os.close(fd)
os.remove(tmpfile) os.remove(tmpfile)
os.rmdir(tmpdir) os.rmdir(tmpdir)
def test_do_stat_enoent(self):
res = fs.do_stat(os.path.join('/tmp', str(random.random())))
assert res is None
def test_do_stat_err(self): def test_do_stat_err(self):
def mock_os_stat_eacces(path):
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
try: try:
fs.do_stat(os.path.join('/tmp', str(random.random()))) with patch('os.stat', mock_os_stat_eacces):
except OSError: fs.do_stat('/tmp')
except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
def test_do_stat_eio_once(self):
count = [0]
_os_stat = os.stat
def mock_os_stat_eio(path):
count[0] += 1
if count[0] <= 1:
raise OSError(errno.EIO, os.strerror(errno.EIO))
return _os_stat(path)
with patch('os.stat', mock_os_stat_eio):
fs.do_stat('/tmp') is not None
def test_do_stat_eio_twice(self):
count = [0]
_os_stat = os.stat
def mock_os_stat_eio(path):
count[0] += 1
if count[0] <= 2:
raise OSError(errno.EIO, os.strerror(errno.EIO))
return _os_stat(path)
with patch('os.stat', mock_os_stat_eio):
fs.do_stat('/tmp') is not None
def test_do_stat_eio_ten(self):
def mock_os_stat_eio(path):
raise OSError(errno.EIO, os.strerror(errno.EIO))
try:
with patch('os.stat', mock_os_stat_eio):
fs.do_stat('/tmp')
except GlusterFileSystemOSError:
pass
else:
self.fail("GlusterFileSystemOSError expected")
def test_do_close(self): def test_do_close(self):
fd, tmpfile = mkstemp() fd, tmpfile = mkstemp()
try: try:
fs.do_close(fd); fs.do_close(fd)
try: try:
os.write(fd, "test") os.write(fd, "test")
except OSError: except OSError:
@ -184,26 +454,43 @@ class TestFsUtils(unittest.TestCase):
finally: finally:
os.remove(tmpfile) os.remove(tmpfile)
def test_do_close_err(self): def test_do_close_err_fd(self):
fd, tmpfile = mkstemp() fd, tmpfile = mkstemp()
try: try:
fs.do_close(fd); fs.do_close(fd)
try: try:
fs.do_close(fd); fs.do_close(fd)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
finally:
os.remove(tmpfile)
def test_do_close_err_fp(self):
fd, tmpfile = mkstemp()
os.close(fd)
fp = open(tmpfile, 'w')
try:
fd = fp.fileno()
os.close(fd)
try:
fs.do_close(fp)
except GlusterFileSystemIOError:
pass
else:
self.fail("GlusterFileSystemIOError expected")
finally: finally:
os.remove(tmpfile) os.remove(tmpfile)
def test_do_unlink(self): def test_do_unlink(self):
fd, tmpfile = mkstemp() fd, tmpfile = mkstemp()
try: try:
fs.do_unlink(tmpfile) assert fs.do_unlink(tmpfile) is None
assert not os.path.exists(tmpfile) assert not os.path.exists(tmpfile)
assert fs.do_unlink(os.path.join('/tmp', str(random.random()))) res = fs.do_unlink(os.path.join('/tmp', str(random.random())))
assert res is None
finally: finally:
os.close(fd) os.close(fd)
@ -211,10 +498,10 @@ class TestFsUtils(unittest.TestCase):
tmpdir = mkdtemp() tmpdir = mkdtemp()
try: try:
fs.do_unlink(tmpdir) fs.do_unlink(tmpdir)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail('OSError expected') self.fail('GlusterFileSystemOSError expected')
finally: finally:
os.rmdir(tmpdir) os.rmdir(tmpdir)
@ -233,10 +520,10 @@ class TestFsUtils(unittest.TestCase):
srcpath = os.path.join('/tmp', str(random.random())) srcpath = os.path.join('/tmp', str(random.random()))
destpath = os.path.join('/tmp', str(random.random())) destpath = os.path.join('/tmp', str(random.random()))
fs.do_rename(srcpath, destpath) fs.do_rename(srcpath, destpath)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("OSError expected") self.fail("GlusterFileSystemOSError expected")
def test_dir_empty(self): def test_dir_empty(self):
tmpdir = mkdtemp() tmpdir = mkdtemp()
@ -248,7 +535,18 @@ class TestFsUtils(unittest.TestCase):
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
def test_dir_empty_err(self): def test_dir_empty_err(self):
def _mock_os_listdir(path):
raise OSError(13, "foo")
with patch("os.listdir", _mock_os_listdir):
try: try:
fs.dir_empty("/tmp")
except GlusterFileSystemOSError:
pass
else:
self.fail("GlusterFileSystemOSError exception expected")
def test_dir_empty_notfound(self):
try: try:
assert fs.dir_empty(os.path.join('/tmp', str(random.random()))) assert fs.dir_empty(os.path.join('/tmp', str(random.random())))
except FileOrDirNotFoundError: except FileOrDirNotFoundError:
@ -256,7 +554,9 @@ class TestFsUtils(unittest.TestCase):
else: else:
self.fail("FileOrDirNotFoundError exception expected") self.fail("FileOrDirNotFoundError exception expected")
def test_dir_empty_notdir(self):
fd, tmpfile = mkstemp() fd, tmpfile = mkstemp()
try:
try: try:
fs.dir_empty(tmpfile) fs.dir_empty(tmpfile)
except NotDirectoryError: except NotDirectoryError:
@ -267,14 +567,26 @@ class TestFsUtils(unittest.TestCase):
os.close(fd) os.close(fd)
os.unlink(tmpfile) os.unlink(tmpfile)
def test_rmdirs(self): def test_do_rmdir(self):
tmpdir = mkdtemp() tmpdir = mkdtemp()
try: try:
subdir = mkdtemp(dir=tmpdir) subdir = mkdtemp(dir=tmpdir)
fd, tmpfile = mkstemp(dir=tmpdir) fd, tmpfile = mkstemp(dir=tmpdir)
assert not fs.rmdirs(tmpfile) try:
assert not fs.rmdirs(tmpdir) fs.do_rmdir(tmpfile)
assert fs.rmdirs(subdir) except GlusterFileSystemOSError:
pass
else:
self.fail("Expected GlusterFileSystemOSError")
assert os.path.exists(subdir)
try:
fs.do_rmdir(tmpdir)
except GlusterFileSystemOSError:
pass
else:
self.fail("Expected GlusterFileSystemOSError")
assert os.path.exists(subdir)
fs.do_rmdir(subdir)
assert not os.path.exists(subdir) assert not os.path.exists(subdir)
finally: finally:
os.close(fd) os.close(fd)
@ -290,11 +602,12 @@ class TestFsUtils(unittest.TestCase):
else: else:
try: try:
fs.do_chown(subdir, 20000, 20000) fs.do_chown(subdir, 20000, 20000)
except OSError as ex: except GlusterFileSystemOSError as ex:
if ex.errno != errno.EPERM: if ex.errno != errno.EPERM:
self.fail("Expected OSError") self.fail(
"Expected GlusterFileSystemOSError(errno=EPERM)")
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
finally: finally:
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
@ -308,11 +621,12 @@ class TestFsUtils(unittest.TestCase):
else: else:
try: try:
fs.do_chown(tmpfile, 20000, 20000) fs.do_chown(tmpfile, 20000, 20000)
except OSError as ex: except GlusterFileSystemOSError as ex:
if ex.errno != errno.EPERM: if ex.errno != errno.EPERM:
self.fail("Expected OSError") self.fail(
"Expected GlusterFileSystemOSError(errno=EPERM")
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
finally: finally:
os.close(fd) os.close(fd)
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
@ -321,10 +635,10 @@ class TestFsUtils(unittest.TestCase):
try: try:
fs.do_chown(os.path.join('/tmp', str(random.random())), fs.do_chown(os.path.join('/tmp', str(random.random())),
20000, 20000) 20000, 20000)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
def test_fchown(self): def test_fchown(self):
tmpdir = mkdtemp() tmpdir = mkdtemp()
@ -336,11 +650,12 @@ class TestFsUtils(unittest.TestCase):
else: else:
try: try:
fs.do_fchown(fd, 20000, 20000) fs.do_fchown(fd, 20000, 20000)
except OSError as ex: except GlusterFileSystemOSError as ex:
if ex.errno != errno.EPERM: if ex.errno != errno.EPERM:
self.fail("Expected OSError") self.fail(
"Expected GlusterFileSystemOSError(errno=EPERM)")
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
finally: finally:
os.close(fd) os.close(fd)
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)
@ -356,11 +671,12 @@ class TestFsUtils(unittest.TestCase):
else: else:
try: try:
fs.do_fchown(fd_rd, 20000, 20000) fs.do_fchown(fd_rd, 20000, 20000)
except OSError as ex: except GlusterFileSystemOSError as ex:
if ex.errno != errno.EPERM: if ex.errno != errno.EPERM:
self.fail("Expected OSError") self.fail(
"Expected GlusterFileSystemOSError(errno=EPERM)")
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
finally: finally:
os.close(fd_rd) os.close(fd_rd)
os.close(fd) os.close(fd)
@ -374,8 +690,8 @@ class TestFsUtils(unittest.TestCase):
os.write(fd, 'test') os.write(fd, 'test')
with patch('eventlet.tpool.execute', mock_tpool_execute): with patch('eventlet.tpool.execute', mock_tpool_execute):
with patch('os.fsync', mock_os_fsync): with patch('os.fsync', mock_os_fsync):
assert fs.do_fsync(fd) assert fs.do_fsync(fd) is None
except OSError as ose: except GlusterFileSystemOSError as ose:
self.fail('Opening a temporary file failed with %s' %ose.strerror) self.fail('Opening a temporary file failed with %s' %ose.strerror)
else: else:
os.close(fd) os.close(fd)
@ -390,13 +706,13 @@ class TestFsUtils(unittest.TestCase):
os.write(fd, 'test') os.write(fd, 'test')
with patch('eventlet.tpool.execute', mock_tpool_execute): with patch('eventlet.tpool.execute', mock_tpool_execute):
with patch('os.fsync', mock_os_fsync): with patch('os.fsync', mock_os_fsync):
assert fs.do_fsync(fd) assert fs.do_fsync(fd) is None
os.close(fd) os.close(fd)
try: try:
fs.do_fsync(fd) fs.do_fsync(fd)
except OSError: except GlusterFileSystemOSError:
pass pass
else: else:
self.fail("Expected OSError") self.fail("Expected GlusterFileSystemOSError")
finally: finally:
shutil.rmtree(tmpdir) shutil.rmtree(tmpdir)

View File

@ -21,7 +21,7 @@ from gluster.swift.common.ring import Ring
class TestRing(unittest.TestCase): class TestRing(unittest.TestCase):
""" Tests for common.utils """ """ Tests for common.ring """
def setUp(self): def setUp(self):
swift.common.utils.HASH_PATH_SUFFIX = 'endcap' swift.common.utils.HASH_PATH_SUFFIX = 'endcap'
@ -66,4 +66,3 @@ class TestRing(unittest.TestCase):
def test_invalid_partition(self): def test_invalid_partition(self):
nodes = self.ring.get_part_nodes(0) nodes = self.ring.get_part_nodes(0)
self.assertEqual(nodes[0]['device'], 'volume_not_in_ring') self.assertEqual(nodes[0]['device'], 'volume_not_in_ring')

View File

@ -25,8 +25,10 @@ import hashlib
import tarfile import tarfile
import shutil import shutil
from collections import defaultdict from collections import defaultdict
from mock import patch
from swift.common.utils import normalize_timestamp from swift.common.utils import normalize_timestamp
from gluster.swift.common import utils, Glusterfs from gluster.swift.common import utils, Glusterfs
from gluster.swift.common.exceptions import GlusterFileSystemOSError
# #
# Somewhat hacky way of emulating the operation of xattr calls. They are made # Somewhat hacky way of emulating the operation of xattr calls. They are made
@ -128,6 +130,10 @@ class SimMemcache(object):
self._d[key] = value self._d[key] = value
def _mock_os_fsync(fd):
return
class TestUtils(unittest.TestCase): class TestUtils(unittest.TestCase):
""" Tests for common.utils """ """ Tests for common.utils """
@ -338,8 +344,9 @@ class TestUtils(unittest.TestCase):
def test_get_object_metadata_err(self): def test_get_object_metadata_err(self):
tf = tempfile.NamedTemporaryFile() tf = tempfile.NamedTemporaryFile()
try: try:
md = utils.get_object_metadata(os.path.join(tf.name,"doesNotEx1st")) md = utils.get_object_metadata(
except OSError as e: os.path.join(tf.name, "doesNotEx1st"))
except GlusterFileSystemOSError as e:
assert e.errno != errno.ENOENT assert e.errno != errno.ENOENT
else: else:
self.fail("Expected exception") self.fail("Expected exception")
@ -520,7 +527,7 @@ class TestUtils(unittest.TestCase):
finally: finally:
os.rmdir(td) os.rmdir(td)
def test_get_account_details_from_fs(self): def test_get_account_details(self):
orig_cwd = os.getcwd() orig_cwd = os.getcwd()
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
try: try:
@ -528,23 +535,28 @@ class TestUtils(unittest.TestCase):
os.chdir(td) os.chdir(td)
tf.extractall() tf.extractall()
ad = utils._get_account_details_from_fs(td) container_list, container_count = utils.get_account_details(td)
assert ad.mtime == os.path.getmtime(td) assert container_count == 3
assert ad.container_count == 3 assert set(container_list) == set(['c1', 'c2', 'c3'])
assert set(ad.container_list) == set(['c1', 'c2', 'c3'])
finally: finally:
os.chdir(orig_cwd) os.chdir(orig_cwd)
shutil.rmtree(td) shutil.rmtree(td)
def test_get_container_details_from_fs_notadir(self): def test_get_account_details_notadir(self):
tf = tempfile.NamedTemporaryFile() tf = tempfile.NamedTemporaryFile()
cd = utils._get_container_details_from_fs(tf.name) container_list, container_count = utils.get_account_details(tf.name)
assert cd.bytes_used == 0 assert container_count == 0
assert cd.object_count == 0 assert container_list == []
assert cd.obj_list == []
assert cd.dir_list == []
def test_get_container_details_from_fs(self): def test_get_container_details_notadir(self):
tf = tempfile.NamedTemporaryFile()
obj_list, object_count, bytes_used = \
utils.get_container_details(tf.name)
assert bytes_used == 0
assert object_count == 0
assert obj_list == []
def test_get_container_details(self):
orig_cwd = os.getcwd() orig_cwd = os.getcwd()
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
try: try:
@ -552,13 +564,14 @@ class TestUtils(unittest.TestCase):
os.chdir(td) os.chdir(td)
tf.extractall() tf.extractall()
cd = utils._get_container_details_from_fs(td) obj_list, object_count, bytes_used = \
assert cd.bytes_used == 0, repr(cd.bytes_used) utils.get_container_details(td)
assert bytes_used == 0, repr(bytes_used)
# Should not include the directories # Should not include the directories
assert cd.object_count == 5, repr(cd.object_count) assert object_count == 5, repr(object_count)
assert set(cd.obj_list) == set(['file1', 'file3', 'file2', assert set(obj_list) == set(['file1', 'file3', 'file2',
'dir1/file1', 'dir1/file2' 'dir1/file1', 'dir1/file2'
]), repr(cd.obj_list) ]), repr(obj_list)
full_dir1 = os.path.join(td, 'dir1') full_dir1 = os.path.join(td, 'dir1')
full_dir2 = os.path.join(td, 'dir2') full_dir2 = os.path.join(td, 'dir2')
@ -568,14 +581,11 @@ class TestUtils(unittest.TestCase):
full_dir2: os.path.getmtime(full_dir2), full_dir2: os.path.getmtime(full_dir2),
full_dir3: os.path.getmtime(full_dir3), full_dir3: os.path.getmtime(full_dir3),
} }
for d,m in cd.dir_list:
assert d in exp_dir_dict
assert exp_dir_dict[d] == m
finally: finally:
os.chdir(orig_cwd) os.chdir(orig_cwd)
shutil.rmtree(td) shutil.rmtree(td)
def test_get_container_details_from_fs_ufo(self): def test_get_container_details_ufo(self):
orig_cwd = os.getcwd() orig_cwd = os.getcwd()
__obj_only = Glusterfs.OBJECT_ONLY __obj_only = Glusterfs.OBJECT_ONLY
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
@ -586,13 +596,14 @@ class TestUtils(unittest.TestCase):
Glusterfs.OBJECT_ONLY = False Glusterfs.OBJECT_ONLY = False
cd = utils._get_container_details_from_fs(td) obj_list, object_count, bytes_used = \
assert cd.bytes_used == 0, repr(cd.bytes_used) utils.get_container_details(td)
assert cd.object_count == 8, repr(cd.object_count) assert bytes_used == 0, repr(bytes_used)
assert set(cd.obj_list) == set(['file1', 'file3', 'file2', assert object_count == 8, repr(object_count)
assert set(obj_list) == set(['file1', 'file3', 'file2',
'dir3', 'dir1', 'dir2', 'dir3', 'dir1', 'dir2',
'dir1/file1', 'dir1/file2' 'dir1/file1', 'dir1/file2'
]), repr(cd.obj_list) ]), repr(obj_list)
full_dir1 = os.path.join(td, 'dir1') full_dir1 = os.path.join(td, 'dir1')
full_dir2 = os.path.join(td, 'dir2') full_dir2 = os.path.join(td, 'dir2')
@ -602,9 +613,6 @@ class TestUtils(unittest.TestCase):
full_dir2: os.path.getmtime(full_dir2), full_dir2: os.path.getmtime(full_dir2),
full_dir3: os.path.getmtime(full_dir3), full_dir3: os.path.getmtime(full_dir3),
} }
for d,m in cd.dir_list:
assert d in exp_dir_dict
assert exp_dir_dict[d] == m
finally: finally:
os.chdir(orig_cwd) os.chdir(orig_cwd)
shutil.rmtree(td) shutil.rmtree(td)
@ -621,12 +629,13 @@ class TestUtils(unittest.TestCase):
Glusterfs._do_getsize = True Glusterfs._do_getsize = True
cd = utils._get_container_details_from_fs(td) obj_list, object_count, bytes_used = \
assert cd.bytes_used == 30, repr(cd.bytes_used) utils.get_container_details(td)
assert cd.object_count == 5, repr(cd.object_count) assert bytes_used == 30, repr(bytes_used)
assert set(cd.obj_list) == set(['file1', 'file3', 'file2', assert object_count == 5, repr(object_count)
assert set(obj_list) == set(['file1', 'file3', 'file2',
'dir1/file1', 'dir1/file2' 'dir1/file1', 'dir1/file2'
]), repr(cd.obj_list) ]), repr(obj_list)
full_dir1 = os.path.join(td, 'dir1') full_dir1 = os.path.join(td, 'dir1')
full_dir2 = os.path.join(td, 'dir2') full_dir2 = os.path.join(td, 'dir2')
@ -636,32 +645,17 @@ class TestUtils(unittest.TestCase):
full_dir2: os.path.getmtime(full_dir2), full_dir2: os.path.getmtime(full_dir2),
full_dir3: os.path.getmtime(full_dir3), full_dir3: os.path.getmtime(full_dir3),
} }
for d,m in cd.dir_list:
assert d in exp_dir_dict
assert exp_dir_dict[d] == m
finally: finally:
Glusterfs._do_getsize = __do_getsize Glusterfs._do_getsize = __do_getsize
os.chdir(orig_cwd) os.chdir(orig_cwd)
shutil.rmtree(td) shutil.rmtree(td)
def test_get_account_details_from_fs_notadir_w_stats(self):
tf = tempfile.NamedTemporaryFile()
ad = utils._get_account_details_from_fs(tf.name)
assert ad.mtime == os.path.getmtime(tf.name)
assert ad.container_count == 0
assert ad.container_list == []
def test_get_account_details_from_fs_notadir(self):
tf = tempfile.NamedTemporaryFile()
ad = utils._get_account_details_from_fs(tf.name)
assert ad.mtime == os.path.getmtime(tf.name)
assert ad.container_count == 0
assert ad.container_list == []
def test_write_pickle(self): def test_write_pickle(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
try: try:
fpp = os.path.join(td, 'pp') fpp = os.path.join(td, 'pp')
# FIXME: Remove this patch when coverage.py can handle eventlet
with patch("os.fsync", _mock_os_fsync):
utils.write_pickle('pickled peppers', fpp) utils.write_pickle('pickled peppers', fpp)
with open(fpp, "rb") as f: with open(fpp, "rb") as f:
contents = f.read() contents = f.read()
@ -676,7 +670,10 @@ class TestUtils(unittest.TestCase):
try: try:
fpp = os.path.join(td, 'pp') fpp = os.path.join(td, 'pp')
# Also test an explicity pickle protocol # Also test an explicity pickle protocol
utils.write_pickle('pickled peppers', fpp, tmp=tf.name, pickle_protocol=2) # FIXME: Remove this patch when coverage.py can handle eventlet
with patch("os.fsync", _mock_os_fsync):
utils.write_pickle('pickled peppers', fpp, tmp=tf.name,
pickle_protocol=2)
with open(fpp, "rb") as f: with open(fpp, "rb") as f:
contents = f.read() contents = f.read()
s = pickle.loads(contents) s = pickle.loads(contents)
@ -855,3 +852,168 @@ class TestUtilsDirObjects(unittest.TestCase):
self.assertFalse(utils.rmobjdir(self.rootdir)) self.assertFalse(utils.rmobjdir(self.rootdir))
self._clear_dir_object(self.dirs[0]) self._clear_dir_object(self.dirs[0])
self.assertTrue(utils.rmobjdir(self.rootdir)) self.assertTrue(utils.rmobjdir(self.rootdir))
def test_rmobjdir_metadata_errors(self):
def _mock_rm(path):
print "_mock_rm-metadata_errors(%s)" % path
if path.endswith("dir3"):
raise OSError(13, "foo")
return {}
_orig_rm = utils.read_metadata
utils.read_metadata = _mock_rm
try:
try:
utils.rmobjdir(self.rootdir)
except OSError:
pass
else:
self.fail("Expected OSError")
finally:
utils.read_metadata = _orig_rm
def test_rmobjdir_metadata_enoent(self):
def _mock_rm(path):
print "_mock_rm-metadata_enoent(%s)" % path
shutil.rmtree(path)
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
# Remove the files
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
_orig_rm = utils.read_metadata
utils.read_metadata = _mock_rm
try:
try:
self.assertTrue(utils.rmobjdir(self.rootdir))
except OSError:
self.fail("Unexpected OSError")
else:
pass
finally:
utils.read_metadata = _orig_rm
def test_rmobjdir_rmdir_enoent(self):
seen = [0]
_orig_rm = utils.do_rmdir
def _mock_rm(path):
print "_mock_rm-rmdir_enoent(%s)" % path
if path == self.rootdir and not seen[0]:
seen[0] = 1
raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))
else:
shutil.rmtree(path)
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
# Remove the files
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
utils.do_rmdir = _mock_rm
try:
try:
self.assertTrue(utils.rmobjdir(self.rootdir))
except OSError:
self.fail("Unexpected OSError")
else:
pass
finally:
utils.do_rmdir = _orig_rm
def test_rmobjdir_rmdir_error(self):
seen = [0]
_orig_rm = utils.do_rmdir
def _mock_rm(path):
print "_mock_rm-rmdir_enoent(%s)" % path
if path == self.rootdir and not seen[0]:
seen[0] = 1
raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))
else:
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
# Remove the files
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
utils.do_rmdir = _mock_rm
try:
try:
utils.rmobjdir(self.rootdir)
except OSError:
pass
else:
self.fail("Expected OSError")
finally:
utils.do_rmdir = _orig_rm
def test_rmobjdir_files_left_in_top_dir(self):
seen = [0]
_orig_rm = utils.do_rmdir
def _mock_rm(path):
print "_mock_rm-files_left_in_top_dir(%s)" % path
if path == self.rootdir:
if not seen[0]:
seen[0] = 1
raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))
else:
return _orig_rm(path)
else:
shutil.rmtree(path)
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
# Remove the files, leaving the ones at the root
for f in self.files:
if f.startswith('dir'):
os.unlink(os.path.join(self.rootdir, f))
utils.do_rmdir = _mock_rm
try:
try:
self.assertFalse(utils.rmobjdir(self.rootdir))
except OSError:
self.fail("Unexpected OSError")
else:
pass
finally:
utils.do_rmdir = _orig_rm
def test_rmobjdir_error_final_rmdir(self):
seen = [0]
_orig_rm = utils.do_rmdir
def _mock_rm(path):
print "_mock_rm-files_left_in_top_dir(%s)" % path
if path == self.rootdir:
if not seen[0]:
seen[0] = 1
raise OSError(errno.ENOTEMPTY, os.strerror(errno.ENOTEMPTY))
else:
raise OSError(errno.EACCES, os.strerror(errno.EACCES))
else:
shutil.rmtree(path)
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT))
# Remove the files, leaving the ones at the root
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
utils.do_rmdir = _mock_rm
try:
try:
utils.rmobjdir(self.rootdir)
except OSError:
pass
else:
self.fail("Expected OSError")
finally:
utils.do_rmdir = _orig_rm