OpenStack Swift Functional Tests for G4S

This commit has the following changes:
  * G4S no longer accepts URLs that end in /.  A HTTP code
of 400 is returned when a / at the end of the object
is detected.

  * Directories can be created as objects setting the
content-type to application/directory and content-length
to 0.

  * Functional tests have been adjusted to work with
G4S constraints

Change-Id: I31038a59699a8e3eeaba902db322218c6400093e
Signed-off-by: Luis Pabon <lpabon@redhat.com>
Reviewed-on: http://review.gluster.org/5246
Reviewed-by: Peter Portante <pportant@redhat.com>
Tested-by: Peter Portante <pportant@redhat.com>
This commit is contained in:
Luis Pabon 2013-06-21 16:41:50 -04:00 committed by Peter Portante
parent 92fdc68d6b
commit 5cef798f8d
9 changed files with 676 additions and 71 deletions

View File

@ -16,15 +16,14 @@
import os import os
import errno import errno
from gluster.swift.common.fs_utils import dir_empty, rmdirs, mkdirs, os_path, \ from gluster.swift.common.fs_utils import dir_empty, mkdirs, os_path, do_chown
do_chown
from gluster.swift.common.utils import validate_account, validate_container, \ from gluster.swift.common.utils import validate_account, validate_container, \
get_container_details, get_account_details, create_container_metadata, \ get_container_details, get_account_details, create_container_metadata, \
create_account_metadata, DEFAULT_GID, get_container_metadata, \ create_account_metadata, DEFAULT_GID, get_container_metadata, \
get_account_metadata, DEFAULT_UID, validate_object, \ get_account_metadata, DEFAULT_UID, validate_object, \
create_object_metadata, read_metadata, write_metadata, X_CONTENT_TYPE, \ create_object_metadata, read_metadata, write_metadata, X_CONTENT_TYPE, \
X_CONTENT_LENGTH, X_TIMESTAMP, X_PUT_TIMESTAMP, X_ETAG, X_OBJECTS_COUNT, \ X_CONTENT_LENGTH, X_TIMESTAMP, X_PUT_TIMESTAMP, X_ETAG, X_OBJECTS_COUNT, \
X_BYTES_USED, X_CONTAINER_COUNT, DIR_TYPE X_BYTES_USED, X_CONTAINER_COUNT, DIR_TYPE, rmobjdir, dir_is_object
from gluster.swift.common import Glusterfs from gluster.swift.common import Glusterfs
from gluster.swift.common.exceptions import FileOrDirNotFoundError from gluster.swift.common.exceptions import FileOrDirNotFoundError
@ -333,10 +332,9 @@ class DiskDir(DiskCommon):
else: else:
return container_list return container_list
if objects and end_marker: if end_marker:
objects = filter_end_marker(objects, end_marker) objects = filter_end_marker(objects, end_marker)
if objects:
if marker and marker >= prefix: if marker and marker >= prefix:
objects = filter_marker(objects, marker) objects = filter_marker(objects, marker)
elif prefix: elif prefix:
@ -376,7 +374,8 @@ class DiskDir(DiskCommon):
if e.errno != errno.ENOENT: if e.errno != errno.ENOENT:
raise raise
if Glusterfs.OBJECT_ONLY and metadata \ if Glusterfs.OBJECT_ONLY and metadata \
and metadata[X_CONTENT_TYPE] == DIR_TYPE: and metadata[X_CONTENT_TYPE] == DIR_TYPE \
and not dir_is_object(metadata):
continue continue
list_item = [] list_item = []
list_item.append(obj) list_item.append(obj)
@ -484,19 +483,22 @@ class DiskDir(DiskCommon):
# within a directory implicitly. # within a directory implicitly.
return return
def empty(self):
try:
return dir_empty(self.datadir)
except FileOrDirNotFoundError:
return True
def delete_db(self, timestamp): def delete_db(self, timestamp):
""" """
Delete the container (directory) if empty. Delete the container (directory) if empty.
:param timestamp: delete timestamp :param timestamp: delete timestamp
""" """
try: # Let's check and see if it has directories that
if not dir_empty(self.datadir): # where created by the code, but not by the
# FIXME: This is a failure condition here, isn't it? # caller as objects
return rmobjdir(self.datadir)
except FileOrDirNotFoundError:
return
rmdirs(self.datadir)
def set_x_container_sync_points(self, sync_point1, sync_point2): def set_x_container_sync_points(self, sync_point1, sync_point2):
self.metadata['x_container_sync_point1'] = sync_point1 self.metadata['x_container_sync_point1'] = sync_point1

View File

@ -19,17 +19,16 @@ import random
from hashlib import md5 from hashlib import md5
from contextlib import contextmanager from contextlib import contextmanager
from swift.common.utils import renamer from swift.common.utils import renamer
from swift.common.exceptions import DiskFileNotExist from swift.common.exceptions import DiskFileNotExist, DiskFileError
from gluster.swift.common.exceptions import AlreadyExistsAsDir from gluster.swift.common.exceptions import AlreadyExistsAsDir
from gluster.swift.common.fs_utils import mkdirs, rmdirs, do_open, do_close, \ from gluster.swift.common.fs_utils import mkdirs, do_open, do_close, \
do_unlink, do_chown, os_path, do_fsync, do_fchown do_unlink, do_chown, os_path, do_fsync, do_fchown
from gluster.swift.common.utils import read_metadata, write_metadata, \ from gluster.swift.common.utils import read_metadata, write_metadata, \
validate_object, create_object_metadata validate_object, create_object_metadata, rmobjdir, dir_is_object
from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \ from gluster.swift.common.utils import X_CONTENT_LENGTH, X_CONTENT_TYPE, \
X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, MARKER_DIR, OBJECT, DIR_TYPE, \ X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT
import logging
from swift.obj.server import DiskFile from swift.obj.server import DiskFile
@ -50,12 +49,14 @@ def _adjust_metadata(metadata):
# FIXME: If the file exists, we would already know it is a # FIXME: If the file exists, we would already know it is a
# directory. So why are we assuming it is a file object? # directory. So why are we assuming it is a file object?
metadata[X_CONTENT_TYPE] = FILE_TYPE metadata[X_CONTENT_TYPE] = FILE_TYPE
x_object_type = FILE metadata[X_OBJECT_TYPE] = FILE
else: else:
x_object_type = MARKER_DIR if content_type.lower() == DIR_TYPE \ if content_type.lower() == DIR_TYPE:
else FILE metadata[X_OBJECT_TYPE] = DIR_OBJECT
else:
metadata[X_OBJECT_TYPE] = FILE
metadata[X_TYPE] = OBJECT metadata[X_TYPE] = OBJECT
metadata[X_OBJECT_TYPE] = x_object_type
return metadata return metadata
@ -63,6 +64,12 @@ class Gluster_DiskFile(DiskFile):
""" """
Manage object files on disk. Manage object files on disk.
Object names ending or beginning with a '/' as in /a, a/, /a/b/,
etc, or object names with multiple consecutive slashes, like a//b,
are not supported. The proxy server's constraints filter
gluster.common.constraints.gluster_check_object_creation() should
reject such requests.
:param path: path to devices on the node/mount path for UFO. :param path: path to devices on the node/mount path for UFO.
:param device: device name/account_name for UFO. :param device: device name/account_name for UFO.
:param partition: partition on the device the object lives in :param partition: partition on the device the object lives in
@ -82,9 +89,8 @@ class Gluster_DiskFile(DiskFile):
uid=DEFAULT_UID, gid=DEFAULT_GID, iter_hook=None): uid=DEFAULT_UID, gid=DEFAULT_GID, iter_hook=None):
self.disk_chunk_size = disk_chunk_size self.disk_chunk_size = disk_chunk_size
self.iter_hook = iter_hook self.iter_hook = iter_hook
# Don't support obj_name ending/begining with '/', like /a, a/, /a/b/,
# etc.
obj = obj.strip(os.path.sep) obj = obj.strip(os.path.sep)
if os.path.sep in obj: if os.path.sep in obj:
self._obj_path, self._obj = os.path.split(obj) self._obj_path, self._obj = os.path.split(obj)
else: else:
@ -167,14 +173,13 @@ class Gluster_DiskFile(DiskFile):
return not self.data_file return not self.data_file
def _create_dir_object(self, dir_path): def _create_dir_object(self, dir_path):
#TODO: if object already exists??? if not os_path.exists(dir_path):
if os_path.exists(dir_path) and not os_path.isdir(dir_path):
self.logger.error("Deleting file %s", dir_path)
do_unlink(dir_path)
#If dir aleady exist just override metadata.
mkdirs(dir_path) mkdirs(dir_path)
do_chown(dir_path, self.uid, self.gid) do_chown(dir_path, self.uid, self.gid)
create_object_metadata(dir_path) create_object_metadata(dir_path)
elif not os_path.isdir(dir_path):
raise DiskFileError("Cannot overwrite "
"file %s with a directory" % dir_path)
def put_metadata(self, metadata, tombstone=False): def put_metadata(self, metadata, tombstone=False):
""" """
@ -208,7 +213,7 @@ class Gluster_DiskFile(DiskFile):
metadata = _adjust_metadata(metadata) metadata = _adjust_metadata(metadata)
if metadata[X_OBJECT_TYPE] == MARKER_DIR: if dir_is_object(metadata):
if not self.data_file: if not self.data_file:
self.data_file = os.path.join(self.datadir, self._obj) self.data_file = os.path.join(self.datadir, self._obj)
self._create_dir_object(self.data_file) self._create_dir_object(self.data_file)
@ -217,8 +222,10 @@ class Gluster_DiskFile(DiskFile):
# Check if directory already exists. # Check if directory already exists.
if self._is_dir: if self._is_dir:
# FIXME: How can we have a directory and it not be marked as a # A pre-existing directory already exists on the file
# MARKER_DIR (see above)? # system, perhaps gratuitously created when another
# object was created, or created externally to Swift
# REST API servicing (UFO use case).
msg = 'File object exists as a directory: %s' % self.data_file msg = 'File object exists as a directory: %s' % self.data_file
raise AlreadyExistsAsDir(msg) raise AlreadyExistsAsDir(msg)
@ -256,15 +263,38 @@ class Gluster_DiskFile(DiskFile):
"Have metadata, %r, but no data_file" % self.metadata "Have metadata, %r, but no data_file" % self.metadata
if self._is_dir: if self._is_dir:
# Marker directory object # Marker, or object, directory.
if not rmdirs(self.data_file): #
logging.error('Unable to delete dir object: %s', # Delete from the filesystem only if it contains
self.data_file) # no objects. If it does contain objects, then just
return # remove the object metadata tag which will make this directory a
# fake-filesystem-only directory and will be deleted
# when the container or parent directory is deleted.
metadata = read_metadata(self.data_file)
if dir_is_object(metadata):
metadata[X_OBJECT_TYPE] = DIR_NON_OBJECT
write_metadata(self.data_file, metadata)
rmobjdir(self.data_file)
else: else:
# File object # Delete file object
do_unlink(self.data_file) do_unlink(self.data_file)
# Garbage collection of non-object directories.
# Now that we deleted the file, determine
# if the current directory and any parent
# directory may be deleted.
dirname = os.path.dirname(self.data_file)
while dirname and dirname != self._container_path:
# Try to remove any directories that are not
# objects.
if not rmobjdir(dirname):
# If a directory with objects has been
# found, we can stop garbage collection
break
else:
dirname = os.path.dirname(dirname)
self.metadata = {} self.metadata = {}
self.data_file = None self.data_file = None

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import os
try: try:
from webob.exc import HTTPBadRequest from webob.exc import HTTPBadRequest
except ImportError: except ImportError:
@ -45,6 +46,8 @@ def get_object_name_component_length():
def validate_obj_name_component(obj): def validate_obj_name_component(obj):
if not obj:
return 'cannot begin, end, or have contiguous %s\'s' % os.path.sep
if len(obj) > MAX_OBJECT_NAME_COMPONENT_LENGTH: if len(obj) > MAX_OBJECT_NAME_COMPONENT_LENGTH:
return 'too long (%d)' % len(obj) return 'too long (%d)' % len(obj)
if obj == '.' or obj == '..': if obj == '.' or obj == '..':
@ -74,7 +77,7 @@ def gluster_check_object_creation(req, object_name):
ret = __check_object_creation(req, object_name) ret = __check_object_creation(req, object_name)
if ret is None: if ret is None:
for obj in object_name.split('/'): for obj in object_name.split(os.path.sep):
reason = validate_obj_name_component(obj) reason = validate_obj_name_component(obj)
if reason: if reason:
bdy = 'Invalid object name "%s", component "%s" %s' \ bdy = 'Invalid object name "%s", component "%s" %s' \

View File

@ -23,8 +23,9 @@ from eventlet import sleep
import cPickle as pickle import cPickle as pickle
from swift.common.utils import normalize_timestamp from swift.common.utils import normalize_timestamp
from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \ from gluster.swift.common.fs_utils import do_rename, do_fsync, os_path, \
do_stat, do_listdir, do_walk do_stat, do_listdir, do_walk, dir_empty, rmdirs
from gluster.swift.common import Glusterfs from gluster.swift.common import Glusterfs
from gluster.swift.common.exceptions import FileOrDirNotFoundError
X_CONTENT_TYPE = 'Content-Type' X_CONTENT_TYPE = 'Content-Type'
X_CONTENT_LENGTH = 'Content-Length' X_CONTENT_LENGTH = 'Content-Length'
@ -41,8 +42,8 @@ ACCOUNT = 'Account'
METADATA_KEY = 'user.swift.metadata' METADATA_KEY = 'user.swift.metadata'
MAX_XATTR_SIZE = 65536 MAX_XATTR_SIZE = 65536
CONTAINER = 'container' CONTAINER = 'container'
DIR = 'dir' DIR_NON_OBJECT = 'dir'
MARKER_DIR = 'marker_dir' DIR_OBJECT = 'marker_dir'
TEMP_DIR = 'tmp' TEMP_DIR = 'tmp'
ASYNCDIR = 'async_pending' # Keep in sync with swift.obj.server.ASYNCDIR ASYNCDIR = 'async_pending' # Keep in sync with swift.obj.server.ASYNCDIR
FILE = 'file' FILE = 'file'
@ -231,6 +232,12 @@ def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
obj_path = path.replace(cont_path, '').strip(os.path.sep) obj_path = path.replace(cont_path, '').strip(os.path.sep)
for obj_name in src_list: for obj_name in src_list:
if not reg_file and Glusterfs.OBJECT_ONLY:
metadata = \
read_metadata(os.path.join(cont_path, obj_path, obj_name))
if not dir_is_object(metadata):
continue
if obj_path: if obj_path:
obj_list.append(os.path.join(obj_path, obj_name)) obj_list.append(os.path.join(obj_path, obj_name))
else: else:
@ -247,11 +254,12 @@ def _update_list(path, cont_path, src_list, reg_file=True, object_count=0,
def update_list(path, cont_path, dirs=[], files=[], object_count=0, def update_list(path, cont_path, dirs=[], files=[], object_count=0,
bytes_used=0, obj_list=[]): bytes_used=0, obj_list=[]):
if files: if files:
object_count, bytes_used = _update_list(path, cont_path, files, True, object_count, bytes_used = _update_list(path, cont_path, files, True,
object_count, bytes_used, object_count, bytes_used,
obj_list) obj_list)
if not Glusterfs.OBJECT_ONLY and dirs: if dirs:
object_count, bytes_used = _update_list(path, cont_path, dirs, False, object_count, bytes_used = _update_list(path, cont_path, dirs, False,
object_count, bytes_used, object_count, bytes_used,
obj_list) obj_list)
@ -368,7 +376,7 @@ def get_object_metadata(obj_path):
X_TYPE: OBJECT, X_TYPE: OBJECT,
X_TIMESTAMP: normalize_timestamp(stats.st_ctime), X_TIMESTAMP: normalize_timestamp(stats.st_ctime),
X_CONTENT_TYPE: DIR_TYPE if is_dir else FILE_TYPE, X_CONTENT_TYPE: DIR_TYPE if is_dir else FILE_TYPE,
X_OBJECT_TYPE: DIR if is_dir else FILE, X_OBJECT_TYPE: DIR_NON_OBJECT if is_dir else FILE,
X_CONTENT_LENGTH: 0 if is_dir else stats.st_size, X_CONTENT_LENGTH: 0 if is_dir else stats.st_size,
X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path)} X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path)}
return metadata return metadata
@ -476,6 +484,56 @@ def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
do_rename(tmppath, dest) do_rename(tmppath, dest)
# The following dir_xxx calls should definitely be replaced
# with a Metadata class to encapsulate their implementation.
# :FIXME: For now we have them as functions, but we should
# move them to a class.
def dir_is_object(metadata):
"""
Determine if the directory with the path specified
has been identified as an object
"""
return metadata.get(X_OBJECT_TYPE, "") == DIR_OBJECT
def rmobjdir(dir_path):
"""
Removes the directory as long as there are
no objects stored in it. This works for containers also.
"""
try:
if dir_empty(dir_path):
rmdirs(dir_path)
return True
except FileOrDirNotFoundError:
# No such directory exists
return False
for (path, dirs, files) in do_walk(dir_path, topdown=False):
for directory in dirs:
fullpath = os.path.join(path, directory)
metadata = read_metadata(fullpath)
if not dir_is_object(metadata):
# Directory is not an object created by the caller
# so we can go ahead and delete it.
try:
if dir_empty(fullpath):
rmdirs(fullpath)
else:
# Non-object dir is not empty!
return False
except FileOrDirNotFoundError:
# No such dir!
return False
else:
# Wait, this is an object created by the caller
# We cannot delete
return False
rmdirs(dir_path)
return True
# Over-ride Swift's utils.write_pickle with ours # Over-ride Swift's utils.write_pickle with ours
import swift.common.utils import swift.common.utils
swift.common.utils.write_pickle = write_pickle swift.common.utils.write_pickle = write_pickle

View File

@ -0,0 +1,151 @@
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenStack Swift based functional tests for Gluster for Swift"""
from test.functional.tests import config, locale, Base, Utils
from test.functional.swift_test_client import Account, Connection, File, \
ResponseError
class TestGlusterContainerPathsEnv:
@classmethod
def setUp(cls):
cls.conn = Connection(config)
cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account',
config['username']))
cls.account.delete_containers()
cls.file_size = 8
cls.container = cls.account.container(Utils.create_name())
if not cls.container.create():
raise ResponseError(cls.conn.response)
cls.dirs = [
'dir1',
'dir2',
'dir1/subdir1',
'dir1/subdir2',
'dir1/subdir1/subsubdir1',
'dir1/subdir1/subsubdir2',
'dir1/subdir with spaces',
'dir1/subdir+with{whatever',
]
cls.files = [
'file1',
'file A',
'dir1/file2',
'dir1/subdir1/file2',
'dir1/subdir1/file3',
'dir1/subdir1/file4',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file6',
'dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir2/file9',
'dir1/subdir1/subsubdir2/file0',
'dir1/subdir with spaces/file B',
'dir1/subdir+with{whatever/file D',
]
stored_files = set()
for d in cls.dirs:
file = cls.container.file(d)
file.write(hdrs={'Content-Type': 'application/directory'})
for f in cls.files:
file = cls.container.file(f)
file.write_random(cls.file_size, hdrs={'Content-Type':
'application/octet-stream'})
stored_files.add(f)
cls.stored_files = sorted(stored_files)
cls.sorted_objects = sorted(set(cls.dirs + cls.files))
class TestGlusterContainerPaths(Base):
env = TestGlusterContainerPathsEnv
set_up = False
def testTraverseContainer(self):
found_files = []
found_dirs = []
def recurse_path(path, count=0):
if count > 10:
raise ValueError('too deep recursion')
for file in self.env.container.files(parms={'path': path}):
self.assert_(file.startswith(path))
if file in self.env.dirs:
recurse_path(file, count + 1)
found_dirs.append(file)
else:
found_files.append(file)
recurse_path('')
for file in self.env.stored_files:
self.assert_(file in found_files)
self.assert_(file not in found_dirs)
def testContainerListing(self):
for format in (None, 'json', 'xml'):
files = self.env.container.files(parms={'format': format})
self.assertFalse(len(files) == 0)
if isinstance(files[0], dict):
files = [str(x['name']) for x in files]
self.assertEquals(files, self.env.sorted_objects)
for format in ('json', 'xml'):
for file in self.env.container.files(parms={'format': format}):
self.assert_(int(file['bytes']) >= 0)
self.assert_('last_modified' in file)
if file['name'] in self.env.dirs:
self.assertEquals(file['content_type'],
'application/directory')
else:
self.assertEquals(file['content_type'],
'application/octet-stream')
def testStructure(self):
def assert_listing(path, list):
files = self.env.container.files(parms={'path': path})
self.assertEquals(sorted(list, cmp=locale.strcoll), files)
assert_listing('', ['file1', 'dir1', 'dir2', 'file A'])
assert_listing('dir1', ['dir1/file2', 'dir1/subdir1',
'dir1/subdir2', 'dir1/subdir with spaces',
'dir1/subdir+with{whatever'])
assert_listing('dir1/subdir1',
['dir1/subdir1/file4', 'dir1/subdir1/subsubdir2',
'dir1/subdir1/file2', 'dir1/subdir1/file3',
'dir1/subdir1/subsubdir1'])
assert_listing('dir1/subdir1/subsubdir1',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir1/subsubdir1',
['dir1/subdir1/subsubdir1/file7',
'dir1/subdir1/subsubdir1/file5',
'dir1/subdir1/subsubdir1/file8',
'dir1/subdir1/subsubdir1/file6'])
assert_listing('dir1/subdir with spaces',
['dir1/subdir with spaces/file B'])

View File

@ -15,6 +15,7 @@
# limitations under the License. # limitations under the License.
from datetime import datetime from datetime import datetime
import os
import locale import locale
import random import random
import StringIO import StringIO
@ -32,6 +33,8 @@ from swift.common.constraints import MAX_FILE_SIZE, MAX_META_NAME_LENGTH, \
MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ MAX_META_VALUE_LENGTH, MAX_META_COUNT, MAX_META_OVERALL_SIZE, \
MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \ MAX_OBJECT_NAME_LENGTH, CONTAINER_LISTING_LIMIT, ACCOUNT_LISTING_LIMIT, \
MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH MAX_ACCOUNT_NAME_LENGTH, MAX_CONTAINER_NAME_LENGTH
from gluster.swift.common.constraints import \
set_object_name_component_length, get_object_name_component_length
default_constraints = dict(( default_constraints = dict((
('max_file_size', MAX_FILE_SIZE), ('max_file_size', MAX_FILE_SIZE),
@ -69,6 +72,7 @@ for k in default_constraints:
web_front_end = config.get('web_front_end', 'integral') web_front_end = config.get('web_front_end', 'integral')
normalized_urls = config.get('normalized_urls', False) normalized_urls = config.get('normalized_urls', False)
set_object_name_component_length()
def load_constraint(name): def load_constraint(name):
c = config[name] c = config[name]
@ -79,6 +83,31 @@ def load_constraint(name):
locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C')) locale.setlocale(locale.LC_COLLATE, config.get('collate', 'C'))
def create_limit_filename(name_limit):
"""
Split a large object name with
slashes so as to conform to the GlusterFS file name
constraints.
Example: Take an object name: 'a'*1024, and
convert it to a*255/a*255/...
"""
# Get the file name limit from the configuration file
filename_limit = get_object_name_component_length()
# Convert string to a list: "abc" -> ['a', 'b', 'c']
filename_list = list('a' * name_limit)
# Replace chars at filename limits to '/'
for index in range(filename_limit, name_limit, filename_limit):
filename_list[index] = os.path.sep
# Cannot end in a '/'
if os.path.sep == filename_list[-1]:
return "".join(filename_list[:-1])
else:
return "".join(filename_list)
def chunks(s, length=3): def chunks(s, length=3):
i, j = 0, length i, j = 0, length
while i < len(s): while i < len(s):
@ -675,6 +704,7 @@ class TestContainerUTF8(Base2, TestContainer):
class TestContainerPathsEnv: class TestContainerPathsEnv:
@classmethod @classmethod
def setUp(cls): def setUp(cls):
raise SkipTest('Objects ending in / are not supported')
cls.conn = Connection(config) cls.conn = Connection(config)
cls.conn.authenticate() cls.conn.authenticate()
cls.account = Account(cls.conn, config.get('account', cls.account = Account(cls.conn, config.get('account',
@ -1034,7 +1064,7 @@ class TestFile(Base):
limit = load_constraint('max_object_name_length') limit = load_constraint('max_object_name_length')
for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2): for l in (1, 10, limit / 2, limit - 1, limit, limit + 1, limit * 2):
file = self.env.container.file('a' * l) file = self.env.container.file(create_limit_filename(l))
if l <= limit: if l <= limit:
self.assert_(file.write()) self.assert_(file.write())

View File

@ -23,7 +23,7 @@ import tempfile
import shutil import shutil
from hashlib import md5 from hashlib import md5
from swift.common.utils import normalize_timestamp from swift.common.utils import normalize_timestamp
from swift.common.exceptions import DiskFileNotExist from swift.common.exceptions import DiskFileNotExist, DiskFileError
import gluster.swift.common.DiskFile import gluster.swift.common.DiskFile
import gluster.swift.common.utils import gluster.swift.common.utils
from gluster.swift.common.DiskFile import Gluster_DiskFile, \ from gluster.swift.common.DiskFile import Gluster_DiskFile, \
@ -54,8 +54,8 @@ class MockException(Exception):
pass pass
def _mock_rmdirs(p): def _mock_rmobjdir(p):
raise MockException("gluster.swift.common.DiskFile.rmdirs() called") raise MockException("gluster.swift.common.DiskFile.rmobjdir() called")
def _mock_do_fsync(fd): def _mock_do_fsync(fd):
return return
@ -348,12 +348,12 @@ class TestDiskFile(unittest.TestCase):
assert g == DEFAULT_GID assert g == DEFAULT_GID
dc = gluster.swift.common.DiskFile.do_chown dc = gluster.swift.common.DiskFile.do_chown
gluster.swift.common.DiskFile.do_chown = _mock_do_chown gluster.swift.common.DiskFile.do_chown = _mock_do_chown
try: self.assertRaises(DiskFileError,
gdf._create_dir_object(the_dir) gdf._create_dir_object,
finally: the_dir)
gluster.swift.common.DiskFile.do_chown = dc gluster.swift.common.DiskFile.do_chown = dc
assert os.path.isdir(the_dir) self.assertFalse(os.path.isdir(the_dir))
assert the_dir in _metadata self.assertFalse(the_dir in _metadata)
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)
@ -571,14 +571,14 @@ class TestDiskFile(unittest.TestCase):
gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar", gdf = Gluster_DiskFile("/tmp/foo", "vol0", "p57", "ufo47", "bar",
"z", self.lg) "z", self.lg)
assert gdf.metadata == {} assert gdf.metadata == {}
_saved_rmdirs = gluster.swift.common.DiskFile.rmdirs _saved_rmobjdir = gluster.swift.common.DiskFile.rmobjdir
gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs gluster.swift.common.DiskFile.rmobjdir = _mock_rmobjdir
try: try:
gdf.unlinkold(None) gdf.unlinkold(None)
except MockException as exp: except MockException as exp:
self.fail(str(exp)) self.fail(str(exp))
finally: finally:
gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs gluster.swift.common.DiskFile.rmobjdir = _saved_rmobjdir
def test_unlinkold_same_timestamp(self): def test_unlinkold_same_timestamp(self):
assert not os.path.exists("/tmp/foo") assert not os.path.exists("/tmp/foo")
@ -586,14 +586,14 @@ class TestDiskFile(unittest.TestCase):
"z", self.lg) "z", self.lg)
assert gdf.metadata == {} assert gdf.metadata == {}
gdf.metadata['X-Timestamp'] = 1 gdf.metadata['X-Timestamp'] = 1
_saved_rmdirs = gluster.swift.common.DiskFile.rmdirs _saved_rmobjdir = gluster.swift.common.DiskFile.rmobjdir
gluster.swift.common.DiskFile.rmdirs = _mock_rmdirs gluster.swift.common.DiskFile.rmobjdir = _mock_rmobjdir
try: try:
gdf.unlinkold(1) gdf.unlinkold(1)
except MockException as exp: except MockException as exp:
self.fail(str(exp)) self.fail(str(exp))
finally: finally:
gluster.swift.common.DiskFile.rmdirs = _saved_rmdirs gluster.swift.common.DiskFile.rmobjdir = _saved_rmobjdir
def test_unlinkold_file(self): def test_unlinkold_file(self):
td = tempfile.mkdtemp() td = tempfile.mkdtemp()
@ -717,7 +717,7 @@ class TestDiskFile(unittest.TestCase):
os.chmod(gdf.datadir, stats.st_mode) os.chmod(gdf.datadir, stats.st_mode)
os.rmdir = __os_rmdir os.rmdir = __os_rmdir
assert os.path.isdir(gdf.datadir) assert os.path.isdir(gdf.datadir)
assert os.path.isdir(gdf.data_file) self.assertTrue(gdf.data_file is None)
finally: finally:
shutil.rmtree(td) shutil.rmtree(td)

View File

@ -367,7 +367,7 @@ class TestUtils(unittest.TestCase):
for key in self.obj_keys: for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md) assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.DIR assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0 assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td)) assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
@ -413,7 +413,7 @@ class TestUtils(unittest.TestCase):
for key in self.obj_keys: for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md) assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.DIR assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0 assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td)) assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
@ -802,3 +802,56 @@ class TestUtils(unittest.TestCase):
utils.X_OBJECT_TYPE: 'na' } utils.X_OBJECT_TYPE: 'na' }
ret = utils.validate_object(md) ret = utils.validate_object(md)
assert ret assert ret
class TestUtilsDirObjects(unittest.TestCase):
def setUp(self):
_initxattr()
self.dirs = ['dir1',
'dir1/dir2',
'dir1/dir2/dir3' ]
self.files = ['file1',
'file2',
'dir1/dir2/file3']
self.tempdir = tempfile.mkdtemp()
self.rootdir = os.path.join(self.tempdir, 'a')
for d in self.dirs:
os.makedirs(os.path.join(self.rootdir, d))
for f in self.files:
open(os.path.join(self.rootdir, f), 'w').close()
def tearDown(self):
_destroyxattr()
shutil.rmtree(self.tempdir)
def _set_dir_object(self, obj):
metadata = utils.read_metadata(os.path.join(self.rootdir, obj))
metadata[utils.X_OBJECT_TYPE] = utils.DIR_OBJECT
utils.write_metadata(os.path.join(self.rootdir, self.dirs[0]),
metadata)
def _clear_dir_object(self, obj):
metadata = utils.read_metadata(os.path.join(self.rootdir, obj))
metadata[utils.X_OBJECT_TYPE] = utils.DIR_NON_OBJECT
utils.write_metadata(os.path.join(self.rootdir, obj),
metadata)
def test_rmobjdir_removing_files(self):
self.assertFalse(utils.rmobjdir(self.rootdir))
# Remove the files
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
self.assertTrue(utils.rmobjdir(self.rootdir))
def test_rmobjdir_removing_dirs(self):
self.assertFalse(utils.rmobjdir(self.rootdir))
# Remove the files
for f in self.files:
os.unlink(os.path.join(self.rootdir, f))
self._set_dir_object(self.dirs[0])
self.assertFalse(utils.rmobjdir(self.rootdir))
self._clear_dir_object(self.dirs[0])
self.assertTrue(utils.rmobjdir(self.rootdir))

View File

@ -742,6 +742,46 @@ class TestObjectController(unittest.TestCase):
res = method(req) res = method(req)
self.assertEquals(res.status_int, expected) self.assertEquals(res.status_int, expected)
def test_illegal_object_name(self):
    """An object PUT whose name ends in '/' must be rejected with 400."""
    prolis = _test_sockets[0]
    prosrv = _test_servers[0]

    def send_and_expect(request_text, expected_status):
        # Open a fresh connection, send the raw HTTP request, and
        # verify the status line of the response.
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write(request_text)
        fd.flush()
        headers = readuntil2crlfs(fd)
        self.assertEquals(headers[:len(expected_status)], expected_status)

    # Create a container
    send_and_expect('PUT /v1/a/illegal_name HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 201')

    # A file object whose name has a trailing '/' is illegal.
    fakedata = 'a' * 1024
    send_and_expect('PUT /v1/a/illegal_name/file/ HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: %s\r\n'
                    'Content-Type: application/octect-stream\r\n'
                    '\r\n%s' % (str(len(fakedata)), fakedata),
                    'HTTP/1.1 400')

    # Clean up: delete the container.
    send_and_expect('DELETE /v1/a/illegal_name HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 204')
def test_GET_newest_large_file(self): def test_GET_newest_large_file(self):
calls = [0] calls = [0]
@ -4547,6 +4587,244 @@ class TestContainerController(unittest.TestCase):
exp = 'HTTP/1.1 404' exp = 'HTTP/1.1 404'
self.assertEquals(headers[:len(exp)], exp) self.assertEquals(headers[:len(exp)], exp)
def test_dir_object_not_lost(self):
    """
    Deleting directory objects must not remove file objects stored
    below them; the file must stay listable and deletable afterwards.
    """
    prolis = _test_sockets[0]
    prosrv = _test_servers[0]

    def send_and_expect(request_text, expected_status):
        # Raw HTTP round trip: connect, write the request, check the
        # response status line.
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write(request_text)
        fd.flush()
        headers = readuntil2crlfs(fd)
        self.assertEquals(headers[:len(expected_status)], expected_status)

    def container_listing():
        # Return the container listing as a list of object names.
        req = Request.blank('/v1/a/dir_obj_test',
                            environ={'REQUEST_METHOD': 'GET'})
        res = req.get_response(prosrv)
        return res.body.split('\n')

    # Create a container
    send_and_expect('PUT /v1/a/dir_obj_test HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 201')

    # Create nested directory objects (application/directory, length 0).
    dir_list = ['a', 'a/b', 'a/b/c']
    for dir_obj in dir_list:
        send_and_expect(
            'PUT /v1/a/dir_obj_test/%s HTTP/1.1\r\nHost: localhost\r\n'
            'Connection: close\r\nX-Storage-Token: t\r\n'
            'Content-Length: 0\r\n'
            'Content-type: application/directory\r\n\r\n' % dir_obj,
            'HTTP/1.1 201')

    # Check we see all the objects we created
    obj_list = container_listing()
    for dir_obj in dir_list:
        self.assertTrue(dir_obj in obj_list)

    # Now let's create a file obj
    fakedata = 'a' * 1024
    send_and_expect(
        'PUT /v1/a/dir_obj_test/a/b/c/file1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Storage-Token: t\r\n'
        'Content-Length: %s\r\n'
        'Content-Type: application/octect-stream\r\n'
        '\r\n%s' % (str(len(fakedata)), fakedata),
        'HTTP/1.1 201')

    # Now check we get all dir objs and the file obj
    obj_list = container_listing()
    for dir_obj in dir_list:
        self.assertTrue(dir_obj in obj_list)
    self.assertTrue('a/b/c/file1' in obj_list)

    # Delete dir objects, file should still be available
    for dir_obj in dir_list:
        send_and_expect(
            'DELETE /v1/a/dir_obj_test/%s HTTP/1.1\r\nHost: localhost\r\n'
            'Connection: close\r\nX-Storage-Token: t\r\n'
            '\r\n' % dir_obj,
            'HTTP/1.1 204')

    # Now check file is still available
    obj_list = container_listing()
    for dir_obj in dir_list:
        self.assertFalse(dir_obj in obj_list)
    self.assertTrue('a/b/c/file1' in obj_list)

    # Delete file
    send_and_expect(
        'DELETE /v1/a/dir_obj_test/a/b/c/file1 HTTP/1.1\r\nHost: localhost\r\n'
        'Connection: close\r\nX-Storage-Token: t\r\n'
        '\r\n',
        'HTTP/1.1 204')

    # Delete container
    send_and_expect('DELETE /v1/a/dir_obj_test HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 204')
def test_container_lists_dir_and_file_objects(self):
    """
    Container listings must include both file objects and directory
    objects; `?path=` filters by directory level; a non-empty container
    refuses DELETE with 409, and succeeds once every object is removed.
    """
    prolis = _test_sockets[0]
    prosrv = _test_servers[0]

    def send_and_expect(request_text, expected_status):
        # Raw HTTP round trip: connect, write the request, check the
        # response status line.
        sock = connect_tcp(('localhost', prolis.getsockname()[1]))
        fd = sock.makefile()
        fd.write(request_text)
        fd.flush()
        headers = readuntil2crlfs(fd)
        self.assertEquals(headers[:len(expected_status)], expected_status)

    def put_file(obj_name, payload):
        send_and_expect(
            'PUT /v1/a/list_test/%s HTTP/1.1\r\nHost: localhost\r\n'
            'Connection: close\r\nX-Storage-Token: t\r\n'
            'Content-Length: %s\r\n'
            'Content-Type: application/octect-stream\r\n'
            '\r\n%s' % (obj_name, str(len(payload)), payload),
            'HTTP/1.1 201')

    # Create a container
    send_and_expect('PUT /v1/a/list_test HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 201')

    # Three file objects: two nested, one at container root.
    fakedata = 'a' * 1024
    put_file('a/b/c/file1', fakedata)
    put_file('a/b/c/file2', fakedata)
    put_file('file3', fakedata)

    # Create a dir obj
    send_and_expect('PUT /v1/a/list_test/a/b/c/dir1 HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n'
                    'Content-type: application/directory\r\n\r\n',
                    'HTTP/1.1 201')

    # Path tests: empty path lists only root-level objects...
    req = Request.blank('/v1/a/list_test?path=',
                        environ={'REQUEST_METHOD': 'GET'})
    res = req.get_response(prosrv)
    obj_list = res.body.split('\n')
    self.assertFalse('a/b/c/file1' in obj_list)
    self.assertFalse('a/b/c/file2' in obj_list)
    self.assertFalse('a/b/c/dir1' in obj_list)
    self.assertTrue('file3' in obj_list)

    # ...while path=a/b/c lists exactly that directory's contents.
    req = Request.blank('/v1/a/list_test?path=a/b/c',
                        environ={'REQUEST_METHOD': 'GET'})
    res = req.get_response(prosrv)
    obj_list = res.body.split('\n')
    self.assertTrue('a/b/c/file1' in obj_list)
    self.assertTrue('a/b/c/file2' in obj_list)
    self.assertTrue('a/b/c/dir1' in obj_list)
    self.assertFalse('file3' in obj_list)

    # Try to delete, but expect failure since the
    # container is not empty
    send_and_expect('DELETE /v1/a/list_test HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 409')

    # Full listing must show all four objects and the matching count.
    req = Request.blank('/v1/a/list_test',
                        environ={'REQUEST_METHOD': 'GET'})
    res = req.get_response(prosrv)
    obj_list = res.body.split('\n')
    self.assertTrue('a/b/c/file1' in obj_list)
    self.assertTrue('a/b/c/file2' in obj_list)
    self.assertTrue('a/b/c/dir1' in obj_list)
    self.assertTrue('file3' in obj_list)
    self.assertEqual(res.headers['x-container-object-count'], '4')

    # Now let's delete the objects
    for obj in obj_list:
        if not obj:
            continue
        send_and_expect(
            'DELETE /v1/a/list_test/%s HTTP/1.1\r\nHost: localhost\r\n'
            'Connection: close\r\nX-Storage-Token: t\r\n'
            '\r\n' % obj,
            'HTTP/1.1 204')

    # Delete container which has stale directories a/b/c
    send_and_expect('DELETE /v1/a/list_test HTTP/1.1\r\nHost: localhost\r\n'
                    'Connection: close\r\nX-Storage-Token: t\r\n'
                    'Content-Length: 0\r\n\r\n',
                    'HTTP/1.1 204')
def test_response_get_accept_ranges_header(self): def test_response_get_accept_ranges_header(self):
with save_globals(): with save_globals():
set_http_connect(200, 200, body='{}') set_http_connect(200, 200, body='{}')