Rebase to Swift 2.3.0
This change does the following:

* Update all functional tests, unit tests and conf files based on the
  Swift 2.3.0 release.
* Fix "test_versioning_dlo" test from Swift to be SOF compatible.
* Bump swiftonfile release version to 2.3.0.
* Update swiftonfile with DiskFile API changes.

Change-Id: I75e322724efa4b946007b6d2786d92f73d5ae313
Signed-off-by: Prashanth Pai <ppai@redhat.com>
commit b145f9b68d (parent c6aae2b0cc)
.functests (11 lines changed)
@@ -55,8 +55,9 @@ Before proceeding forward, please make sure you already have:
 3. Added swiftonfile policy section to swift.conf file.
    Example:

-   [storage-policy:2]
+   [storage-policy:3]
    name = swiftonfile
+   policy_type = replication
    default = yes

    Added sof_constraints middleware in proxy pipeline
@@ -71,11 +72,11 @@ Before proceeding forward, please make sure you already have:
 4. Copied etc/object-server.conf-swiftonfile to /etc/swift/object-server/5.conf

 5. Generated ring files for swiftonfile policy.
-   Example: for policy with index 2
+   Example: for policy with index 3

-   swift-ring-builder object-2.builder create 1 1 1
-   swift-ring-builder object-2.builder add r1z1-127.0.0.1:6050/test 1
-   swift-ring-builder object-2.builder rebalance
+   swift-ring-builder object-3.builder create 1 1 1
+   swift-ring-builder object-3.builder add r1z1-127.0.0.1:6050/test 1
+   swift-ring-builder object-3.builder rebalance

 6. Started memcached and swift services.
 """
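For readers following the renumbering above: Swift derives the object ring file name from the storage policy index, which is why every builder command moves from object-2 to object-3 when the swiftonfile policy becomes policy 3. A minimal sketch of that naming rule (the helper name here is illustrative, not Swift's API):

    def object_ring_name(policy_index):
        # index 0 keeps the legacy, unsuffixed ring name
        if policy_index:
            return 'object-%d.ring.gz' % policy_index
        return 'object.ring.gz'

    assert object_ring_name(3) == 'object-3.ring.gz'
    assert object_ring_name(0) == 'object.ring.gz'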
@@ -45,3 +45,6 @@ disk_chunk_size = 65536
 # Adjust this value match whatever is set for the disk_chunk_size initially.
 # This will provide a reasonable starting point for tuning this value.
 network_chunk_size = 65536
+
+[object-updater]
+user = <your-user-name>
@@ -22,9 +22,13 @@ swift_hash_path_prefix = changeme
 # defined you must define a policy with index 0 and you must specify a
 # default. It is recommended you always define a section for
 # storage-policy:0.
+#
+# A 'policy_type' argument is also supported but is not mandatory. Default
+# policy type 'replication' is used when 'policy_type' is unspecified.
 [storage-policy:0]
 name = Policy-0
 default = yes
+#policy_type = replication

 # the following section would declare a policy called 'silver', the number of
 # replicas will be determined by how the ring is built. In this example the
@@ -39,11 +43,47 @@ default = yes
 # current default.
 #[storage-policy:1]
 #name = silver
+#policy_type = replication

+# The following declares a storage policy of type 'erasure_coding' which uses
+# Erasure Coding for data reliability. The 'erasure_coding' storage policy in
+# Swift is available as a "beta". Please refer to Swift documentation for
+# details on how the 'erasure_coding' storage policy is implemented.
+#
+# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode
+# operations. Please refer to Swift documentation for details on how to
+# install PyECLib.
+#
+# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and
+# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and
+# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the
+# list of EC backends supported by PyECLib. The ring configured for the
+# storage policy must have it's "replica" count configured to
+# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is
+# validated when services start. 'ec_object_segment_size' is the amount of
+# data that will be buffered up before feeding a segment into the
+# encoder/decoder. More information about these configuration options and
+# supported `ec_type` schemes is available in the Swift documentation. Please
+# refer to Swift documentation for details on how to configure EC policies.
+#
+# The example 'deepfreeze10-4' policy defined below is a _sample_
+# configuration with 10 'data' and 4 'parity' fragments. 'ec_type'
+# defines the Erasure Coding scheme. 'jerasure_rs_vand' (Reed-Solomon
+# Vandermonde) is used as an example below.
+#
+#[storage-policy:2]
+#name = deepfreeze10-4
+#policy_type = erasure_coding
+#ec_type = jerasure_rs_vand
+#ec_num_data_fragments = 10
+#ec_num_parity_fragments = 4
+#ec_object_segment_size = 1048576
+
 # The following section defines a policy called 'swiftonfile' to be used by
 # swiftonfile object-server implementation.
-[storage-policy:2]
+[storage-policy:3]
 name = swiftonfile
+policy_type = replication

 # The swift-constraints section sets the basic constraints on data
 # saved in the swift cluster. These constraints are automatically
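The comment block added above states that an EC policy's ring must be built with a "replica" count equal to data plus parity fragments, a rule Swift validates at service start. Quick arithmetic for the sample 'deepfreeze10-4' policy:

    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4
    # the ring for this policy must be built with this many "replicas"
    required_ring_replicas = ec_num_data_fragments + ec_num_parity_fragments
    assert required_ring_replicas == 14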
@@ -102,6 +142,16 @@ name = swiftonfile
 # for an account listing request
 #account_listing_limit = 10000

+# By default all REST API calls should use "v1" or "v1.0" as the version string,
+# for example "/v1/account". This can be manually overridden to make this
+# backward-compatible, in case a different version string has been used before.
+# Use a comma-separated list in case of multiple allowed versions, for example
+# valid_api_versions = v0,v1,v2
+# This is only enforced for account, container and object requests. The allowed
+# api versions are by default excluded from /info.
+
+# valid_api_versions = v1,v1.0
+
 # SwiftOnFile constraints - do not exceed the maximum values which are
 # set here as default

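To illustrate the new valid_api_versions knob: the proxy checks the first path segment of account, container and object requests against the allowed list. A rough sketch of that check (simplified; not Swift's actual parser):

    valid_api_versions = ('v1', 'v1.0')  # the documented default

    def version_ok(path):
        version = path.split('/')[1]  # '/v1/AUTH_test/c/o' -> 'v1'
        return version in valid_api_versions

    assert version_ok('/v1/AUTH_test/container/object')
    assert not version_ok('/v2/AUTH_test/container/object')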
@@ -3,9 +3,10 @@
 # process, which may cause wedges in the gate later.

 dnspython>=1.9.4
-eventlet>=0.9.15
+eventlet>=0.16.1,!=0.17.0
 greenlet>=0.3.1
 netifaces>=0.5,!=0.10.0,!=0.10.1
 pastedeploy>=1.3.3
 simplejson>=2.0.9
 xattr>=0.4
+PyECLib>=1.0.7
@@ -43,6 +43,6 @@ class PkgInfo(object):


 # Change the Package version here
-_pkginfo = PkgInfo('2.2.1', '0', 'swiftonfile', False)
+_pkginfo = PkgInfo('2.3.0', '0', 'swiftonfile', False)
 __version__ = _pkginfo.pretty_version
 __canonical_version__ = _pkginfo.canonical_version
@@ -92,7 +92,7 @@ def _get_unique_id():
                 # own the lock.
                 continue
             raise
-    except:
+    except Exception:
         os.close(fd)
         raise
     else:
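The change from a bare except to except Exception is deliberate: a bare except also traps SystemExit and KeyboardInterrupt, which derive from BaseException rather than Exception. A small self-contained demonstration:

    def interrupted():
        raise KeyboardInterrupt

    handled = None
    try:
        interrupted()
    except Exception:
        handled = 'Exception clause'      # not reached: KeyboardInterrupt
                                          # is not a subclass of Exception
    except BaseException:
        handled = 'BaseException clause'  # a bare `except:` behaves like this
    print(handled)  # -> 'BaseException clause'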
@@ -106,7 +106,7 @@ def read_metadata(path_or_fd):
             # keys sizes we are shooting for N = 1.
             metadata = pickle.loads(metadata_s)
             assert isinstance(metadata, dict)
-        except EOFError, pickle.UnpicklingError:
+        except (EOFError, pickle.UnpicklingError):
             # We still are not able recognize this existing data collected
             # as a pickled object. Make sure we loop around to try to get
             # more from another xattr key.
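The parenthesized form is more than style. In Python 2, `except EOFError, pickle.UnpicklingError:` catches only EOFError and binds the caught instance to the name pickle.UnpicklingError, silently clobbering the exception class. The tuple form catches both. For example:

    import pickle

    try:
        pickle.loads(b'')  # truncated/empty data raises EOFError
    except (EOFError, pickle.UnpicklingError):
        print('unrecognized metadata blob; try the next xattr key')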
@@ -229,20 +229,20 @@ class DiskFileManager(SwiftDiskFileManager):
     :param logger: caller provided logger
     """
     def get_diskfile(self, device, partition, account, container, obj,
-                     policy_idx=0, **kwargs):
-        dev_path = self.get_dev_path(device)
+                     policy=None, **kwargs):
+        dev_path = self.get_dev_path(device, self.mount_check)
         if not dev_path:
             raise DiskFileDeviceUnavailable()
         return DiskFile(self, dev_path, self.threadpools[device],
                         partition, account, container, obj,
-                        policy_idx=policy_idx, **kwargs)
+                        policy=policy, **kwargs)

     def pickle_async_update(self, device, account, container, obj, data,
-                            timestamp, policy_idx):
+                            timestamp, policy):
         # This method invokes swiftonfile's writepickle method.
         # Is patching just write_pickle and calling parent method better ?
         device_path = self.construct_dev_path(device)
-        async_dir = os.path.join(device_path, get_async_dir(policy_idx))
+        async_dir = os.path.join(device_path, get_async_dir(policy))
         ohash = hash_path(account, container, obj)
         self.threadpools[device].run_in_thread(
             write_pickle,
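Behind get_async_dir(): async-pending update pickles are segregated per storage policy, with policy 0 keeping the legacy directory name. In Swift 2.3 the helper also accepts a StoragePolicy object rather than a bare index, which is what the policy_idx-to-policy renames above track. A sketch of the naming convention (helper name illustrative):

    def async_dir_name(policy_index):
        if policy_index:
            return 'async_pending-%d' % policy_index
        return 'async_pending'

    assert async_dir_name(0) == 'async_pending'
    assert async_dir_name(3) == 'async_pending-3'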
@ -426,6 +426,16 @@ class DiskFileWriter(object):
|
|||||||
# cleanup
|
# cleanup
|
||||||
self._tmppath = None
|
self._tmppath = None
|
||||||
|
|
||||||
|
def commit(self, timestamp):
|
||||||
|
"""
|
||||||
|
Perform any operations necessary to mark the object as durable. For
|
||||||
|
replication policy type this is a no-op.
|
||||||
|
|
||||||
|
:param timestamp: object put timestamp, an instance of
|
||||||
|
:class:`~swift.common.utils.Timestamp`
|
||||||
|
"""
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
class DiskFileReader(object):
|
class DiskFileReader(object):
|
||||||
"""
|
"""
|
||||||
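commit() joins the DiskFileWriter interface because Swift 2.3's object server calls it after put() on every backend; erasure-coding backends write a durability marker there, while a replication-style backend such as swiftonfile can satisfy the contract with a no-op. A minimal sketch of the life-cycle (assumed shape, not the full API):

    class MinimalWriter(object):
        def put(self, metadata):
            # rename the temp file into place, persist metadata, etc.
            self.metadata = metadata

        def commit(self, timestamp):
            # replication policy: data is already durable after put(),
            # so marking it durable is a no-op
            pass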
@ -570,8 +580,8 @@ class DiskFile(object):
|
|||||||
"""
|
"""
|
||||||
def __init__(self, mgr, dev_path, threadpool, partition,
|
def __init__(self, mgr, dev_path, threadpool, partition,
|
||||||
account=None, container=None, obj=None,
|
account=None, container=None, obj=None,
|
||||||
policy_idx=0, uid=DEFAULT_UID, gid=DEFAULT_GID):
|
policy=None, uid=DEFAULT_UID, gid=DEFAULT_GID):
|
||||||
# Variables partition and policy_idx is currently unused.
|
# Variables partition and policy is currently unused.
|
||||||
self._mgr = mgr
|
self._mgr = mgr
|
||||||
self._device_path = dev_path
|
self._device_path = dev_path
|
||||||
self._threadpool = threadpool or ThreadPool(nthreads=0)
|
self._threadpool = threadpool or ThreadPool(nthreads=0)
|
||||||
@@ -28,6 +28,18 @@ from swiftonfile.swift.obj.diskfile import DiskFileManager
 from swiftonfile.swift.common.constraints import check_object_creation


+class SwiftOnFileDiskFileRouter(object):
+    """
+    Replacement for Swift's DiskFileRouter object.
+    Always returns SwiftOnFile's DiskFileManager implementation.
+    """
+    def __init__(self, *args, **kwargs):
+        self.manager_cls = DiskFileManager(*args, **kwargs)
+
+    def __getitem__(self, policy):
+        return self.manager_cls
+
+
 class ObjectController(server.ObjectController):
     """
     Subclass of the object server's ObjectController which replaces the
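The router exists because Swift 2.3's object server resolves a DiskFileManager per storage policy by indexing a router object with the policy. SwiftOnFile serves every policy with the same manager, so __getitem__ ignores its argument. A self-contained illustration of that contract:

    class FakeManager(object):
        pass

    class OneManagerRouter(object):
        # mirrors SwiftOnFileDiskFileRouter: one manager for all policies
        def __init__(self, *args, **kwargs):
            self.manager_cls = FakeManager()

        def __getitem__(self, policy):
            return self.manager_cls

    router = OneManagerRouter()
    assert router[0] is router[3]  # same manager whatever the policy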
@ -43,28 +55,14 @@ class ObjectController(server.ObjectController):
|
|||||||
|
|
||||||
:param conf: WSGI configuration parameter
|
:param conf: WSGI configuration parameter
|
||||||
"""
|
"""
|
||||||
# Common on-disk hierarchy shared across account, container and object
|
# Replaces Swift's DiskFileRouter object reference with ours.
|
||||||
# servers.
|
self._diskfile_router = SwiftOnFileDiskFileRouter(conf, self.logger)
|
||||||
self._diskfile_mgr = DiskFileManager(conf, self.logger)
|
|
||||||
|
|
||||||
def get_diskfile(self, device, partition, account, container, obj,
|
|
||||||
policy_idx, **kwargs):
|
|
||||||
"""
|
|
||||||
Utility method for instantiating a DiskFile object supporting a given
|
|
||||||
REST API.
|
|
||||||
|
|
||||||
An implementation of the object server that wants to use a different
|
|
||||||
DiskFile class would simply over-ride this method to provide that
|
|
||||||
behavior.
|
|
||||||
"""
|
|
||||||
return self._diskfile_mgr.get_diskfile(
|
|
||||||
device, partition, account, container, obj, policy_idx, **kwargs)
|
|
||||||
|
|
||||||
@public
|
@public
|
||||||
@timing_stats()
|
@timing_stats()
|
||||||
def PUT(self, request):
|
def PUT(self, request):
|
||||||
try:
|
try:
|
||||||
device, partition, account, container, obj, policy_idx = \
|
device, partition, account, container, obj, policy = \
|
||||||
get_name_and_placement(request, 5, 5, True)
|
get_name_and_placement(request, 5, 5, True)
|
||||||
|
|
||||||
# check swiftonfile constraints first
|
# check swiftonfile constraints first
|
||||||
@@ -23,6 +23,7 @@ import eventlet
 import eventlet.debug
 import functools
 import random
+from ConfigParser import ConfigParser, NoSectionError
 from time import time, sleep
 from httplib import HTTPException
 from urlparse import urlparse
@@ -32,6 +33,7 @@ from gzip import GzipFile
 from shutil import rmtree
 from tempfile import mkdtemp
 from swift.common.middleware.memcache import MemcacheMiddleware
+from swift.common.storage_policy import parse_storage_policies, PolicyError

 from test import get_config
 from test.functional.swift_test_client import Account, Connection, \
@@ -50,6 +52,9 @@ from swift.container import server as container_server
 from swift.obj import server as object_server, mem_server as mem_object_server
 import swift.proxy.controllers.obj

+
+DEBUG = True
+
 # In order to get the proper blocking behavior of sockets without using
 # threads, where we can set an arbitrary timeout for some piece of code under
 # test, we use eventlet with the standard socket library patched. We have to
@@ -82,15 +87,15 @@ normalized_urls = None
 # If no config was read, we will fall back to old school env vars
 swift_test_auth_version = None
 swift_test_auth = os.environ.get('SWIFT_TEST_AUTH')
-swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '']
-swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '']
-swift_test_tenant = ['', '', '', '']
-swift_test_perm = ['', '', '', '']
-swift_test_domain = ['', '', '', '']
-swift_test_user_id = ['', '', '', '']
-swift_test_tenant_id = ['', '', '', '']
+swift_test_user = [os.environ.get('SWIFT_TEST_USER'), None, None, '', '']
+swift_test_key = [os.environ.get('SWIFT_TEST_KEY'), None, None, '', '']
+swift_test_tenant = ['', '', '', '', '']
+swift_test_perm = ['', '', '', '', '']
+swift_test_domain = ['', '', '', '', '']
+swift_test_user_id = ['', '', '', '', '']
+swift_test_tenant_id = ['', '', '', '', '']

-skip, skip2, skip3 = False, False, False
+skip, skip2, skip3, skip_service_tokens = False, False, False, False

 orig_collate = ''
 insecure = False
@@ -99,7 +104,7 @@ orig_hash_path_suff_pref = ('', '')
 orig_swift_conf_name = None

 in_process = False
-_testdir = _test_servers = _test_sockets = _test_coros = None
+_testdir = _test_servers = _test_coros = None


 class FakeMemcacheMiddleware(MemcacheMiddleware):
@@ -113,29 +118,187 @@ class FakeMemcacheMiddleware(MemcacheMiddleware):
         self.memcache = FakeMemcache()


-# swift.conf contents for in-process functional test runs
-functests_swift_conf = '''
-[swift-hash]
-swift_hash_path_suffix = inprocfunctests
-swift_hash_path_prefix = inprocfunctests
-
-[swift-constraints]
-max_file_size = %d
-''' % ((8 * 1024 * 1024) + 2)  # 8 MB + 2
+class InProcessException(BaseException):
+    pass
+
+
+def _info(msg):
+    print >> sys.stderr, msg
+
+
+def _debug(msg):
+    if DEBUG:
+        _info('DEBUG: ' + msg)
+
+
+def _in_process_setup_swift_conf(swift_conf_src, testdir):
+    # override swift.conf contents for in-process functional test runs
+    conf = ConfigParser()
+    conf.read(swift_conf_src)
+    try:
+        section = 'swift-hash'
+        conf.set(section, 'swift_hash_path_suffix', 'inprocfunctests')
+        conf.set(section, 'swift_hash_path_prefix', 'inprocfunctests')
+        section = 'swift-constraints'
+        max_file_size = (8 * 1024 * 1024) + 2  # 8 MB + 2
+        conf.set(section, 'max_file_size', max_file_size)
+    except NoSectionError:
+        msg = 'Conf file %s is missing section %s' % (swift_conf_src, section)
+        raise InProcessException(msg)
+
+    test_conf_file = os.path.join(testdir, 'swift.conf')
+    with open(test_conf_file, 'w') as fp:
+        conf.write(fp)
+
+    return test_conf_file
+
+
+def _in_process_find_conf_file(conf_src_dir, conf_file_name, use_sample=True):
+    """
+    Look for a file first in conf_src_dir, if it exists, otherwise optionally
+    look in the source tree sample 'etc' dir.
+
+    :param conf_src_dir: Directory in which to search first for conf file. May
+                         be None
+    :param conf_file_name: Name of conf file
+    :param use_sample: If True and the conf_file_name is not found, then return
+                       any sample conf file found in the source tree sample
+                       'etc' dir by appending '-sample' to conf_file_name
+    :returns: Path to conf file
+    :raises InProcessException: If no conf file is found
+    """
+    dflt_src_dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
+                                                 os.pardir, os.pardir,
+                                                 os.pardir, 'etc'))
+    conf_src_dir = dflt_src_dir if conf_src_dir is None else conf_src_dir
+    conf_file_path = os.path.join(conf_src_dir, conf_file_name)
+    if os.path.exists(conf_file_path):
+        return conf_file_path
+
+    if use_sample:
+        # fall back to using the corresponding sample conf file
+        conf_file_name += '-sample'
+        conf_file_path = os.path.join(dflt_src_dir, conf_file_name)
+        if os.path.exists(conf_file_path):
+            return conf_file_path
+
+    msg = 'Failed to find config file %s' % conf_file_name
+    raise InProcessException(msg)
+
+
+def _in_process_setup_ring(swift_conf, conf_src_dir, testdir):
+    """
+    If SWIFT_TEST_POLICY is set:
+    - look in swift.conf file for specified policy
+    - move this to be policy-0 but preserving its options
+    - copy its ring file to test dir, changing its devices to suit
+      in process testing, and renaming it to suit policy-0
+    Otherwise, create a default ring file.
+    """
+    conf = ConfigParser()
+    conf.read(swift_conf)
+    sp_prefix = 'storage-policy:'
+
+    try:
+        # policy index 0 will be created if no policy exists in conf
+        policies = parse_storage_policies(conf)
+    except PolicyError as e:
+        raise InProcessException(e)
+
+    # clear all policies from test swift.conf before adding test policy back
+    for policy in policies:
+        conf.remove_section(sp_prefix + str(policy.idx))
+
+    policy_specified = os.environ.get('SWIFT_TEST_POLICY')
+    if policy_specified:
+        policy_to_test = policies.get_by_name(policy_specified)
+        if policy_to_test is None:
+            raise InProcessException('Failed to find policy name "%s"'
+                                     % policy_specified)
+        _info('Using specified policy %s' % policy_to_test.name)
+    else:
+        policy_to_test = policies.default
+        _info('Defaulting to policy %s' % policy_to_test.name)
+
+    # make policy_to_test be policy index 0 and default for the test config
+    sp_zero_section = sp_prefix + '0'
+    conf.add_section(sp_zero_section)
+    for (k, v) in policy_to_test.get_info(config=True).items():
+        conf.set(sp_zero_section, k, v)
+    conf.set(sp_zero_section, 'default', True)
+
+    with open(swift_conf, 'w') as fp:
+        conf.write(fp)
+
+    # look for a source ring file
+    ring_file_src = ring_file_test = 'object.ring.gz'
+    if policy_to_test.idx:
+        ring_file_src = 'object-%s.ring.gz' % policy_to_test.idx
+    try:
+        ring_file_src = _in_process_find_conf_file(conf_src_dir,
+                                                   ring_file_src,
+                                                   use_sample=False)
+    except InProcessException as e:
+        if policy_specified:
+            raise InProcessException('Failed to find ring file %s'
+                                     % ring_file_src)
+        ring_file_src = None
+
+    ring_file_test = os.path.join(testdir, ring_file_test)
+    if ring_file_src:
+        # copy source ring file to a policy-0 test ring file, re-homing servers
+        _info('Using source ring file %s' % ring_file_src)
+        ring_data = ring.RingData.load(ring_file_src)
+        obj_sockets = []
+        for dev in ring_data.devs:
+            device = 'sd%c1' % chr(len(obj_sockets) + ord('a'))
+            utils.mkdirs(os.path.join(_testdir, 'sda1'))
+            utils.mkdirs(os.path.join(_testdir, 'sda1', 'tmp'))
+            obj_socket = eventlet.listen(('localhost', 0))
+            obj_sockets.append(obj_socket)
+            dev['port'] = obj_socket.getsockname()[1]
+            dev['ip'] = '127.0.0.1'
+            dev['device'] = device
+            dev['replication_port'] = dev['port']
+            dev['replication_ip'] = dev['ip']
+        ring_data.save(ring_file_test)
+    else:
+        # make default test ring, 2 replicas, 4 partitions, 2 devices
+        _info('No source object ring file, creating 2rep/4part/2dev ring')
+        obj_sockets = [eventlet.listen(('localhost', 0)) for _ in (0, 1)]
+        ring_data = ring.RingData(
+            [[0, 1, 0, 1], [1, 0, 1, 0]],
+            [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+              'port': obj_sockets[0].getsockname()[1]},
+             {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
+              'port': obj_sockets[1].getsockname()[1]}],
+            30)
+        with closing(GzipFile(ring_file_test, 'wb')) as f:
+            pickle.dump(ring_data, f)
+
+    for dev in ring_data.devs:
+        _debug('Ring file dev: %s' % dev)
+
+    return obj_sockets
+
+
 def in_process_setup(the_object_server=object_server):
-    print >>sys.stderr, 'IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS'
-    print >>sys.stderr, 'Using object_server: %s' % the_object_server.__name__
-    _dir = os.path.normpath(os.path.join(os.path.abspath(__file__),
-                                         os.pardir, os.pardir, os.pardir))
-    proxy_conf = os.path.join(_dir, 'etc', 'proxy-server.conf-sample')
-    if os.path.exists(proxy_conf):
-        print >>sys.stderr, 'Using proxy-server config from %s' % proxy_conf
-    else:
-        print >>sys.stderr, 'Failed to find conf file %s' % proxy_conf
-        return
+    _info('IN-PROCESS SERVERS IN USE FOR FUNCTIONAL TESTS')
+    _info('Using object_server class: %s' % the_object_server.__name__)
+    conf_src_dir = os.environ.get('SWIFT_TEST_IN_PROCESS_CONF_DIR')
+
+    if conf_src_dir is not None:
+        if not os.path.isdir(conf_src_dir):
+            msg = 'Config source %s is not a dir' % conf_src_dir
+            raise InProcessException(msg)
+        _info('Using config source dir: %s' % conf_src_dir)
+
+    # If SWIFT_TEST_IN_PROCESS_CONF specifies a config source dir then
+    # prefer config files from there, otherwise read config from source tree
+    # sample files. A mixture of files from the two sources is allowed.
+    proxy_conf = _in_process_find_conf_file(conf_src_dir, 'proxy-server.conf')
+    _info('Using proxy config from %s' % proxy_conf)
+    swift_conf_src = _in_process_find_conf_file(conf_src_dir, 'swift.conf')
+    _info('Using swift config from %s' % swift_conf_src)

     monkey_patch_mimetools()

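The new helpers are driven by two environment variables visible in the code above: SWIFT_TEST_POLICY selects which swift.conf policy is remapped to policy-0 for the run, and SWIFT_TEST_IN_PROCESS_CONF_DIR points at a directory of config files to prefer over the source-tree samples. A hypothetical invocation (values are examples only):

    import os

    os.environ['SWIFT_TEST_POLICY'] = 'swiftonfile'
    os.environ['SWIFT_TEST_IN_PROCESS_CONF_DIR'] = '/etc/swift'
    # test.functional.setup_package() reads these before starting the
    # in-process proxy, account, container and object servers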
@ -148,9 +311,8 @@ def in_process_setup(the_object_server=object_server):
|
|||||||
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
|
utils.mkdirs(os.path.join(_testdir, 'sdb1'))
|
||||||
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
|
utils.mkdirs(os.path.join(_testdir, 'sdb1', 'tmp'))
|
||||||
|
|
||||||
swift_conf = os.path.join(_testdir, "swift.conf")
|
swift_conf = _in_process_setup_swift_conf(swift_conf_src, _testdir)
|
||||||
with open(swift_conf, "w") as scfp:
|
obj_sockets = _in_process_setup_ring(swift_conf, conf_src_dir, _testdir)
|
||||||
scfp.write(functests_swift_conf)
|
|
||||||
|
|
||||||
global orig_swift_conf_name
|
global orig_swift_conf_name
|
||||||
orig_swift_conf_name = utils.SWIFT_CONF_FILE
|
orig_swift_conf_name = utils.SWIFT_CONF_FILE
|
||||||
@@ -207,22 +369,20 @@ def in_process_setup(the_object_server=object_server):
         # User on same account as first, but without admin access
         'username3': 'tester3',
         'password3': 'testing3',
-        # For tempauth middleware
-        'user_admin_admin': 'admin .admin .reseller_admin',
-        'user_test_tester': 'testing .admin',
-        'user_test2_tester2': 'testing2 .admin',
-        'user_test_tester3': 'testing3'
+        # Service user and prefix (emulates glance, cinder, etc. user)
+        'account5': 'test5',
+        'username5': 'tester5',
+        'password5': 'testing5',
+        'service_prefix': 'SERVICE',
+        # For tempauth middleware. Update reseller_prefix
+        'reseller_prefix': 'AUTH, SERVICE',
+        'SERVICE_require_group': 'service'
     })

     acc1lis = eventlet.listen(('localhost', 0))
     acc2lis = eventlet.listen(('localhost', 0))
     con1lis = eventlet.listen(('localhost', 0))
     con2lis = eventlet.listen(('localhost', 0))
-    obj1lis = eventlet.listen(('localhost', 0))
-    obj2lis = eventlet.listen(('localhost', 0))
-    global _test_sockets
-    _test_sockets = \
-        (prolis, acc1lis, acc2lis, con1lis, con2lis, obj1lis, obj2lis)

     account_ring_path = os.path.join(_testdir, 'account.ring.gz')
     with closing(GzipFile(account_ring_path, 'wb')) as f:
@@ -240,14 +400,6 @@ def in_process_setup(the_object_server=object_server):
                      {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                       'port': con2lis.getsockname()[1]}], 30),
                     f)
-    object_ring_path = os.path.join(_testdir, 'object.ring.gz')
-    with closing(GzipFile(object_ring_path, 'wb')) as f:
-        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
-                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
-                      'port': obj1lis.getsockname()[1]},
-                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
-                      'port': obj2lis.getsockname()[1]}], 30),
-                    f)

     eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
     # Turn off logging requests by the underlying WSGI software.
@@ -267,10 +419,13 @@ def in_process_setup(the_object_server=object_server):
         config, logger=debug_logger('cont1'))
     con2srv = container_server.ContainerController(
         config, logger=debug_logger('cont2'))
-    obj1srv = the_object_server.ObjectController(
-        config, logger=debug_logger('obj1'))
-    obj2srv = the_object_server.ObjectController(
-        config, logger=debug_logger('obj2'))
+    objsrvs = [
+        (obj_sockets[index],
+         the_object_server.ObjectController(
+             config, logger=debug_logger('obj%d' % (index + 1))))
+        for index in range(len(obj_sockets))
+    ]

     logger = debug_logger('proxy')

@@ -280,7 +435,10 @@ def in_process_setup(the_object_server=object_server):
     with mock.patch('swift.common.utils.get_logger', get_logger):
         with mock.patch('swift.common.middleware.memcache.MemcacheMiddleware',
                         FakeMemcacheMiddleware):
-            app = loadapp(proxy_conf, global_conf=config)
+            try:
+                app = loadapp(proxy_conf, global_conf=config)
+            except Exception as e:
+                raise InProcessException(e)

     nl = utils.NullLogger()
     prospa = eventlet.spawn(eventlet.wsgi.server, prolis, app, nl)
@@ -288,11 +446,13 @@ def in_process_setup(the_object_server=object_server):
     acc2spa = eventlet.spawn(eventlet.wsgi.server, acc2lis, acc2srv, nl)
     con1spa = eventlet.spawn(eventlet.wsgi.server, con1lis, con1srv, nl)
     con2spa = eventlet.spawn(eventlet.wsgi.server, con2lis, con2srv, nl)
-    obj1spa = eventlet.spawn(eventlet.wsgi.server, obj1lis, obj1srv, nl)
-    obj2spa = eventlet.spawn(eventlet.wsgi.server, obj2lis, obj2srv, nl)
+    objspa = [eventlet.spawn(eventlet.wsgi.server, objsrv[0], objsrv[1], nl)
+              for objsrv in objsrvs]

     global _test_coros
     _test_coros = \
-        (prospa, acc1spa, acc2spa, con1spa, con2spa, obj1spa, obj2spa)
+        (prospa, acc1spa, acc2spa, con1spa, con2spa) + tuple(objspa)

     # Create accounts "test" and "test2"
     def create_account(act):
@@ -393,8 +553,13 @@ def setup_package():
     if in_process:
         in_mem_obj_env = os.environ.get('SWIFT_TEST_IN_MEMORY_OBJ')
         in_mem_obj = utils.config_true_value(in_mem_obj_env)
-        in_process_setup(the_object_server=(
-            mem_object_server if in_mem_obj else object_server))
+        try:
+            in_process_setup(the_object_server=(
+                mem_object_server if in_mem_obj else object_server))
+        except InProcessException as exc:
+            print >> sys.stderr, ('Exception during in-process setup: %s'
+                                  % str(exc))
+            raise

     global web_front_end
     web_front_end = config.get('web_front_end', 'integral')
@@ -415,6 +580,9 @@ def setup_package():
     global swift_test_tenant
     global swift_test_perm
     global swift_test_domain
+    global swift_test_service_prefix

+    swift_test_service_prefix = None
+
     if config:
         swift_test_auth_version = str(config.get('auth_version', '1'))
@@ -430,6 +598,10 @@ def setup_package():
         except KeyError:
             pass  # skip

+        if 'service_prefix' in config:
+            swift_test_service_prefix = utils.append_underscore(
+                config['service_prefix'])
+
         if swift_test_auth_version == "1":
             swift_test_auth += 'v1.0'

@ -457,6 +629,13 @@ def setup_package():
|
|||||||
swift_test_key[2] = config['password3']
|
swift_test_key[2] = config['password3']
|
||||||
except KeyError:
|
except KeyError:
|
||||||
pass # old config, no third account tests can be run
|
pass # old config, no third account tests can be run
|
||||||
|
try:
|
||||||
|
swift_test_user[4] = '%s%s' % (
|
||||||
|
'%s:' % config['account5'], config['username5'])
|
||||||
|
swift_test_key[4] = config['password5']
|
||||||
|
swift_test_tenant[4] = config['account5']
|
||||||
|
except KeyError:
|
||||||
|
pass # no service token tests can be run
|
||||||
|
|
||||||
for _ in range(3):
|
for _ in range(3):
|
||||||
swift_test_perm[_] = swift_test_user[_]
|
swift_test_perm[_] = swift_test_user[_]
|
||||||
@@ -476,8 +655,12 @@ def setup_package():
                 swift_test_tenant[3] = config['account4']
                 swift_test_key[3] = config['password4']
                 swift_test_domain[3] = config['domain4']
+            if 'username5' in config:
+                swift_test_user[4] = config['username5']
+                swift_test_tenant[4] = config['account5']
+                swift_test_key[4] = config['password5']

-            for _ in range(4):
+            for _ in range(5):
                 swift_test_perm[_] = swift_test_tenant[_] + ':' \
                     + swift_test_user[_]

@@ -508,6 +691,14 @@ def setup_package():
         print >>sys.stderr, \
             'SKIPPING FUNCTIONAL TESTS SPECIFIC TO AUTH VERSION 3'

+    global skip_service_tokens
+    skip_service_tokens = not all([not skip, swift_test_user[4],
+                                   swift_test_key[4], swift_test_tenant[4],
+                                   swift_test_service_prefix])
+    if not skip and skip_service_tokens:
+        print >>sys.stderr, \
+            'SKIPPING FUNCTIONAL TESTS SPECIFIC TO SERVICE TOKENS'
+
     get_cluster_info()


@@ -546,10 +737,11 @@ class InternalServerError(Exception):
     pass


-url = [None, None, None, None]
-token = [None, None, None, None]
-parsed = [None, None, None, None]
-conn = [None, None, None, None]
+url = [None, None, None, None, None]
+token = [None, None, None, None, None]
+service_token = [None, None, None, None, None]
+parsed = [None, None, None, None, None]
+conn = [None, None, None, None, None]


 def connection(url):
@@ -558,6 +750,18 @@ def connection(url):
     return http_connection(url)


+def get_url_token(user_index, os_options):
+    authargs = dict(snet=False,
+                    tenant_name=swift_test_tenant[user_index],
+                    auth_version=swift_test_auth_version,
+                    os_options=os_options,
+                    insecure=insecure)
+    return get_auth(swift_test_auth,
+                    swift_test_user[user_index],
+                    swift_test_key[user_index],
+                    **authargs)
+
+
 def retry(func, *args, **kwargs):
     """
     You can use the kwargs to override:
@@ -566,29 +770,29 @@ def retry(func, *args, **kwargs):
     'url_account' (default: matches 'use_account') - which user's storage URL
     'resource' (default: url[url_account] - URL to connect to; retry()
                will interpolate the variable :storage_url: if present
+    'service_user' - add a service token from this user (1 indexed)
     """
-    global url, token, parsed, conn
+    global url, token, service_token, parsed, conn
     retries = kwargs.get('retries', 5)
     attempts, backoff = 0, 1

     # use account #1 by default; turn user's 1-indexed account into 0-indexed
     use_account = kwargs.pop('use_account', 1) - 1
+    service_user = kwargs.pop('service_user', None)
+    if service_user:
+        service_user -= 1  # 0-index
+
     # access our own account by default
     url_account = kwargs.pop('url_account', use_account + 1) - 1
     os_options = {'user_domain_name': swift_test_domain[use_account],
                   'project_domain_name': swift_test_domain[use_account]}
     while attempts <= retries:
+        auth_failure = False
         attempts += 1
         try:
             if not url[use_account] or not token[use_account]:
-                url[use_account], token[use_account] = \
-                    get_auth(swift_test_auth, swift_test_user[use_account],
-                             swift_test_key[use_account],
-                             snet=False,
-                             tenant_name=swift_test_tenant[use_account],
-                             auth_version=swift_test_auth_version,
-                             os_options=os_options)
+                url[use_account], token[use_account] = get_url_token(
+                    use_account, os_options)
                 parsed[use_account] = conn[use_account] = None
             if not parsed[use_account] or not conn[use_account]:
                 parsed[use_account], conn[use_account] = \
@@ -598,6 +802,11 @@ def retry(func, *args, **kwargs):
             resource = kwargs.pop('resource', '%(storage_url)s')
             template_vars = {'storage_url': url[url_account]}
             parsed_result = urlparse(resource % template_vars)
+            if isinstance(service_user, int):
+                if not service_token[service_user]:
+                    dummy, service_token[service_user] = get_url_token(
+                        service_user, os_options)
+                kwargs['service_token'] = service_token[service_user]
             return func(url[url_account], token[use_account],
                         parsed_result, conn[url_account],
                         *args, **kwargs)
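For context on service_token: composite-token requests carry the end user's token plus a second token obtained for the service user, and the auth middleware checks the service token against the <prefix>_require_group (tempauth) or <prefix>_service_roles (keystoneauth) setting. A rough sketch of the resulting request headers (helper is illustrative):

    def token_headers(user_token, service_token=None):
        headers = {'X-Auth-Token': user_token}
        if service_token:
            # X-Service-Token is Swift's composite-token header
            headers['X-Service-Token'] = service_token
        return headers

    print(token_headers('AUTH_tk_user', 'AUTH_tk_service'))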
@@ -605,13 +814,18 @@ def retry(func, *args, **kwargs):
             if attempts > retries:
                 raise
             parsed[use_account] = conn[use_account] = None
+            if service_user:
+                service_token[service_user] = None
         except AuthError:
+            auth_failure = True
             url[use_account] = token[use_account] = None
-            continue
+            if service_user:
+                service_token[service_user] = None
         except InternalServerError:
             pass
         if attempts <= retries:
-            sleep(backoff)
+            if not auth_failure:
+                sleep(backoff)
             backoff *= 2
     raise Exception('No result after %s retries.' % retries)

|
|||||||
def wrapper(*args, **kwargs):
|
def wrapper(*args, **kwargs):
|
||||||
global skip, cluster_info
|
global skip, cluster_info
|
||||||
if skip or not cluster_info:
|
if skip or not cluster_info:
|
||||||
raise SkipTest
|
raise SkipTest('Requires account ACLs')
|
||||||
# Determine whether this cluster has account ACLs; if not, skip test
|
# Determine whether this cluster has account ACLs; if not, skip test
|
||||||
if not cluster_info.get('tempauth', {}).get('account_acls'):
|
if not cluster_info.get('tempauth', {}).get('account_acls'):
|
||||||
raise SkipTest
|
raise SkipTest('Requires account ACLs')
|
||||||
if 'keystoneauth' in cluster_info:
|
if swift_test_auth_version != '1':
|
||||||
# remove when keystoneauth supports account acls
|
# remove when keystoneauth supports account acls
|
||||||
raise SkipTest
|
raise SkipTest('Requires account ACLs')
|
||||||
reset_acl()
|
reset_acl()
|
||||||
try:
|
try:
|
||||||
rv = f(*args, **kwargs)
|
rv = f(*args, **kwargs)
|
||||||
|
@@ -5,17 +5,26 @@ swift_hash_path_suffix = changeme

 [storage-policy:0]
 name = gold
+policy_type = replication

 [storage-policy:1]
 name = silver
+policy_type = replication
+
+[storage-policy:2]
+name = ec42
+policy_type = erasure_coding
+ec_type = jerasure_rs_vand
+ec_num_data_fragments = 4
+ec_num_parity_fragments = 2

 # SwiftOnFile
-[storage-policy:2]
+[storage-policy:3]
 name = swiftonfile
+policy_type = replication
 default = yes

 [swift-constraints]
 max_object_name_length = 221
 max_account_name_length = 255
 max_container_name_length = 255

@@ -4,12 +4,13 @@ auth_host = 127.0.0.1
 auth_port = 8080
 auth_ssl = no
 auth_prefix = /auth/
-## sample config for Swift with Keystone
-#auth_version = 2
+## sample config for Swift with Keystone v2 API
+# For keystone v2 change auth_version to 2 and auth_prefix to /v2.0/
+#auth_version = 3
 #auth_host = localhost
 #auth_port = 5000
 #auth_ssl = no
-#auth_prefix = /v2.0/
+#auth_prefix = /v3/

 # Primary functional test account (needs admin access to the account)
 account = test
@@ -25,8 +26,43 @@ password2 = testing2
 username3 = tester3
 password3 = testing3

+# Fourth user is required for keystone v3 specific tests.
+# Account must be in a non-default domain.
+#account4 = test4
+#username4 = tester4
+#password4 = testing4
+#domain4 = test-domain
+
+# Fifth user is required for service token-specific tests.
+# The account must be different than the primary test account
+# The user must not have a group (tempauth) or role (keystoneauth) on
+# the primary test account. The user must have a group/role that is unique
+# and not given to the primary tester and is specified in the options
+# <prefix>_require_group (tempauth) or <prefix>_service_roles (keystoneauth).
+#account5 = test5
+#username5 = tester5
+#password5 = testing5
+
+# The service_prefix option is used for service token-specific tests.
+# If service_prefix or username5 above is not supplied, the tests are skipped.
+# To set the value and enable the service token tests, look at the
+# reseller_prefix option in /etc/swift/proxy-server.conf. There must be at
+# least two prefixes. If not, add a prefix as follows (where we add SERVICE):
+# reseller_prefix = AUTH, SERVICE
+# The service_prefix must match the <prefix> used in <prefix>_require_group
+# (tempauth) or <prefix>_service_roles (keystoneauth); for example:
+# SERVICE_require_group = service
+# SERVICE_service_roles = service
+# Note: Do not enable service token tests if the first prefix in
+# reseller_prefix is the empty prefix AND the primary functional test
+# account contains an underscore.
+#service_prefix = SERVICE
+
 collate = C

+# Only necessary if a pre-existing server uses self-signed certificate
+insecure = no
+
 [unit_test]
 fake_syslog = False

@@ -51,7 +87,7 @@ fake_syslog = False
 # Note that the cluster must have "sane" values for the test suite to pass
 # (for some definition of sane).
 #
-#max_file_size = 1099511
+#max_file_size = 5368709122
 #max_meta_name_length = 128
 #max_meta_value_length = 256
 #max_meta_count = 90
@@ -66,4 +102,3 @@ max_container_name_length = 255
 # Newer swift versions default to strict cors mode, but older ones were the
 # opposite.
 #strict_cors_mode = true
-#
@@ -26,8 +26,11 @@ import simplejson as json

 from nose import SkipTest
 from xml.dom import minidom

 from swiftclient import get_auth
+
+from swift.common.utils import config_true_value

 from test import safe_repr

@@ -109,6 +112,7 @@ class Connection(object):
         self.auth_host = config['auth_host']
         self.auth_port = int(config['auth_port'])
         self.auth_ssl = config['auth_ssl'] in ('on', 'true', 'yes', '1')
+        self.insecure = config_true_value(config.get('insecure', 'false'))
         self.auth_prefix = config.get('auth_prefix', '/')
         self.auth_version = str(config.get('auth_version', '1'))

@@ -147,10 +151,11 @@ class Connection(object):
         auth_netloc = "%s:%d" % (self.auth_host, self.auth_port)
         auth_url = auth_scheme + auth_netloc + auth_path

+        authargs = dict(snet=False, tenant_name=self.account,
+                        auth_version=self.auth_version, os_options={},
+                        insecure=self.insecure)
         (storage_url, storage_token) = get_auth(
-            auth_url, auth_user, self.password, snet=False,
-            tenant_name=self.account, auth_version=self.auth_version,
-            os_options={})
+            auth_url, auth_user, self.password, **authargs)

         if not (storage_url and storage_token):
             raise AuthenticationFailed()
@@ -582,7 +587,10 @@ class Container(Base):
         if self.conn.response.status == 204:
             required_fields = [['bytes_used', 'x-container-bytes-used'],
                                ['object_count', 'x-container-object-count']]
-            optional_fields = [['versions', 'x-versions-location']]
+            optional_fields = [
+                ['versions', 'x-versions-location'],
+                ['tempurl_key', 'x-container-meta-temp-url-key'],
+                ['tempurl_key2', 'x-container-meta-temp-url-key-2']]

             return self.header_fields(required_fields, optional_fields)

@@ -722,8 +730,10 @@ class File(Base):
                   ['content_type', 'content-type'],
                   ['last_modified', 'last-modified'],
                   ['etag', 'etag']]
+        optional_fields = [['x_object_manifest', 'x-object-manifest']]

-        header_fields = self.header_fields(fields)
+        header_fields = self.header_fields(fields,
+                                           optional_fields=optional_fields)
         header_fields['etag'] = header_fields['etag'].strip('"')
         return header_fields

@@ -28,8 +28,10 @@ import uuid
 from copy import deepcopy
 import eventlet
 from nose import SkipTest
+from swift.common.http import is_success, is_client_error

 from test.functional import normalized_urls, load_constraint, cluster_info
+from test.functional import check_response, retry
 import test.functional as tf
 from test.functional.swift_test_client import Account, Connection, File, \
     ResponseError
@@ -1322,7 +1324,12 @@ class TestFile(Base):
         self.assertEqual(file_types, file_types_read)

     def testRangedGets(self):
-        file_length = 10000
+        # We set the file_length to a strange multiple here. This is to check
+        # that ranges still work in the EC case when the requested range
+        # spans EC segment boundaries. The 1 MiB base value is chosen because
+        # that's a common EC segment size. The 1.33 multiple is to ensure we
+        # aren't aligned on segment boundaries
+        file_length = int(1048576 * 1.33)
         range_size = file_length / 10
         file_item = self.env.container.file(Utils.create_name())
         data = file_item.write_random(file_length)
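The new file_length straddles an EC segment boundary by construction, which is the point of the comment added above: with a 1 MiB segment size, int(1048576 * 1.33) bytes is one full segment plus a partial one:

    segment_size = 1048576                    # 1 MiB, a common EC segment size
    file_length = int(segment_size * 1.33)
    print(file_length)                        # 1394606
    print(divmod(file_length, segment_size))  # (1, 346030): one segment + rest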
@@ -1812,6 +1819,9 @@ class TestDlo(Base):
         file_item = self.env.container.file('man1')
         file_contents = file_item.read(parms={'multipart-manifest': 'get'})
         self.assertEqual(file_contents, "man1-contents")
+        self.assertEqual(file_item.info()['x_object_manifest'],
+                         "%s/%s/seg_lower" %
+                         (self.env.container.name, self.env.segment_prefix))

     def test_get_range(self):
         file_item = self.env.container.file('man1')
@@ -1846,6 +1856,8 @@ class TestDlo(Base):
         self.assertEqual(
             file_contents,
             "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
+        # The copied object must not have X-Object-Manifest
+        self.assertTrue("x_object_manifest" not in file_item.info())

     def test_copy_account(self):
         # dlo use same account and same container only
@@ -1870,9 +1882,12 @@ class TestDlo(Base):
         self.assertEqual(
             file_contents,
             "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeeeffffffffff")
+        # The copied object must not have X-Object-Manifest
+        self.assertTrue("x_object_manifest" not in file_item.info())

     def test_copy_manifest(self):
-        # Copying the manifest should result in another manifest
+        # Copying the manifest with multipart-manifest=get query string
+        # should result in another manifest
         try:
             man1_item = self.env.container.file('man1')
             man1_item.copy(self.env.container.name, "copied-man1",
@@ -1886,6 +1901,8 @@ class TestDlo(Base):
             self.assertEqual(
                 copied_contents,
                 "aaaaaaaaaabbbbbbbbbbccccccccccddddddddddeeeeeeeeee")
+            self.assertEqual(man1_item.info()['x_object_manifest'],
+                             copied.info()['x_object_manifest'])
         finally:
             # try not to leave this around for other tests to stumble over
             self.env.container.file("copied-man1").delete()
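For reference, the behaviour asserted above can be sketched with the same swift_test_client helpers used throughout this file (here `env` stands for a TestDlo-style environment; illustrative only):

    man1 = env.container.file('man1')
    # multipart-manifest=get copies the manifest object itself rather than
    # the concatenated segments, so X-Object-Manifest must survive the copy
    man1.copy(env.container.name, 'copied-man1',
              parms={'multipart-manifest': 'get'})
    copied = env.container.file('copied-man1')
    assert man1.info()['x_object_manifest'] == \
        copied.info()['x_object_manifest']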
@@ -2404,6 +2421,14 @@ class TestObjectVersioningEnv(object):
         cls.account = Account(cls.conn, tf.config.get('account',
                                                       tf.config['username']))

+        # Second connection for ACL tests
+        config2 = deepcopy(tf.config)
+        config2['account'] = tf.config['account2']
+        config2['username'] = tf.config['username2']
+        config2['password'] = tf.config['password2']
+        cls.conn2 = Connection(config2)
+        cls.conn2.authenticate()
+
         # avoid getting a prefix that stops halfway through an encoded
         # character
         prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
@@ -2457,6 +2482,14 @@ class TestCrossPolicyObjectVersioningEnv(object):
         cls.account = Account(cls.conn, tf.config.get('account',
                                                       tf.config['username']))

+        # Second connection for ACL tests
+        config2 = deepcopy(tf.config)
+        config2['account'] = tf.config['account2']
+        config2['username'] = tf.config['username2']
+        config2['password'] = tf.config['password2']
+        cls.conn2 = Connection(config2)
+        cls.conn2.authenticate()
+
         # avoid getting a prefix that stops halfway through an encoded
         # character
         prefix = Utils.create_name().decode("utf-8")[:10].encode("utf-8")
@@ -2491,6 +2524,15 @@ class TestObjectVersioning(Base):
                 "Expected versioning_enabled to be True/False, got %r" %
                 (self.env.versioning_enabled,))

+    def tearDown(self):
+        super(TestObjectVersioning, self).tearDown()
+        try:
+            # delete versions first!
+            self.env.versions_container.delete_files()
+            self.env.container.delete_files()
+        except ResponseError:
+            pass
+
     def test_overwriting(self):
         container = self.env.container
         versions_container = self.env.versions_container
@@ -2522,6 +2564,81 @@ class TestObjectVersioning(Base):
         versioned_obj.delete()
         self.assertRaises(ResponseError, versioned_obj.read)

+    def test_versioning_dlo(self):
+        # SOF
+        # Modified the swift test a little to avoid 409 in SoF
+        # because of destination path already being a directory.
+        # Example:
+        # PUT AUTH_test/animals/cat/seg1.jpg
+        # PUT AUTH_test/animals/cat/seg2.jpg
+        # PUT AUTH_test/animals/cat -H "X-Object-Manifest: animals/cat/"
+        # The last PUT above fails as animals/cat is already a directory in SoF
+        #
+        # The Change:
+        # Name the manifest object in such a way that it is not an existing
+        # file or directory in SwiftOnFile.
+        # Example:
+        # PUT AUTH_test/animals/cat_seg1.jpg
+        # PUT AUTH_test/animals/cat_seg2.jpg
+        # PUT AUTH_test/animals/cat -H "X-Object-Manifest: animals/cat_"
+        #
+        # TODO: Modify this test in Swift upstream
+
+        container = self.env.container
+        versions_container = self.env.versions_container
+        obj_name = Utils.create_name()
+
+        for i in ('1', '2', '3'):
+            time.sleep(.01)  # guarantee that the timestamp changes
+            obj_name_seg = obj_name + '_seg' + i
+            versioned_obj = container.file(obj_name_seg)
+            versioned_obj.write(i)
+            versioned_obj.write(i + i)
+
+        self.assertEqual(3, versions_container.info()['object_count'])
+
+        manifest_obj_name = obj_name + '_'
+        man_file = container.file(manifest_obj_name)
+        man_file.write('', hdrs={"X-Object-Manifest": "%s/%s" %
+                       (self.env.container.name, obj_name + '_')})
+
+        # guarantee that the timestamp changes
+        time.sleep(.01)
+
+        # write manifest file again
+        man_file.write('', hdrs={"X-Object-Manifest": "%s/%s" %
+                       (self.env.container.name, obj_name + '_')})
+
+        self.assertEqual(3, versions_container.info()['object_count'])
+        self.assertEqual("112233", man_file.read())
+
+    def test_versioning_check_acl(self):
+        container = self.env.container
+        versions_container = self.env.versions_container
+        versions_container.create(hdrs={'X-Container-Read': '.r:*,.rlistings'})
+
+        obj_name = Utils.create_name()
+        versioned_obj = container.file(obj_name)
+        versioned_obj.write("aaaaa")
+        self.assertEqual("aaaaa", versioned_obj.read())
+
+        versioned_obj.write("bbbbb")
+        self.assertEqual("bbbbb", versioned_obj.read())
+
+        # Use token from second account and try to delete the object
+        org_token = self.env.account.conn.storage_token
+        self.env.account.conn.storage_token = self.env.conn2.storage_token
+        try:
+            self.assertRaises(ResponseError, versioned_obj.delete)
+        finally:
+            self.env.account.conn.storage_token = org_token
+
+        # Verify with token from first account
+        self.assertEqual("bbbbb", versioned_obj.read())
+
+        versioned_obj.delete()
+        self.assertEqual("aaaaa", versioned_obj.read())
+

 class TestObjectVersioningUTF8(Base2, TestObjectVersioning):
     set_up = False
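The filesystem collision the comment block above describes can be reproduced outside Swift; a minimal sketch (paths are hypothetical):

    import os

    root = '/mnt/sof/AUTH_test'  # hypothetical SwiftOnFile mount
    os.makedirs(os.path.join(root, 'animals'))
    # Old naming would do makedirs(root + '/animals/cat') for the segments,
    # after which creating a *file* at root + '/animals/cat' fails -- the
    # 409 described above.
    # New naming: segments merely share the 'cat_' prefix, leaving
    # 'animals/cat' free to be created as a plain manifest file.
    open(os.path.join(root, 'animals', 'cat_seg1.jpg'), 'w').close()
    open(os.path.join(root, 'animals', 'cat_seg2.jpg'), 'w').close()
    open(os.path.join(root, 'animals', 'cat'), 'w').close()  # no clash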
@@ -2715,6 +2832,212 @@ class TestTempurlUTF8(Base2, TestTempurl):
     set_up = False


+class TestContainerTempurlEnv(object):
+    tempurl_enabled = None  # tri-state: None initially, then True/False
+
+    @classmethod
+    def setUp(cls):
+        cls.conn = Connection(tf.config)
+        cls.conn.authenticate()
+
+        if cls.tempurl_enabled is None:
+            cls.tempurl_enabled = 'tempurl' in cluster_info
+            if not cls.tempurl_enabled:
+                return
+
+        cls.tempurl_key = Utils.create_name()
+        cls.tempurl_key2 = Utils.create_name()
+
+        cls.account = Account(
+            cls.conn, tf.config.get('account', tf.config['username']))
+        cls.account.delete_containers()
+
+        # creating another account and connection
+        # for ACL tests
+        config2 = deepcopy(tf.config)
+        config2['account'] = tf.config['account2']
+        config2['username'] = tf.config['username2']
+        config2['password'] = tf.config['password2']
+        cls.conn2 = Connection(config2)
+        cls.conn2.authenticate()
+        cls.account2 = Account(
+            cls.conn2, config2.get('account', config2['username']))
+        cls.account2 = cls.conn2.get_account()
+
+        cls.container = cls.account.container(Utils.create_name())
+        if not cls.container.create({
+                'x-container-meta-temp-url-key': cls.tempurl_key,
+                'x-container-meta-temp-url-key-2': cls.tempurl_key2,
+                'x-container-read': cls.account2.name}):
+            raise ResponseError(cls.conn.response)
+
+        cls.obj = cls.container.file(Utils.create_name())
+        cls.obj.write("obj contents")
+        cls.other_obj = cls.container.file(Utils.create_name())
+        cls.other_obj.write("other obj contents")
+
+
+class TestContainerTempurl(Base):
+    env = TestContainerTempurlEnv
+    set_up = False
+
+    def setUp(self):
+        super(TestContainerTempurl, self).setUp()
+        if self.env.tempurl_enabled is False:
+            raise SkipTest("TempURL not enabled")
+        elif self.env.tempurl_enabled is not True:
+            # just some sanity checking
+            raise Exception(
+                "Expected tempurl_enabled to be True/False, got %r" %
+                (self.env.tempurl_enabled,))
+
+        expires = int(time.time()) + 86400
+        sig = self.tempurl_sig(
+            'GET', expires, self.env.conn.make_path(self.env.obj.path),
+            self.env.tempurl_key)
+        self.obj_tempurl_parms = {'temp_url_sig': sig,
+                                  'temp_url_expires': str(expires)}
+
+    def tempurl_sig(self, method, expires, path, key):
+        return hmac.new(
+            key,
+            '%s\n%s\n%s' % (method, expires, urllib.unquote(path)),
+            hashlib.sha1).hexdigest()
+
+    def test_GET(self):
+        contents = self.env.obj.read(
+            parms=self.obj_tempurl_parms,
+            cfg={'no_auth_token': True})
+        self.assertEqual(contents, "obj contents")
+
+        # GET tempurls also allow HEAD requests
+        self.assert_(self.env.obj.info(parms=self.obj_tempurl_parms,
+                                       cfg={'no_auth_token': True}))
+
+    def test_GET_with_key_2(self):
+        expires = int(time.time()) + 86400
+        sig = self.tempurl_sig(
+            'GET', expires, self.env.conn.make_path(self.env.obj.path),
+            self.env.tempurl_key2)
+        parms = {'temp_url_sig': sig,
+                 'temp_url_expires': str(expires)}
+
+        contents = self.env.obj.read(parms=parms, cfg={'no_auth_token': True})
+        self.assertEqual(contents, "obj contents")
+
+    def test_PUT(self):
+        new_obj = self.env.container.file(Utils.create_name())
+
+        expires = int(time.time()) + 86400
+        sig = self.tempurl_sig(
+            'PUT', expires, self.env.conn.make_path(new_obj.path),
+            self.env.tempurl_key)
+        put_parms = {'temp_url_sig': sig,
+                     'temp_url_expires': str(expires)}
+
+        new_obj.write('new obj contents',
+                      parms=put_parms, cfg={'no_auth_token': True})
+        self.assertEqual(new_obj.read(), "new obj contents")
+
+        # PUT tempurls also allow HEAD requests
+        self.assert_(new_obj.info(parms=put_parms,
+                                  cfg={'no_auth_token': True}))
+
+    def test_HEAD(self):
+        expires = int(time.time()) + 86400
+        sig = self.tempurl_sig(
+            'HEAD', expires, self.env.conn.make_path(self.env.obj.path),
+            self.env.tempurl_key)
+        head_parms = {'temp_url_sig': sig,
+                      'temp_url_expires': str(expires)}
+
+        self.assert_(self.env.obj.info(parms=head_parms,
+                                       cfg={'no_auth_token': True}))
+        # HEAD tempurls don't allow PUT or GET requests, despite the fact that
+        # PUT and GET tempurls both allow HEAD requests
+        self.assertRaises(ResponseError, self.env.other_obj.read,
+                          cfg={'no_auth_token': True},
+                          parms=self.obj_tempurl_parms)
+        self.assert_status([401])
+
+        self.assertRaises(ResponseError, self.env.other_obj.write,
+                          'new contents',
+                          cfg={'no_auth_token': True},
+                          parms=self.obj_tempurl_parms)
+        self.assert_status([401])
+
+    def test_different_object(self):
+        contents = self.env.obj.read(
+            parms=self.obj_tempurl_parms,
+            cfg={'no_auth_token': True})
+        self.assertEqual(contents, "obj contents")
+
+        self.assertRaises(ResponseError, self.env.other_obj.read,
+                          cfg={'no_auth_token': True},
+                          parms=self.obj_tempurl_parms)
+        self.assert_status([401])
+
+    def test_changing_sig(self):
+        contents = self.env.obj.read(
+            parms=self.obj_tempurl_parms,
+            cfg={'no_auth_token': True})
+        self.assertEqual(contents, "obj contents")
+
+        parms = self.obj_tempurl_parms.copy()
+        if parms['temp_url_sig'][0] == 'a':
+            parms['temp_url_sig'] = 'b' + parms['temp_url_sig'][1:]
+        else:
+            parms['temp_url_sig'] = 'a' + parms['temp_url_sig'][1:]
+
+        self.assertRaises(ResponseError, self.env.obj.read,
+                          cfg={'no_auth_token': True},
+                          parms=parms)
+        self.assert_status([401])
+
+    def test_changing_expires(self):
+        contents = self.env.obj.read(
+            parms=self.obj_tempurl_parms,
+            cfg={'no_auth_token': True})
+        self.assertEqual(contents, "obj contents")
+
+        parms = self.obj_tempurl_parms.copy()
+        if parms['temp_url_expires'][-1] == '0':
+            parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '1'
+        else:
+            parms['temp_url_expires'] = parms['temp_url_expires'][:-1] + '0'
+
+        self.assertRaises(ResponseError, self.env.obj.read,
+                          cfg={'no_auth_token': True},
+                          parms=parms)
+        self.assert_status([401])
+
+    def test_tempurl_keys_visible_to_account_owner(self):
+        if not tf.cluster_info.get('tempauth'):
+            raise SkipTest('TEMP AUTH SPECIFIC TEST')
+        metadata = self.env.container.info()
+        self.assertEqual(metadata.get('tempurl_key'), self.env.tempurl_key)
+        self.assertEqual(metadata.get('tempurl_key2'), self.env.tempurl_key2)
+
+    def test_tempurl_keys_hidden_from_acl_readonly(self):
+        if not tf.cluster_info.get('tempauth'):
+            raise SkipTest('TEMP AUTH SPECIFIC TEST')
+        original_token = self.env.container.conn.storage_token
+        self.env.container.conn.storage_token = self.env.conn2.storage_token
+        metadata = self.env.container.info()
+        self.env.container.conn.storage_token = original_token
+
+        self.assertTrue('tempurl_key' not in metadata,
+                        'Container TempURL key found, should not be visible '
+                        'to readonly ACLs')
+        self.assertTrue('tempurl_key2' not in metadata,
+                        'Container TempURL key-2 found, should not be visible '
+                        'to readonly ACLs')
+
+
+class TestContainerTempurlUTF8(Base2, TestContainerTempurl):
+    set_up = False
+
+
 class TestSloTempurlEnv(object):
     enabled = None  # tri-state: None initially, then True/False

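The tempurl_sig helper added above computes the standard Swift TempURL HMAC-SHA1; a standalone sketch (key and path are made-up values):

    import hashlib
    import hmac
    import time

    key = 'container-secret'        # hypothetical temp-url key
    path = '/v1/AUTH_test/c/o'      # hypothetical object path
    expires = int(time.time()) + 86400
    sig = hmac.new(key, '%s\n%s\n%s' % ('GET', expires, path),
                   hashlib.sha1).hexdigest()
    # append as query parameters to build a pre-signed URL
    print '%s?temp_url_sig=%s&temp_url_expires=%d' % (path, sig, expires)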
@@ -2802,5 +3125,174 @@ class TestSloTempurlUTF8(Base2, TestSloTempurl):
     set_up = False


+class TestServiceToken(unittest.TestCase):
+
+    def setUp(self):
+        if tf.skip_service_tokens:
+            raise SkipTest
+
+        self.SET_TO_USERS_TOKEN = 1
+        self.SET_TO_SERVICE_TOKEN = 2
+
+        # keystoneauth and tempauth differ in allowing PUT account
+        # Even if keystoneauth allows it, the proxy-server uses
+        # allow_account_management to decide if accounts can be created
+        self.put_account_expect = is_client_error
+        if tf.swift_test_auth_version != '1':
+            if cluster_info.get('swift').get('allow_account_management'):
+                self.put_account_expect = is_success
+
+    def _scenario_generator(self):
+        paths = ((None, None), ('c', None), ('c', 'o'))
+        for path in paths:
+            for method in ('PUT', 'POST', 'HEAD', 'GET', 'OPTIONS'):
+                yield method, path[0], path[1]
+        for path in reversed(paths):
+            yield 'DELETE', path[0], path[1]
+
+    def _assert_is_authed_response(self, method, container, object, resp):
+        resp.read()
+        expect = is_success
+        if method == 'DELETE' and not container:
+            expect = is_client_error
+        if method == 'PUT' and not container:
+            expect = self.put_account_expect
+        self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
+                        % (resp.status, method, container, object))
+
+    def _assert_not_authed_response(self, method, container, object, resp):
+        resp.read()
+        expect = is_client_error
+        if method == 'OPTIONS':
+            expect = is_success
+        self.assertTrue(expect(resp.status), 'Unexpected %s for %s %s %s'
+                        % (resp.status, method, container, object))
+
+    def prepare_request(self, method, use_service_account=False,
+                        container=None, obj=None, body=None, headers=None,
+                        x_auth_token=None,
+                        x_service_token=None, dbg=False):
+        """
+        Setup for making the request
+
+        When retry() calls the do_request() function, it calls it with the
+        test user's token, the parsed path, a connection and (optionally)
+        a token from the test service user. We save options here so that
+        do_request() can make the appropriate request.
+
+        :param method: The operation (e.g. 'HEAD')
+        :param use_service_account: Optional. Set True to change the path to
+               be the service account
+        :param container: Optional. Adds a container name to the path
+        :param obj: Optional. Adds an object name to the path
+        :param body: Optional. Adds a body (string) in the request
+        :param headers: Optional. Adds additional headers.
+        :param x_auth_token: Optional. Default is SET_TO_USERS_TOKEN. One of:
+               SET_TO_USERS_TOKEN     Put the test user's token in
+                                      X-Auth-Token
+               SET_TO_SERVICE_TOKEN   Put the service token in X-Auth-Token
+        :param x_service_token: Optional. Default is to not set X-Service-Token
+               to any value. If specified, is one of following:
+               SET_TO_USERS_TOKEN     Put the test user's token in
+                                      X-Service-Token
+               SET_TO_SERVICE_TOKEN   Put the service token in
+                                      X-Service-Token
+        :param dbg: Optional. Set true to check request arguments
+        """
+        self.method = method
+        self.use_service_account = use_service_account
+        self.container = container
+        self.obj = obj
+        self.body = body
+        self.headers = headers
+        if x_auth_token:
+            self.x_auth_token = x_auth_token
+        else:
+            self.x_auth_token = self.SET_TO_USERS_TOKEN
+        self.x_service_token = x_service_token
+        self.dbg = dbg
+
+    def do_request(self, url, token, parsed, conn, service_token=''):
+        if self.use_service_account:
+            path = self._service_account(parsed.path)
+        else:
+            path = parsed.path
+        if self.container:
+            path += '/%s' % self.container
+        if self.obj:
+            path += '/%s' % self.obj
+        headers = {}
+        if self.body:
+            headers.update({'Content-Length': len(self.body)})
+        if self.headers:
+            headers.update(self.headers)
+        if self.x_auth_token == self.SET_TO_USERS_TOKEN:
+            headers.update({'X-Auth-Token': token})
+        elif self.x_auth_token == self.SET_TO_SERVICE_TOKEN:
+            headers.update({'X-Auth-Token': service_token})
+        if self.x_service_token == self.SET_TO_USERS_TOKEN:
+            headers.update({'X-Service-Token': token})
+        elif self.x_service_token == self.SET_TO_SERVICE_TOKEN:
+            headers.update({'X-Service-Token': service_token})
+        if self.dbg:
+            print('DEBUG: conn.request: method:%s path:%s'
+                  ' body:%s headers:%s' % (self.method, path, self.body,
+                                           headers))
+        conn.request(self.method, path, self.body, headers=headers)
+        return check_response(conn)
+
+    def _service_account(self, path):
+        parts = path.split('/', 3)
+        account = parts[2]
+        try:
+            project_id = account[account.index('_') + 1:]
+        except ValueError:
+            project_id = account
+        parts[2] = '%s%s' % (tf.swift_test_service_prefix, project_id)
+        return '/'.join(parts)
+
+    def test_user_access_own_auth_account(self):
+        # This covers ground tested elsewhere (tests a user doing HEAD
+        # on own account). However, if this fails, none of the remaining
+        # tests will work
+        self.prepare_request('HEAD')
+        resp = retry(self.do_request)
+        resp.read()
+        self.assert_(resp.status in (200, 204), resp.status)
+
+    def test_user_cannot_access_service_account(self):
+        for method, container, obj in self._scenario_generator():
+            self.prepare_request(method, use_service_account=True,
+                                 container=container, obj=obj)
+            resp = retry(self.do_request)
+            self._assert_not_authed_response(method, container, obj, resp)
+
+    def test_service_user_denied_with_x_auth_token(self):
+        for method, container, obj in self._scenario_generator():
+            self.prepare_request(method, use_service_account=True,
+                                 container=container, obj=obj,
+                                 x_auth_token=self.SET_TO_SERVICE_TOKEN)
+            resp = retry(self.do_request, service_user=5)
+            self._assert_not_authed_response(method, container, obj, resp)
+
+    def test_service_user_denied_with_x_service_token(self):
+        for method, container, obj in self._scenario_generator():
+            self.prepare_request(method, use_service_account=True,
+                                 container=container, obj=obj,
+                                 x_auth_token=self.SET_TO_SERVICE_TOKEN,
+                                 x_service_token=self.SET_TO_SERVICE_TOKEN)
+            resp = retry(self.do_request, service_user=5)
+            self._assert_not_authed_response(method, container, obj, resp)
+
+    def test_user_plus_service_can_access_service_account(self):
+        for method, container, obj in self._scenario_generator():
+            self.prepare_request(method, use_service_account=True,
+                                 container=container, obj=obj,
+                                 x_auth_token=self.SET_TO_USERS_TOKEN,
+                                 x_service_token=self.SET_TO_SERVICE_TOKEN)
+            resp = retry(self.do_request, service_user=5)
+            self._assert_is_authed_response(method, container, obj, resp)
+
+
 if __name__ == '__main__':
     unittest.main()
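The composite-token scheme exercised by TestServiceToken boils down to sending two tokens on one request; a minimal sketch (host, port, path and token values are made up):

    import httplib

    conn = httplib.HTTPConnection('proxy.example.com', 8080)
    conn.request('HEAD', '/v1/SERVICE_myproject', headers={
        'X-Auth-Token': 'end-user-token',      # the user's own token
        'X-Service-Token': 'service-token',    # the service's token
    })
    # both tokens must authorize the request, per the tests above
    print conn.getresponse().status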
@@ -20,71 +20,288 @@ import copy
 import logging
 import errno
 import sys
-from contextlib import contextmanager
-from collections import defaultdict
+from contextlib import contextmanager, closing
+from collections import defaultdict, Iterable
+import itertools
+from numbers import Number
 from tempfile import NamedTemporaryFile
 import time
+import eventlet
 from eventlet.green import socket
 from tempfile import mkdtemp
 from shutil import rmtree
+from swift.common.utils import Timestamp
 from test import get_config
-from swift.common.utils import config_true_value, LogAdapter
+from swift.common import swob, utils
+from swift.common.ring import Ring, RingData
 from hashlib import md5
-from eventlet import sleep, Timeout
 import logging.handlers
 from httplib import HTTPException
-from numbers import Number
+from swift.common import storage_policy
+from swift.common.storage_policy import StoragePolicy, ECStoragePolicy
+import functools
+import cPickle as pickle
+from gzip import GzipFile
+import mock as mocklib
+import inspect
+
+EMPTY_ETAG = md5().hexdigest()
+
+# try not to import this module from swift
+if not os.path.basename(sys.argv[0]).startswith('swift'):
+    # never patch HASH_PATH_SUFFIX AGAIN!
+    utils.HASH_PATH_SUFFIX = 'endcap'
+
+
-class FakeRing(object):
+def patch_policies(thing_or_policies=None, legacy_only=False,
+                   with_ec_default=False, fake_ring_args=None):
+    if isinstance(thing_or_policies, (
+            Iterable, storage_policy.StoragePolicyCollection)):
+        return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)

-    def __init__(self, replicas=3, max_more_nodes=0):
+    if legacy_only:
+        default_policies = [
+            StoragePolicy(0, name='legacy', is_default=True),
+        ]
+        default_ring_args = [{}]
+    elif with_ec_default:
+        default_policies = [
+            ECStoragePolicy(0, name='ec', is_default=True,
+                            ec_type='jerasure_rs_vand', ec_ndata=10,
+                            ec_nparity=4, ec_segment_size=4096),
+            StoragePolicy(1, name='unu'),
+        ]
+        default_ring_args = [{'replicas': 14}, {}]
+    else:
+        default_policies = [
+            StoragePolicy(0, name='nulo', is_default=True),
+            StoragePolicy(1, name='unu'),
+        ]
+        default_ring_args = [{}, {}]
+
+    fake_ring_args = fake_ring_args or default_ring_args
+    decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
+
+    if not thing_or_policies:
+        return decorator
+    else:
+        # it's a thing, we return the wrapped thing instead of the decorator
+        return decorator(thing_or_policies)
+
+
+class PatchPolicies(object):
+    """
+    Why not mock.patch? In my case, when used as a decorator on the class it
+    seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
+    patched yet)
+    """
+
+    def __init__(self, policies, fake_ring_args=None):
+        if isinstance(policies, storage_policy.StoragePolicyCollection):
+            self.policies = policies
+        else:
+            self.policies = storage_policy.StoragePolicyCollection(policies)
+        self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
+
+    def _setup_rings(self):
+        """
+        Our tests tend to use the policies rings like their own personal
+        playground - which can be a problem in the particular case of a
+        patched TestCase class where the FakeRing objects are scoped in the
+        call to the patch_policies wrapper outside of the TestCase instance
+        which can lead to some bled state.
+
+        To help tests get better isolation without having to think about it,
+        here we're capturing the args required to *build* new FakeRing
+        instances so we can ensure each test method gets a clean ring setup.
+
+        The TestCase can always "tweak" these fresh rings in setUp - or if
+        they'd prefer to get the same "reset" behavior with custom FakeRing's
+        they can pass in their own fake_ring_args to patch_policies instead of
+        setting the object_ring on the policy definitions.
+        """
+        for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
+            if fake_ring_arg is not None:
+                policy.object_ring = FakeRing(**fake_ring_arg)
+
+    def __call__(self, thing):
+        if isinstance(thing, type):
+            return self._patch_class(thing)
+        else:
+            return self._patch_method(thing)
+
+    def _patch_class(self, cls):
+        """
+        Creating a new class that inherits from decorated class is the more
+        common way I've seen class decorators done - but it seems to cause
+        infinite recursion when super is called from inside methods in the
+        decorated class.
+        """
+
+        orig_setUp = cls.setUp
+        orig_tearDown = cls.tearDown
+
+        def setUp(cls_self):
+            self._orig_POLICIES = storage_policy._POLICIES
+            if not getattr(cls_self, '_policies_patched', False):
+                storage_policy._POLICIES = self.policies
+                self._setup_rings()
+                cls_self._policies_patched = True
+
+            orig_setUp(cls_self)
+
+        def tearDown(cls_self):
+            orig_tearDown(cls_self)
+            storage_policy._POLICIES = self._orig_POLICIES
+
+        cls.setUp = setUp
+        cls.tearDown = tearDown
+
+        return cls
+
+    def _patch_method(self, f):
+        @functools.wraps(f)
+        def mywrapper(*args, **kwargs):
+            self._orig_POLICIES = storage_policy._POLICIES
+            try:
+                storage_policy._POLICIES = self.policies
+                self._setup_rings()
+                return f(*args, **kwargs)
+            finally:
+                storage_policy._POLICIES = self._orig_POLICIES
+        return mywrapper
+
+    def __enter__(self):
+        self._orig_POLICIES = storage_policy._POLICIES
+        storage_policy._POLICIES = self.policies
+
+    def __exit__(self, *args):
+        storage_policy._POLICIES = self._orig_POLICIES
+
+
+class FakeRing(Ring):
+
+    def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
+                 base_port=1000):
+        """
+        :param part_power: make part calculation based on the path
+
+        If you set a part_power when you setup your FakeRing the parts you get
+        out of ring methods will actually be based on the path - otherwise we
+        exercise the real ring code, but ignore the result and return 1.
+        """
+        self._base_port = base_port
+        self.max_more_nodes = max_more_nodes
+        self._part_shift = 32 - part_power
         # 9 total nodes (6 more past the initial 3) is the cap, no matter if
         # this is set higher, or R^2 for R replicas
-        self.replicas = replicas
-        self.max_more_nodes = max_more_nodes
-        self.devs = {}
+        self.set_replicas(replicas)
+        self._reload()
+
+    def _reload(self):
+        self._rtime = time.time()

     def set_replicas(self, replicas):
         self.replicas = replicas
-        self.devs = {}
+        self._devs = []
+        for x in range(self.replicas):
+            ip = '10.0.0.%s' % x
+            port = self._base_port + x
+            self._devs.append({
+                'ip': ip,
+                'replication_ip': ip,
+                'port': port,
+                'replication_port': port,
+                'device': 'sd' + (chr(ord('a') + x)),
+                'zone': x % 3,
+                'region': x % 2,
+                'id': x,
+            })

     @property
     def replica_count(self):
         return self.replicas

-    def get_part(self, account, container=None, obj=None):
-        return 1
+    def _get_part_nodes(self, part):
+        return [dict(node, index=i) for i, node in enumerate(list(self._devs))]

-    def get_nodes(self, account, container=None, obj=None):
-        devs = []
-        for x in xrange(self.replicas):
-            devs.append(self.devs.get(x))
-            if devs[x] is None:
-                self.devs[x] = devs[x] = \
-                    {'ip': '10.0.0.%s' % x,
-                     'port': 1000 + x,
-                     'device': 'sd' + (chr(ord('a') + x)),
-                     'zone': x % 3,
-                     'region': x % 2,
-                     'id': x}
-        return 1, devs
-
-    def get_part_nodes(self, part):
-        return self.get_nodes('blah')[1]
-
     def get_more_nodes(self, part):
         # replicas^2 is the true cap
         for x in xrange(self.replicas, min(self.replicas + self.max_more_nodes,
                                            self.replicas * self.replicas)):
             yield {'ip': '10.0.0.%s' % x,
-                   'port': 1000 + x,
+                   'replication_ip': '10.0.0.%s' % x,
+                   'port': self._base_port + x,
+                   'replication_port': self._base_port + x,
                    'device': 'sda',
                    'zone': x % 3,
                    'region': x % 2,
                    'id': x}
+
+
+def write_fake_ring(path, *devs):
+    """
+    Pretty much just a two node, two replica, 2 part power ring...
+    """
+    dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
+            'port': 6000}
+    dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
+            'port': 6000}
+
+    dev1_updates, dev2_updates = devs or ({}, {})
+
+    dev1.update(dev1_updates)
+    dev2.update(dev2_updates)
+
+    replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
+    devs = [dev1, dev2]
+    part_shift = 30
+    with closing(GzipFile(path, 'wb')) as f:
+        pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
+
+
+class FabricatedRing(Ring):
+    """
+    When a FakeRing just won't do - you can fabricate one to meet
+    your tests needs.
+    """
+
+    def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
+                 part_power=4):
+        self.devices = devices
+        self.nodes = nodes
+        self.port = port
+        self.replicas = 6
+        self.part_power = part_power
+        self._part_shift = 32 - self.part_power
+        self._reload()
+
+    def _reload(self, *args, **kwargs):
+        self._rtime = time.time() * 2
+        if hasattr(self, '_replica2part2dev_id'):
+            return
+        self._devs = [{
+            'region': 1,
+            'zone': 1,
+            'weight': 1.0,
+            'id': i,
+            'device': 'sda%d' % i,
+            'ip': '10.0.0.%d' % (i % self.nodes),
+            'replication_ip': '10.0.0.%d' % (i % self.nodes),
+            'port': self.port,
+            'replication_port': self.port,
+        } for i in range(self.devices)]
+
+        self._replica2part2dev_id = [
+            [None] * 2 ** self.part_power
+            for i in range(self.replicas)
+        ]
+        dev_ids = itertools.cycle(range(self.devices))
+        for p in range(2 ** self.part_power):
+            for r in range(self.replicas):
+                self._replica2part2dev_id[r][p] = next(dev_ids)


 class FakeMemcache(object):

     def __init__(self):
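A usage sketch for the decorator introduced above (the test case itself is made up):

    import unittest

    from swift.common.storage_policy import StoragePolicy
    from test.unit import FakeRing, patch_policies

    @patch_policies([StoragePolicy(0, name='zero', is_default=True),
                     StoragePolicy(1, name='one')])
    class TestWithPolicies(unittest.TestCase):

        def test_rings_do_not_bleed(self):
            from swift.common.storage_policy import POLICIES
            # per-method ring rebuilds mean tweaks here stay local
            POLICIES[0].object_ring = FakeRing(replicas=5)
            self.assertEqual(5, POLICIES[0].object_ring.replica_count)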
@@ -152,24 +369,13 @@ def tmpfile(content):
 xattr_data = {}


-def _get_inode(fd_or_name):
-    try:
-        if isinstance(fd_or_name, int):
-            fd = fd_or_name
-        else:
-            try:
-                fd = fd_or_name.fileno()
-            except AttributeError:
-                fd = None
-        if fd is None:
-            ino = os.stat(fd_or_name).st_ino
-        else:
-            ino = os.fstat(fd).st_ino
-    except OSError as err:
-        ioerr = IOError()
-        ioerr.errno = err.errno
-        raise ioerr
-    return ino
+def _get_inode(fd):
+    if not isinstance(fd, int):
+        try:
+            fd = fd.fileno()
+        except AttributeError:
+            return os.stat(fd).st_ino
+    return os.fstat(fd).st_ino


 def _setxattr(fd, k, v):
@@ -183,9 +389,7 @@ def _getxattr(fd, k):
     inode = _get_inode(fd)
     data = xattr_data.get(inode, {}).get(k)
     if not data:
-        e = IOError("Fake IOError")
-        e.errno = errno.ENODATA
-        raise e
+        raise IOError(errno.ENODATA, "Fake IOError")
     return data

 import xattr
@@ -214,6 +418,22 @@ def temptree(files, contents=''):
     rmtree(tempdir)


+def with_tempdir(f):
+    """
+    Decorator to give a single test a tempdir as argument to test method.
+    """
+    @functools.wraps(f)
+    def wrapped(*args, **kwargs):
+        tempdir = mkdtemp()
+        args = list(args)
+        args.append(tempdir)
+        try:
+            return f(*args, **kwargs)
+        finally:
+            rmtree(tempdir)
+    return wrapped
+
+
 class NullLoggingHandler(logging.Handler):

     def emit(self, record):
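Usage sketch for with_tempdir (the test body is made up):

    import os
    import unittest

    from test.unit import with_tempdir

    class TestExample(unittest.TestCase):

        @with_tempdir
        def test_writes_a_file(self, tempdir):
            # tempdir is appended as the last positional argument and
            # rmtree'd when the test returns, even on failure
            path = os.path.join(tempdir, 'data')
            open(path, 'w').close()
            self.assertTrue(os.path.exists(path))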
@@ -239,8 +459,8 @@ class UnmockTimeModule(object):
 logging.time = UnmockTimeModule()


-class FakeLogger(logging.Logger):
-    # a thread safe logger
+class FakeLogger(logging.Logger, object):
+    # a thread safe fake logger

     def __init__(self, *args, **kwargs):
         self._clear()
@@ -250,42 +470,57 @@ class FakeLogger(logging.Logger):
             self.facility = kwargs['facility']
         self.statsd_client = None
         self.thread_locals = None
+        self.parent = None
+
+    store_in = {
+        logging.ERROR: 'error',
+        logging.WARNING: 'warning',
+        logging.INFO: 'info',
+        logging.DEBUG: 'debug',
+        logging.CRITICAL: 'critical',
+    }
+
+    def _log(self, level, msg, *args, **kwargs):
+        store_name = self.store_in[level]
+        cargs = [msg]
+        if any(args):
+            cargs.extend(args)
+        captured = dict(kwargs)
+        if 'exc_info' in kwargs and \
+                not isinstance(kwargs['exc_info'], tuple):
+            captured['exc_info'] = sys.exc_info()
+        self.log_dict[store_name].append((tuple(cargs), captured))
+        super(FakeLogger, self)._log(level, msg, *args, **kwargs)
+
     def _clear(self):
         self.log_dict = defaultdict(list)
-        self.lines_dict = defaultdict(list)
+        self.lines_dict = {'critical': [], 'error': [], 'info': [],
+                           'warning': [], 'debug': []}
+
+    def get_lines_for_level(self, level):
+        if level not in self.lines_dict:
+            raise KeyError(
+                "Invalid log level '%s'; valid levels are %s" %
+                (level,
+                 ', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
+        return self.lines_dict[level]
+
+    def all_log_lines(self):
+        return dict((level, msgs) for level, msgs in self.lines_dict.items()
+                    if len(msgs) > 0)

     def _store_in(store_name):
         def stub_fn(self, *args, **kwargs):
             self.log_dict[store_name].append((args, kwargs))
         return stub_fn

-    def _store_and_log_in(store_name):
-        def stub_fn(self, *args, **kwargs):
-            self.log_dict[store_name].append((args, kwargs))
-            self._log(store_name, args[0], args[1:], **kwargs)
-        return stub_fn
-
-    def get_lines_for_level(self, level):
-        return self.lines_dict[level]
-
-    error = _store_and_log_in('error')
-    info = _store_and_log_in('info')
-    warning = _store_and_log_in('warning')
-    warn = _store_and_log_in('warning')
-    debug = _store_and_log_in('debug')
-
-    def exception(self, *args, **kwargs):
-        self.log_dict['exception'].append((args, kwargs,
-                                           str(sys.exc_info()[1])))
-        print 'FakeLogger Exception: %s' % self.log_dict
-
     # mock out the StatsD logging methods:
+    update_stats = _store_in('update_stats')
     increment = _store_in('increment')
     decrement = _store_in('decrement')
     timing = _store_in('timing')
     timing_since = _store_in('timing_since')
-    update_stats = _store_in('update_stats')
+    transfer_rate = _store_in('transfer_rate')
     set_statsd_prefix = _store_in('set_statsd_prefix')

     def get_increments(self):
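With the new _log plumbing, every standard logging call is captured twice: structured in log_dict and formatted in lines_dict. An illustrative snippet:

    from test.unit import FakeLogger

    logger = FakeLogger()
    logger.warning('disk %s is full', 'sdb1')
    # formatted lines are grouped by level name
    assert logger.get_lines_for_level('warning') == ['disk sdb1 is full']
    # all_log_lines() only reports levels that actually logged something
    assert logger.all_log_lines().keys() == ['warning']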
@@ -328,7 +563,7 @@ class FakeLogger(logging.Logger):
             print 'WARNING: unable to format log message %r %% %r' % (
                 record.msg, record.args)
             raise
-        self.lines_dict[record.levelno].append(line)
+        self.lines_dict[record.levelname.lower()].append(line)

     def handle(self, record):
         self._handle(record)
@@ -345,19 +580,40 @@ class DebugLogger(FakeLogger):

     def __init__(self, *args, **kwargs):
         FakeLogger.__init__(self, *args, **kwargs)
-        self.formatter = logging.Formatter("%(server)s: %(message)s")
+        self.formatter = logging.Formatter(
+            "%(server)s %(levelname)s: %(message)s")

     def handle(self, record):
         self._handle(record)
         print self.formatter.format(record)

-    def write(self, *args):
-        print args
+
+class DebugLogAdapter(utils.LogAdapter):
+
+    def _send_to_logger(name):
+        def stub_fn(self, *args, **kwargs):
+            return getattr(self.logger, name)(*args, **kwargs)
+        return stub_fn
+
+    # delegate to FakeLogger's mocks
+    update_stats = _send_to_logger('update_stats')
+    increment = _send_to_logger('increment')
+    decrement = _send_to_logger('decrement')
+    timing = _send_to_logger('timing')
+    timing_since = _send_to_logger('timing_since')
+    transfer_rate = _send_to_logger('transfer_rate')
+    set_statsd_prefix = _send_to_logger('set_statsd_prefix')
+
+    def __getattribute__(self, name):
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            return getattr(self.__dict__['logger'], name)


 def debug_logger(name='test'):
     """get a named adapted debug logger"""
-    return LogAdapter(DebugLogger(), name)
+    return DebugLogAdapter(DebugLogger(), name)


 original_syslog_handler = logging.handlers.SysLogHandler
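debug_logger now returns a DebugLogAdapter, so tests get LogAdapter semantics plus FakeLogger's StatsD mocks; an illustrative use:

    from test.unit import debug_logger

    logger = debug_logger('proxy-server')
    logger.increment('errors')            # delegated to FakeLogger's mock
    logger.info('handled %s', '/v1/a')    # printed as 'proxy-server INFO: ...'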
@@ -374,7 +630,8 @@ def fake_syslog_handler():
     logging.handlers.SysLogHandler = FakeLogger


-if config_true_value(get_config('unit_test').get('fake_syslog', 'False')):
+if utils.config_true_value(
+        get_config('unit_test').get('fake_syslog', 'False')):
     fake_syslog_handler()

@@ -447,17 +704,66 @@ def mock(update):
             delattr(module, attr)


+class SlowBody(object):
+    """
+    This will work with our fake_http_connect, if you hand in these
+    instead of strings it will make reads take longer by the given
+    amount. It should be a little bit easier to extend than the
+    current slow kwarg - which inserts whitespace in the response.
+    Also it should be easy to detect if you have one of these (or a
+    subclass) for the body inside of FakeConn if we wanted to do
+    something smarter than just duck-type the str/buffer api
+    enough to get by.
+    """
+
+    def __init__(self, body, slowness):
+        self.body = body
+        self.slowness = slowness
+
+    def slowdown(self):
+        eventlet.sleep(self.slowness)
+
+    def __getitem__(self, s):
+        return SlowBody(self.body[s], self.slowness)
+
+    def __len__(self):
+        return len(self.body)
+
+    def __radd__(self, other):
+        self.slowdown()
+        return other + self.body
+
+
 def fake_http_connect(*code_iter, **kwargs):

     class FakeConn(object):

         def __init__(self, status, etag=None, body='', timestamp='1',
-                     expect_status=None, headers=None):
-            self.status = status
-            if expect_status is None:
-                self.expect_status = self.status
+                     headers=None, expect_headers=None, connection_id=None,
+                     give_send=None):
+            # connect exception
+            if isinstance(status, (Exception, eventlet.Timeout)):
+                raise status
+            if isinstance(status, tuple):
+                self.expect_status = list(status[:-1])
+                self.status = status[-1]
+                self.explicit_expect_list = True
             else:
-                self.expect_status = expect_status
+                self.expect_status, self.status = ([], status)
+                self.explicit_expect_list = False
+            if not self.expect_status:
+                # when a swift backend service returns a status before reading
+                # from the body (mostly an error response) eventlet.wsgi will
+                # respond with that status line immediately instead of 100
+                # Continue, even if the client sent the Expect 100 header.
+                # BufferedHttp and the proxy both see these error statuses
+                # when they call getexpect, so our FakeConn tries to act like
+                # our backend services and return certain types of responses
+                # as expect statuses just like a real backend server would do.
+                if self.status in (507, 412, 409):
+                    self.expect_status = [status]
+                else:
+                    self.expect_status = [100, 100]
             self.reason = 'Fake'
             self.host = '1.2.3.4'
             self.port = '1234'
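SlowBody slots into fake_http_connect wherever a string body is accepted; a sketch (status code and body text are arbitrary):

    from test.unit import SlowBody, fake_http_connect

    # every chunk read from this fake backend response sleeps 0.1s,
    # simulating a slow object server without the whitespace-padding
    # trick of the older slow kwarg
    connect = fake_http_connect(200, body=SlowBody('object data', 0.1))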
@@ -466,30 +772,41 @@ def fake_http_connect(*code_iter, **kwargs):
             self.etag = etag
             self.body = body
             self.headers = headers or {}
+            self.expect_headers = expect_headers or {}
             self.timestamp = timestamp
+            self.connection_id = connection_id
+            self.give_send = give_send
             if 'slow' in kwargs and isinstance(kwargs['slow'], list):
                 try:
                     self._next_sleep = kwargs['slow'].pop(0)
                 except IndexError:
                     self._next_sleep = None
+            # be nice to trixy bits with node_iter's
+            eventlet.sleep()

         def getresponse(self):
-            if kwargs.get('raise_exc'):
+            if self.expect_status and self.explicit_expect_list:
+                raise Exception('Test did not consume all fake '
+                                'expect status: %r' % (self.expect_status,))
+            if isinstance(self.status, (Exception, eventlet.Timeout)):
+                raise self.status
+            exc = kwargs.get('raise_exc')
+            if exc:
+                if isinstance(exc, (Exception, eventlet.Timeout)):
+                    raise exc
                 raise Exception('test')
             if kwargs.get('raise_timeout_exc'):
-                raise Timeout()
+                raise eventlet.Timeout()
             return self

         def getexpect(self):
-            if self.expect_status == -2:
-                raise HTTPException()
-            if self.expect_status == -3:
-                return FakeConn(507)
-            if self.expect_status == -4:
-                return FakeConn(201)
-            if self.expect_status == 412:
-                return FakeConn(412)
-            return FakeConn(100)
+            expect_status = self.expect_status.pop(0)
+            if isinstance(self.expect_status, (Exception, eventlet.Timeout)):
+                raise self.expect_status
+            headers = dict(self.expect_headers)
+            if expect_status == 409:
+                headers['X-Backend-Timestamp'] = self.timestamp
+            return FakeConn(expect_status, headers=headers)

         def getheaders(self):
             etag = self.etag
@@ -499,19 +816,23 @@ def fake_http_connect(*code_iter, **kwargs):
             else:
                 etag = '"68b329da9893e34099c7d8ad5cb9c940"'

-            headers = {'content-length': len(self.body),
-                       'content-type': 'x-application/test',
-                       'x-timestamp': self.timestamp,
-                       'last-modified': self.timestamp,
-                       'x-object-meta-test': 'testing',
-                       'x-delete-at': '9876543210',
-                       'etag': etag,
-                       'x-works': 'yes'}
+            headers = swob.HeaderKeyDict({
+                'content-length': len(self.body),
+                'content-type': 'x-application/test',
+                'x-timestamp': self.timestamp,
+                'x-backend-timestamp': self.timestamp,
+                'last-modified': self.timestamp,
+                'x-object-meta-test': 'testing',
+                'x-delete-at': '9876543210',
+                'etag': etag,
+                'x-works': 'yes',
+            })
             if self.status // 100 == 2:
                 headers['x-account-container-count'] = \
                     kwargs.get('count', 12345)
             if not self.timestamp:
-                del headers['x-timestamp']
+                # when timestamp is None, HeaderKeyDict raises KeyError
+                headers.pop('x-timestamp', None)
             try:
                 if container_ts_iter.next() is False:
                     headers['x-container-timestamp'] = '1'
@@ -538,34 +859,45 @@ def fake_http_connect(*code_iter, **kwargs):
             if am_slow:
                 if self.sent < 4:
                     self.sent += 1
-                    sleep(value)
+                    eventlet.sleep(value)
                     return ' '
             rv = self.body[:amt]
             self.body = self.body[amt:]
             return rv

         def send(self, amt=None):
+            if self.give_send:
+                self.give_send(self.connection_id, amt)
             am_slow, value = self.get_slow()
             if am_slow:
                 if self.received < 4:
                     self.received += 1
-                    sleep(value)
+                    eventlet.sleep(value)

         def getheader(self, name, default=None):
-            return dict(self.getheaders()).get(name.lower(), default)
+            return swob.HeaderKeyDict(self.getheaders()).get(name, default)
+
+        def close(self):
+            pass

     timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
     etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
-    if isinstance(kwargs.get('headers'), list):
+    if isinstance(kwargs.get('headers'), (list, tuple)):
         headers_iter = iter(kwargs['headers'])
     else:
        headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
+    if isinstance(kwargs.get('expect_headers'), (list, tuple)):
+        expect_headers_iter = iter(kwargs['expect_headers'])
+    else:
+        expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
+                                   len(code_iter))

     x = kwargs.get('missing_container', [False] * len(code_iter))
     if not isinstance(x, (tuple, list)):
         x = [x] * len(code_iter)
     container_ts_iter = iter(x)
     code_iter = iter(code_iter)
+    conn_id_and_code_iter = enumerate(code_iter)
     static_body = kwargs.get('body', None)
     body_iter = kwargs.get('body_iter', None)
     if body_iter:
@@ -573,21 +905,22 @@ def fake_http_connect(*code_iter, **kwargs):

     def connect(*args, **ckwargs):
         if kwargs.get('slow_connect', False):
-            sleep(0.1)
+            eventlet.sleep(0.1)
         if 'give_content_type' in kwargs:
             if len(args) >= 7 and 'Content-Type' in args[6]:
                 kwargs['give_content_type'](args[6]['Content-Type'])
             else:
                 kwargs['give_content_type']('')
+        i, status = conn_id_and_code_iter.next()
         if 'give_connect' in kwargs:
-            kwargs['give_connect'](*args, **ckwargs)
-        status = code_iter.next()
-        if isinstance(status, tuple):
-            status, expect_status = status
-        else:
-            expect_status = status
+            give_conn_fn = kwargs['give_connect']
+            argspec = inspect.getargspec(give_conn_fn)
+            if argspec.keywords or 'connection_id' in argspec.args:
+                ckwargs['connection_id'] = i
+            give_conn_fn(*args, **ckwargs)
         etag = etag_iter.next()
         headers = headers_iter.next()
+        expect_headers = expect_headers_iter.next()
         timestamp = timestamps_iter.next()

         if status <= 0:
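The introspection above means a give_connect callback receives connection_id only when its signature can accept it. A short self-contained sketch of that dispatch rule; the callback names here are hypothetical:

    import inspect

    def plain(ip, port, method, path, headers, qs, ssl):
        pass  # no 'connection_id' parameter and no **kwargs -> id is not passed

    def tracking(*args, **kwargs):
        return kwargs.get('connection_id')  # **kwargs present -> id is passed

    argspec = inspect.getargspec(plain)
    assert not (argspec.keywords or 'connection_id' in argspec.args)

    argspec = inspect.getargspec(tracking)
    assert argspec.keywords or 'connection_id' in argspec.args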
@@ -597,8 +930,39 @@ def fake_http_connect(*code_iter, **kwargs):
         else:
             body = body_iter.next()
         return FakeConn(status, etag, body=body, timestamp=timestamp,
-                        expect_status=expect_status, headers=headers)
+                        headers=headers, expect_headers=expect_headers,
+                        connection_id=i, give_send=kwargs.get('give_send'))

     connect.code_iter = code_iter

     return connect


+@contextmanager
+def mocked_http_conn(*args, **kwargs):
+    requests = []
+
+    def capture_requests(ip, port, method, path, headers, qs, ssl):
+        req = {
+            'ip': ip,
+            'port': port,
+            'method': method,
+            'path': path,
+            'headers': headers,
+            'qs': qs,
+            'ssl': ssl,
+        }
+        requests.append(req)
+    kwargs.setdefault('give_connect', capture_requests)
+    fake_conn = fake_http_connect(*args, **kwargs)
+    fake_conn.requests = requests
+    with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
+                       new=fake_conn):
+        yield fake_conn
+        left_over_status = list(fake_conn.code_iter)
+        if left_over_status:
+            raise AssertionError('left over status %r' % left_over_status)
+
+
+def make_timestamp_iter():
+    return iter(Timestamp(t) for t in itertools.count(int(time.time())))
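The new mocked_http_conn helper patches swift.common.bufferedhttp.http_connect_raw, records every backend request via capture_requests(), and fails if any programmed status codes go unconsumed. A minimal usage sketch, assuming FakeConn.getresponse() returns the connection itself as in upstream Swift's test helpers; the address, path and header values are made up for illustration:

    from swift.common import bufferedhttp
    from test.unit import mocked_http_conn

    with mocked_http_conn(201) as fake_conn:
        # the patched http_connect_raw hands back a FakeConn wired to 201
        conn = bufferedhttp.http_connect_raw(
            '1.2.3.4', 6050, 'PUT', '/sda1/0/a/c/o',
            {'X-Test': 'yes'}, '', False)
        resp = conn.getresponse()
        assert resp.status == 201
    # capture_requests() recorded the call as a dict
    assert fake_conn.requests[0]['method'] == 'PUT'
    assert fake_conn.requests[0]['path'] == '/sda1/0/a/c/o'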
13 tox.ini
@@ -21,7 +21,7 @@ deps =
     # Note: pip supports installing from git repos.
     # https://pip.pypa.io/en/latest/reference/pip_install.html#git
     # Example: git+https://github.com/openstack/swift.git@2.0.0
-    https://launchpad.net/swift/kilo/2.2.1/+download/swift-2.2.1.tar.gz
+    https://launchpad.net/swift/kilo/2.3.0/+download/swift-2.3.0.tar.gz
     -r{toxinidir}/test-requirements.txt
 changedir = {toxinidir}/test/unit
 commands = nosetests -v {posargs}
@@ -59,7 +59,14 @@ changedir = {toxinidir}
 commands = bash tools/tox_run.sh

 [flake8]
+# it's not a bug that we aren't using all of hacking
+# H102 -> apache2 license exists
+# H103 -> license is apache
+# H201 -> no bare excepts (unless marked with " # noqa")
+# H231 -> Check for except statements to be Python 3.x compatible
+# H501 -> don't use locals() for str formatting
+# H903 -> \n not \r\n
 ignore = H
-builtins = _
-exclude = .venv,.tox,dist,doc,test,*egg
+select = F,E,W,H102,H103,H201,H231,H501,H903
+exclude = .venv,.tox,dist,doc,*egg,test
 show-source = True