Adopting change 151897 (https://review.openstack.org/#/c/151897/), which was previously abandoned.

This is needed to support a tool that will synchronize the names on the HPSS filesystem with the names in Swift metadata.
This commit is contained in:
Phil Bridges 2016-01-29 15:32:59 -06:00
parent 14599e460b
commit 2d4cc93deb
5 changed files with 167 additions and 268 deletions

View File

@@ -218,38 +218,6 @@ def clean_metadata(path_or_fd):
key += 1
def validate_object(metadata, statinfo=None):
if not metadata:
return False
if X_TIMESTAMP not in metadata.keys() or \
X_CONTENT_TYPE not in metadata.keys() or \
X_ETAG not in metadata.keys() or \
X_CONTENT_LENGTH not in metadata.keys() or \
X_TYPE not in metadata.keys() or \
X_OBJECT_TYPE not in metadata.keys():
return False
if statinfo and stat.S_ISREG(statinfo.st_mode):
# File length has changed
if int(metadata[X_CONTENT_LENGTH]) != statinfo.st_size:
return False
# File might have changed with length being the same.
if X_MTIME in metadata and \
normalize_timestamp(metadata[X_MTIME]) != \
normalize_timestamp(statinfo.st_mtime):
return False
if metadata[X_TYPE] == OBJECT:
return True
logging.warn('validate_object: metadata type is not OBJECT (%r)',
metadata[X_TYPE])
return False
def _read_for_etag(fp):
etag = md5()
while True:
@@ -267,7 +235,7 @@ def _read_for_etag(fp):
return etag.hexdigest()
def _get_etag(path_or_fd):
def get_etag(fd):
"""
FIXME: It would be great to have a translator that returns the md5sum() of
the file as an xattr that can be simply fetched.
@@ -275,16 +243,16 @@ def _get_etag(path_or_fd):
Since we don't have that, we should yield after each chunk is read and
computed so that we don't tie up the worker thread. (A standalone sketch
of this pattern follows this hunk.)
"""
if isinstance(path_or_fd, int):
if isinstance(fd, int):
# We are given a file descriptor, so this is an invocation from the
# DiskFile.open() method.
fd = path_or_fd
fd = fd
etag = _read_for_etag(do_dup(fd))
do_lseek(fd, 0, os.SEEK_SET)
else:
# We are given a path to the object when the DiskDir.list_objects_iter
# method invokes us.
path = path_or_fd
path = fd
fd = do_open(path, os.O_RDONLY)
etag = _read_for_etag(fd)
do_close(fd)
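
The docstring above describes the approach taken by get_etag(): hash the file in fixed-size chunks and yield between chunks so that hashing a large object does not monopolize an eventlet worker thread. Below is a minimal standalone sketch of that pattern; the helper name and chunk size are illustrative, not the module's actual ones.

    import os
    from hashlib import md5
    from eventlet import sleep

    CHUNK_SIZE = 64 * 1024  # illustrative; the real module defines its own

    def chunked_etag(path):
        # Hash the file chunk by chunk, yielding to other greenthreads
        # after each chunk so a large object does not block the worker.
        checksum = md5()
        fd = os.open(path, os.O_RDONLY)
        try:
            while True:
                chunk = os.read(fd, CHUNK_SIZE)
                if not chunk:
                    break
                checksum.update(chunk)
                sleep()  # cooperative yield (eventlet.sleep defaults to 0)
        finally:
            os.close(fd)
        return checksum.hexdigest()
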
@@ -317,29 +285,10 @@ def get_object_metadata(obj_path_or_fd, stats=None):
X_OBJECT_TYPE: DIR_NON_OBJECT if is_dir else FILE,
X_CONTENT_LENGTH: 0 if is_dir else stats.st_size,
X_MTIME: 0 if is_dir else normalize_timestamp(stats.st_mtime),
X_ETAG: md5().hexdigest() if is_dir else _get_etag(obj_path_or_fd)}
X_ETAG: md5().hexdigest() if is_dir else get_etag(obj_path_or_fd)}
return metadata
def restore_metadata(path, metadata, meta_orig):
if meta_orig:
meta_new = meta_orig.copy()
meta_new.update(metadata)
else:
meta_new = metadata
if meta_orig != meta_new:
write_metadata(path, meta_new)
return meta_new
def create_object_metadata(obj_path_or_fd, stats=None, existing_meta={}):
# We must accept either a path or a file descriptor as an argument to this
# method, as the diskfile module uses a file descriptor and the DiskDir
# module (for container operations) uses a path.
metadata_from_stat = get_object_metadata(obj_path_or_fd, stats)
return restore_metadata(obj_path_or_fd, metadata_from_stat, existing_meta)
# The following dir_xxx calls should definitely be replaced
# with a Metadata class to encapsulate their implementation.
# :FIXME: For now we have them as functions, but we should

View File

@@ -25,6 +25,7 @@ import logging
import time
import hpssfs
from uuid import uuid4
from hashlib import md5
from eventlet import sleep
from contextlib import contextmanager
from swiftonhpss.swift.common.exceptions import AlreadyExistsAsFile, \
@@ -41,9 +42,9 @@ from swiftonhpss.swift.common.exceptions import SwiftOnFileSystemOSError, \
from swiftonhpss.swift.common.fs_utils import do_fstat, do_open, do_close, \
do_unlink, do_chown, do_fsync, do_fchown, do_stat, do_write, do_read, \
do_fadvise64, do_rename, do_fdatasync, do_lseek, do_mkdir
from swiftonhpss.swift.common.utils import read_metadata, write_metadata, \
validate_object, create_object_metadata, rmobjdir, dir_is_object, \
get_object_metadata, write_pickle
from swiftonhpss.swift.common.utils import read_metadata, write_metadata,\
rmobjdir, dir_is_object, \
get_object_metadata, write_pickle, get_etag
from swiftonhpss.swift.common.utils import X_CONTENT_TYPE, \
X_TIMESTAMP, X_TYPE, X_OBJECT_TYPE, FILE, OBJECT, DIR_TYPE, \
FILE_TYPE, DEFAULT_UID, DEFAULT_GID, DIR_NON_OBJECT, DIR_OBJECT, \
@@ -53,7 +54,7 @@ from swift.obj.diskfile import get_async_dir
# FIXME: Hopefully we'll be able to move to Python 2.7+ where O_CLOEXEC will
# be back ported. See http://www.python.org/dev/peps/pep-0433/
O_CLOEXEC = 0o2000000
O_CLOEXEC = 02000000
MAX_RENAME_ATTEMPTS = 10
MAX_OPEN_ATTEMPTS = 10
@@ -593,7 +594,12 @@ class DiskFile(object):
self._is_dir = False
self._metadata = None
self._fd = None
# Save stat info as internal variable to avoid multiple stat() calls
self._stat = None
# Save md5sum of object as internal variable to avoid reading the
# entire object more than once.
self._etag = None
self._file_has_changed = None
# Don't store a value for data_file until we know it exists.
self._data_file = None
@@ -662,9 +668,9 @@ class DiskFile(object):
obj_size = self._stat.st_size
self._metadata = read_metadata(self._fd)
if not validate_object(self._metadata, self._stat):
self._metadata = create_object_metadata(self._fd, self._stat,
self._metadata)
if not self._validate_object_metadata(self._fd):
self._create_object_metadata(self._fd)
assert self._metadata is not None
self._filter_metadata()
@@ -694,6 +700,92 @@ class DiskFile(object):
return self
def _validate_object_metadata(self, fd):
# There is no Swift-specific metadata saved as xattrs, probably because
# the object was added/replaced through the filesystem interface.
if not self._metadata:
self._file_has_changed = True
return False
required_keys = \
(X_TIMESTAMP, X_CONTENT_TYPE, X_CONTENT_LENGTH, X_ETAG,
# SOF specific keys
X_TYPE, X_OBJECT_TYPE)
if not all(k in self._metadata for k in required_keys):
# At least one of the required keys does not exist
return False
if not self._is_dir:
# X_MTIME is a new key added recently; newer objects will
# have the key set during PUT.
if X_MTIME in self._metadata:
# Check if the file has been modified through filesystem
# interface by comparing mtime stored in xattr during PUT
# and current mtime of file.
if normalize_timestamp(self._metadata[X_MTIME]) != \
normalize_timestamp(self._stat.st_mtime):
self._file_has_changed = True
return False
else:
# Without X_MTIME key, comparing md5sum is the only way
# to determine if file has changed or not. This is inefficient
# but there's no other way!
self._etag = get_etag(fd)
if self._etag != self._metadata[X_ETAG]:
self._file_has_changed = True
return False
else:
# Checksums are same; File has not changed. For the next
# GET request on same file, we don't compute md5sum again!
# This is achieved by setting X_MTIME to mtime in
# _create_object_metadata()
return False
if self._metadata[X_TYPE] == OBJECT:
return True
return False
def _create_object_metadata(self, fd):
if self._etag is None:
self._etag = md5().hexdigest() if self._is_dir \
else get_etag(fd)
if self._file_has_changed or (X_TIMESTAMP not in self._metadata):
timestamp = normalize_timestamp(self._stat.st_mtime)
else:
timestamp = self._metadata[X_TIMESTAMP]
metadata = {
X_TYPE: OBJECT,
X_TIMESTAMP: timestamp,
X_CONTENT_TYPE: DIR_TYPE if self._is_dir else FILE_TYPE,
X_OBJECT_TYPE: DIR_NON_OBJECT if self._is_dir else FILE,
X_CONTENT_LENGTH: 0 if self._is_dir else self._stat.st_size,
X_ETAG: self._etag}
# Add X_MTIME key if object is a file
if not self._is_dir:
metadata[X_MTIME] = normalize_timestamp(self._stat.st_mtime)
meta_new = self._metadata.copy()
meta_new.update(metadata)
if self._metadata != meta_new:
write_metadata(fd, meta_new)
# Avoid additional read_metadata() later
self._metadata = meta_new
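
Taken together, the two methods above implement the change-detection scheme described in the comments: a mismatch of the stored X-Object-PUT-Mtime (or, for older objects without that key, of the stored ETag against a freshly computed md5sum) marks the file as changed, and rebuilding the metadata writes the mtime so later opens can use the cheap comparison. A rough, simplified illustration of that decision flow with plain dicts follows; the function and parameter names are hypothetical, and the real methods additionally check the required keys, X-Type, and normalized timestamps.

    def check_on_open(stored_meta, current_mtime, compute_md5):
        # Sketch: returns (file_has_changed, needs_metadata_rebuild).
        if not stored_meta:
            # No Swift xattrs at all: created/replaced via the filesystem.
            return True, True
        if 'X-Object-PUT-Mtime' in stored_meta:
            # Cheap check: compare the mtime recorded at PUT time.
            if stored_meta['X-Object-PUT-Mtime'] != current_mtime:
                return True, True
            return False, False
        # Older object without the mtime key: hashing is the only way to tell.
        if compute_md5() != stored_meta.get('ETag'):
            return True, True
        # Content is unchanged, but rebuild anyway so X-Object-PUT-Mtime is
        # written and the next open can use the cheap mtime comparison.
        return False, True
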
def _filter_metadata(self):
for key in (X_TYPE, X_OBJECT_TYPE, X_MTIME):
self._metadata.pop(key, None)
if self._file_has_changed:
# Really ugly hack to let SOF's GET() wrapper know that we need
# to update the container database
self._metadata['X-Object-Sysmeta-Update-Container'] = True
def _is_object_expired(self, metadata):
try:
x_delete_at = int(metadata['X-Delete-At'])
@@ -709,12 +801,6 @@ class DiskFile(object):
return True
return False
def _filter_metadata(self):
if X_TYPE in self._metadata:
self._metadata.pop(X_TYPE)
if X_OBJECT_TYPE in self._metadata:
self._metadata.pop(X_OBJECT_TYPE)
def __enter__(self):
"""
Context enter.

View File

@@ -30,7 +30,7 @@ from swift.common.swob import HTTPConflict, HTTPBadRequest, HeaderKeyDict, \
from swift.common.utils import public, timing_stats, replication, \
config_true_value, Timestamp, csv_append
from swift.common.request_helpers import get_name_and_placement, \
split_and_validate_path, is_sys_or_user_meta
split_and_validate_path, is_sys_or_user_meta, is_user_meta
from swiftonhpss.swift.common.exceptions import AlreadyExistsAsFile, \
AlreadyExistsAsDir, SwiftOnFileSystemIOError, SwiftOnFileSystemOSError, \
SwiftOnFileFsException
@@ -41,6 +41,7 @@ from swift.common.constraints import valid_timestamp, check_account_format, \
check_destination_header
from swift.obj import server
from swift.common.ring import Ring
from swiftonhpss.swift.obj.diskfile import DiskFileManager
from swiftonhpss.swift.common.constraints import check_object_creation
@@ -76,11 +77,19 @@ class ObjectController(server.ObjectController):
"""
# Replaces Swift's DiskFileRouter object reference with ours.
self._diskfile_router = SwiftOnFileDiskFileRouter(conf, self.logger)
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.container_ring = None
# This conf option will be deprecated and eventually removed in
# future releases
utils.read_pickled_metadata = \
config_true_value(conf.get('read_pickled_metadata', 'no'))
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
@public
@timing_stats()
def PUT(self, request):
@@ -252,8 +261,8 @@ class ObjectController(server.ObjectController):
device, policy)
# Create convenience symlink
try:
self.object_symlink(request, disk_file._data_file, device,
account)
self._object_symlink(request, disk_file._data_file, device,
account)
except SwiftOnFileSystemOSError:
return HTTPServiceUnavailable(request=request)
return HTTPCreated(request=request, etag=etag)
@@ -263,7 +272,39 @@ class ObjectController(server.ObjectController):
split_and_validate_path(request, 1, 5, True)
return HTTPConflict(drive=device, request=request)
def object_symlink(self, request, diskfile, device, account):
def _sof_container_update(self, request, resp):
"""
SOF-specific metadata is set in DiskFile.open()._filter_metadata().
This method internally invokes Swift's container_update() method.
"""
device, partition, account, container, obj, policy_idx = \
get_name_and_placement(request, 5, 5, True)
# The container_update() method requires certain container-specific
# headers. The proxy object controller appends these headers to
# backend PUT requests but not to HEAD/GET requests. Thus, we
# populate the required information in the request and then invoke
# container_update().
container_partition, container_nodes = \
self.get_container_ring().get_nodes(account, container)
request.headers['X-Container-Partition'] = container_partition
for node in container_nodes:
request.headers['X-Container-Host'] = csv_append(
request.headers.get('X-Container-Host'),
'%(ip)s:%(port)s' % node)
request.headers['X-Container-Device'] = csv_append(
request.headers.get('X-Container-Device'), node['device'])
self.container_update(
'PUT', account, container, obj, request,
HeaderKeyDict({
'x-size': resp.headers['Content-Length'],
'x-content-type': resp.headers['Content-Type'],
'x-timestamp': resp.headers['X-Timestamp'],
'x-etag': resp.headers['ETag']}),
device, policy_idx)
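
As the comments above explain, the X-Container-* headers that container_update() consumes are looked up from the container ring and accumulated with csv_append(). A small illustration of what that accumulation produces for a hypothetical three-replica ring (addresses and device names are made up):

    from swift.common.utils import csv_append

    headers = {}
    # Hypothetical nodes returned by the container ring for this container.
    nodes = [{'ip': '10.0.0.1', 'port': 6001, 'device': 'd1'},
             {'ip': '10.0.0.2', 'port': 6001, 'device': 'd2'},
             {'ip': '10.0.0.3', 'port': 6001, 'device': 'd3'}]
    for node in nodes:
        headers['X-Container-Host'] = csv_append(
            headers.get('X-Container-Host'), '%(ip)s:%(port)s' % node)
        headers['X-Container-Device'] = csv_append(
            headers.get('X-Container-Device'), node['device'])
    # headers now holds:
    #   X-Container-Host:   10.0.0.1:6001,10.0.0.2:6001,10.0.0.3:6001
    #   X-Container-Device: d1,d2,d3
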
def _object_symlink(self, request, diskfile, device, account):
mount = diskfile.split(device)[0]
dev = "%s%s" % (mount, device)
project = None
@@ -333,22 +374,8 @@ class ObjectController(server.ObjectController):
except SwiftOnFileSystemIOError:
return HTTPServiceUnavailable(request=request)
# Bill Owen's hack to force container sync on HEAD, so we can manually
# tell the Swift container server when objects exist on disk it didn't
# know about.
# TODO: do a similar trick for HEADing objects that didn't exist
# TODO: see if this block that's duplicated can be a function instead
if 'X-Object-Sysmeta-Update-Container' in response.headers:
self.container_update(
'PUT', account, container, obj, request,
HeaderKeyDict(
{'x-size': metadata['Content-Length'],
'x-content-type': metadata['Content-Type'],
'x-timestamp': metadata['X-Timestamp'],
'x-etag': metadata['ETag']
}
),
device, policy)
self._sof_container_update(request, response)
response.headers.pop('X-Object-Sysmeta-Update-Container')
return response
@@ -551,7 +578,7 @@ class ObjectController(server.ObjectController):
orig_timestamp = e.timestamp
orig_metadata = e.metadata
response_class = HTTPNotFound
except DiskFileDeleted:
except DiskFileDeleted as e:
orig_timestamp = e.timestamp
orig_metadata = {}
response_class = HTTPNotFound

View File

@@ -317,36 +317,6 @@ class TestUtils(unittest.TestCase):
assert res_d == {}
assert _xattr_op_cnt['get'] == 3, "%r" % _xattr_op_cnt
def test_restore_metadata_none(self):
# No initial metadata
path = "/tmp/foo/i"
res_d = utils.restore_metadata(path, {'b': 'y'}, {})
expected_d = {'b': 'y'}
assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
assert _xattr_op_cnt['set'] == 1, "%r" % _xattr_op_cnt
def test_restore_metadata(self):
# Initial metadata
path = "/tmp/foo/i"
initial_d = {'a': 'z'}
xkey = _xkey(path, utils.METADATA_KEY)
_xattrs[xkey] = serialize_metadata(initial_d)
res_d = utils.restore_metadata(path, {'b': 'y'}, initial_d)
expected_d = {'a': 'z', 'b': 'y'}
assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
assert _xattr_op_cnt['set'] == 1, "%r" % _xattr_op_cnt
def test_restore_metadata_nochange(self):
# Initial metadata but no changes
path = "/tmp/foo/i"
initial_d = {'a': 'z'}
xkey = _xkey(path, utils.METADATA_KEY)
_xattrs[xkey] = serialize_metadata(initial_d)
res_d = utils.restore_metadata(path, {}, initial_d)
expected_d = {'a': 'z'}
assert res_d == expected_d, "Expected %r, result %r" % (expected_d, res_d)
assert _xattr_op_cnt['set'] == 0, "%r" % _xattr_op_cnt
def test_deserialize_metadata_pickle(self):
orig_md = {'key1': 'value1', 'key2': 'value2'}
pickled_md = pickle.dumps(orig_md, PICKLE_PROTOCOL)
@@ -385,14 +355,14 @@ class TestUtils(unittest.TestCase):
def test_get_etag_empty(self):
tf = tempfile.NamedTemporaryFile()
hd = utils._get_etag(tf.name)
hd = utils.get_etag(tf.name)
assert hd == hashlib.md5().hexdigest()
def test_get_etag(self):
tf = tempfile.NamedTemporaryFile()
tf.file.write('123' * utils.CHUNK_SIZE)
tf.file.flush()
hd = utils._get_etag(tf.name)
hd = utils.get_etag(tf.name)
tf.file.seek(0)
md5 = hashlib.md5()
while True:
@@ -402,137 +372,6 @@ class TestUtils(unittest.TestCase):
md5.update(chunk)
assert hd == md5.hexdigest()
def test_get_object_metadata_dne(self):
md = utils.get_object_metadata("/tmp/doesNotEx1st")
assert md == {}
def test_get_object_metadata_err(self):
tf = tempfile.NamedTemporaryFile()
try:
utils.get_object_metadata(
os.path.join(tf.name, "doesNotEx1st"))
except SwiftOnFileSystemOSError as e:
assert e.errno != errno.ENOENT
else:
self.fail("Expected exception")
obj_keys = (utils.X_TIMESTAMP, utils.X_CONTENT_TYPE, utils.X_ETAG,
utils.X_CONTENT_LENGTH, utils.X_TYPE, utils.X_OBJECT_TYPE)
def test_get_object_metadata_file(self):
tf = tempfile.NamedTemporaryFile()
tf.file.write('123')
tf.file.flush()
md = utils.get_object_metadata(tf.name)
for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.FILE
assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_ETAG] == utils._get_etag(tf.name)
def test_get_object_metadata_dir(self):
td = tempfile.mkdtemp()
try:
md = utils.get_object_metadata(td)
for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(td))
assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
finally:
os.rmdir(td)
def test_create_object_metadata_file(self):
tf = tempfile.NamedTemporaryFile()
tf.file.write('4567')
tf.file.flush()
r_md = utils.create_object_metadata(tf.name)
xkey = _xkey(tf.name, utils.METADATA_KEY)
assert len(_xattrs.keys()) == 1
assert xkey in _xattrs
assert _xattr_op_cnt['set'] == 1
md = deserialize_metadata(_xattrs[xkey])
assert r_md == md
for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.FILE
assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(tf.name))
assert md[utils.X_ETAG] == utils._get_etag(tf.name)
def test_create_object_metadata_dir(self):
td = tempfile.mkdtemp()
try:
r_md = utils.create_object_metadata(td)
xkey = _xkey(td, utils.METADATA_KEY)
assert len(_xattrs.keys()) == 1
assert xkey in _xattrs
assert _xattr_op_cnt['set'] == 1
md = deserialize_metadata(_xattrs[xkey])
assert r_md == md
for key in self.obj_keys:
assert key in md, "Expected key %s in %r" % (key, md)
assert md[utils.X_TYPE] == utils.OBJECT
assert md[utils.X_OBJECT_TYPE] == utils.DIR_NON_OBJECT
assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
assert md[utils.X_CONTENT_LENGTH] == 0
assert md[utils.X_TIMESTAMP] == utils.normalize_timestamp(os.path.getctime(td))
assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
finally:
os.rmdir(td)
def test_validate_object_empty(self):
ret = utils.validate_object({})
assert not ret
def test_validate_object_missing_keys(self):
ret = utils.validate_object({'foo': 'bar'})
assert not ret
def test_validate_object_bad_type(self):
md = {utils.X_TIMESTAMP: 'na',
utils.X_CONTENT_TYPE: 'na',
utils.X_ETAG: 'bad',
utils.X_CONTENT_LENGTH: 'na',
utils.X_TYPE: 'bad',
utils.X_OBJECT_TYPE: 'na'}
ret = utils.validate_object(md)
assert not ret
def test_validate_object_good_type(self):
md = {utils.X_TIMESTAMP: 'na',
utils.X_CONTENT_TYPE: 'na',
utils.X_ETAG: 'bad',
utils.X_CONTENT_LENGTH: 'na',
utils.X_TYPE: utils.OBJECT,
utils.X_OBJECT_TYPE: 'na'}
ret = utils.validate_object(md)
assert ret
def test_validate_object_with_stat(self):
md = {utils.X_TIMESTAMP: 'na',
utils.X_CONTENT_TYPE: 'na',
utils.X_ETAG: 'bad',
utils.X_CONTENT_LENGTH: '12345',
utils.X_TYPE: utils.OBJECT,
utils.X_OBJECT_TYPE: 'na'}
fake_stat = Mock(st_size=12346, st_mode=33188)
self.assertFalse(utils.validate_object(md, fake_stat))
fake_stat = Mock(st_size=12345, st_mode=33188)
self.assertTrue(utils.validate_object(md, fake_stat))
def test_write_pickle(self):
td = tempfile.mkdtemp()
try:

View File

@@ -188,14 +188,13 @@ class TestDiskFile(unittest.TestCase):
fd.write("1234")
stats = os.stat(the_file)
ts = normalize_timestamp(stats.st_ctime)
etag = md5()
etag.update("1234")
etag = etag.hexdigest()
etag = md5("1234").hexdigest()
exp_md = {
'Content-Length': 4,
'ETag': etag,
'X-Timestamp': ts,
'X-Object-PUT-Mtime': normalize_timestamp(stats.st_mtime),
'X-Object-Sysmeta-Update-Container': True,
'Content-Type': 'application/octet-stream'}
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
assert gdf._obj == "z"
@@ -227,10 +226,10 @@ class TestDiskFile(unittest.TestCase):
ini_md = {
'X-Type': 'Object',
'X-Object-Type': 'file',
'Content-Length': 4,
'ETag': 'etag',
'X-Timestamp': 'ts',
'Content-Type': 'application/loctet-stream'}
'Content-Length': os.path.getsize(the_file),
'ETag': md5("1234").hexdigest(),
'X-Timestamp': os.stat(the_file).st_mtime,
'Content-Type': 'application/octet-stream'}
_metadata[_mapit(the_file)] = ini_md
exp_md = ini_md.copy()
del exp_md['X-Type']
@@ -273,10 +272,10 @@ class TestDiskFile(unittest.TestCase):
ini_md = {
'X-Type': 'Object',
'X-Object-Type': 'dir',
'Content-Length': 5,
'ETag': 'etag',
'X-Timestamp': 'ts',
'Content-Type': 'application/loctet-stream'}
'Content-Length': 0,
'ETag': md5().hexdigest(),
'X-Timestamp': os.stat(the_dir).st_mtime,
'Content-Type': 'application/directory'}
_metadata[_mapit(the_dir)] = ini_md
exp_md = ini_md.copy()
del exp_md['X-Type']
@@ -493,7 +492,7 @@ class TestDiskFile(unittest.TestCase):
'X-Object-Type': 'file',
'Content-Length': 4,
'ETag': 'etag',
'X-Timestamp': 'ts',
'X-Timestamp': '1234',
'Content-Type': 'application/loctet-stream'}
_metadata[_mapit(the_file)] = ini_md
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
@@ -522,7 +521,7 @@ class TestDiskFile(unittest.TestCase):
'Content-Length': 4,
'name': 'z',
'ETag': 'etag',
'X-Timestamp': 'ts'}
'X-Timestamp': '1234'}
_metadata[_mapit(the_file)] = ini_md
gdf = self._get_diskfile("vol0", "p57", "ufo47", "bar", "z")
@@ -548,7 +547,7 @@ class TestDiskFile(unittest.TestCase):
'X-Type': 'Object',
'Content-Length': 0,
'ETag': 'etag',
'X-Timestamp': 'ts',
'X-Timestamp': '1234',
'X-Object-Meta-test':'test',
'Content-Type': 'application/directory'}
_metadata[_mapit(the_dir)] = init_md
@@ -571,7 +570,6 @@ class TestDiskFile(unittest.TestCase):
DIR_OBJECT)
self.assertFalse('X-Object-Meta-test' in _metadata[_mapit(the_dir)])
def test_write_metadata_w_meta_file(self):
the_path = os.path.join(self.td, "vol0", "ufo47", "bar")
the_file = os.path.join(the_path, "z")
@@ -625,7 +623,7 @@ class TestDiskFile(unittest.TestCase):
assert gdf._metadata is None
newmd = {
'ETag': 'etag',
'X-Timestamp': 'ts',
'X-Timestamp': '1234',
'Content-Type': 'application/directory'}
with gdf.create(None, None) as dw:
dw.put(newmd)