1dceafa7d5
Previously, ssync would not sync nor cleanup non-durable data fragments on handoffs. When the reconstructor is syncing objects from a handoff node (a 'revert' reconstructor job) it may be useful, and is not harmful, to also send non-durable fragments if the receiver has older or no fragment data.

Several changes are made to enable this.

On the sending side:

- For handoff (revert) jobs, the reconstructor instantiates SsyncSender with a new 'include_non_durable' option.
- If configured with the include_non_durable option, the SsyncSender calls the diskfile yield_hashes function with options that allow non-durable fragments to be yielded.
- The diskfile yield_hashes function is enhanced to include a 'durable' flag in the data structure yielded for each object.
- The SsyncSender includes the 'durable' flag in the metadata sent during the missing_check exchange with the receiver.
- If the receiver requests the non-durable object, the SsyncSender includes a new 'X-Backend-No-Commit' header when sending the PUT subrequest for the object.
- The SsyncSender includes the non-durable object in the collection of synced objects returned to the reconstructor so that the non-durable fragment is removed from the handoff node.

On the receiving side:

- The object server includes a new 'X-Backend-Accept-No-Commit' header in its response to SSYNC requests. This indicates to the sender that the receiver has been upgraded to understand the 'X-Backend-No-Commit' header.
- The SsyncReceiver is enhanced to consider non-durable data when determining if the sender's data is wanted or not.
- The object server PUT method is enhanced to check for an 'X-Backend-No-Commit' header before committing a diskfile.

If a handoff sender has both a durable and newer non-durable fragment for the same object and frag-index, only the newer non-durable fragment will be synced and removed on the first reconstructor pass. The durable fragment will be synced and removed on the next reconstructor pass.
Change-Id: I1d47b865e0a621f35d323bbed472a6cfd2a5971b Closes-Bug: 1778002
109 lines
4.3 KiB
Python
109 lines
4.3 KiB
Python
# Copyright (c) 2013 - 2015 OpenStack Foundation
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
# implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
import os
|
|
import shutil
|
|
import tempfile
|
|
import unittest
|
|
|
|
from swift.common import utils
|
|
from swift.common.storage_policy import POLICIES
|
|
from swift.common.utils import Timestamp, md5
|
|
|
|
|
|
def write_diskfile(df, timestamp, data=b'test data', frag_index=None,
                   commit=True, legacy_durable=False, extra_metadata=None):
    """Write *data* plus object metadata to the diskfile *df*.

    Optionally skip the commit step (leaving a non-durable fragment), or
    commit via a legacy ``.durable`` marker file instead of the writer's
    own commit machinery.

    :param df: the diskfile to write to
    :param timestamp: a Timestamp used for the data file and metadata
    :param data: the object body to write
    :param frag_index: if not None, recorded as the EC fragment index
    :param commit: if False, do not make the data durable
    :param legacy_durable: if True (and committing), simulate the old-style
        ``<timestamp>.durable`` marker file instead of calling commit()
    :param extra_metadata: optional dict merged into the object metadata
    :returns: the metadata dict that was written
    """
    with df.create() as writer:
        writer.write(data)
        metadata = {'ETag': md5(data, usedforsecurity=False).hexdigest(),
                    'X-Timestamp': timestamp.internal,
                    'Content-Length': str(len(data))}
        metadata.update(extra_metadata or {})
        if frag_index is not None:
            metadata['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index)
        writer.put(metadata)
        if commit:
            if legacy_durable:
                # simulate legacy .durable file creation
                marker = os.path.join(df._datadir,
                                      timestamp.internal + '.durable')
                with open(marker, 'wb'):
                    pass
            else:
                writer.commit(timestamp)
        # when commit is False the fragment is left non-durable
    return metadata
|
|
|
|
|
|
class BaseTest(unittest.TestCase):
    """Shared scaffolding for ssync tests.

    Sets up a temporary sender-side devices tree and provides helpers for
    creating (and optionally opening) diskfiles through the daemon's
    diskfile router.  Subclasses are expected to assign ``self.daemon``
    in their own setUp.
    """

    def setUp(self):
        self.device = 'dev'
        self.partition = '9'
        self.tmpdir = tempfile.mkdtemp()
        # sender side setup
        self.tx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
        utils.mkdirs(os.path.join(self.tx_testdir, self.device))
        self.daemon_conf = {'devices': self.tx_testdir,
                            'mount_check': 'false'}
        # daemon will be set in subclass setUp
        self.daemon = None

    def tearDown(self):
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _make_diskfile(self, device='dev', partition='9',
                       account='a', container='c', obj='o', body=b'test',
                       extra_metadata=None, policy=None,
                       frag_index=None, timestamp=None, df_mgr=None,
                       commit=True, verify=True, **kwargs):
        """Create a diskfile via the daemon's diskfile manager and write
        *body* to it; optionally commit and sanity-check the result.

        :returns: the created diskfile
        """
        policy = policy or POLICIES.legacy
        if timestamp is None:
            timestamp = Timestamp.now()
        if df_mgr is None:
            df_mgr = self.daemon._df_router[policy]
        df = df_mgr.get_diskfile(
            device, partition, account, container, obj, policy=policy,
            frag_index=frag_index, **kwargs)
        write_diskfile(df, timestamp, data=body,
                       extra_metadata=extra_metadata, commit=commit)
        if commit and verify:
            # when we write and commit stub data, sanity check it's readable
            # and not quarantined because of any validation check
            with df.open():
                self.assertEqual(b''.join(df.reader()), body)
            # sanity checks
            listing = os.listdir(df._datadir)
            self.assertTrue(listing)
            for filename in listing:
                self.assertTrue(filename.startswith(timestamp.internal))
        return df

    def _make_open_diskfile(self, device='dev', partition='9',
                            account='a', container='c', obj='o', body=b'test',
                            extra_metadata=None, policy=None,
                            frag_index=None, timestamp=None, df_mgr=None,
                            commit=True, **kwargs):
        """Like :meth:`_make_diskfile`, but return the diskfile opened."""
        df = self._make_diskfile(device, partition, account, container, obj,
                                 body, extra_metadata, policy, frag_index,
                                 timestamp, df_mgr, commit, **kwargs)
        df.open()
        return df