b09360d447
This patch fixes the object-reconstructor to calculate device_count as the total number of local devices across all policies. Previously, Swift counted devices per policy, so reconstruction_device_count — the number of devices Swift actually needs to reconstruct — was computed as the sum of the per-policy counts. With this patch, Swift first gathers all local devices for all policies, and then collects parts for each device as well as the current one. This way, the remaining job/disk percentages can be seen via the stats_line output. To enable this change, this patch also touches the object replicator to get a DiskFileManager via the DiskFileRouter class, so that DiskFileManager instances are policy specific. Currently the same replication-policy DiskFileManager class is always used, but this change future-proofs the replicator for possible other DiskFileManager implementations. The change also gives the ObjectReplicator a _df_router variable, making it consistent with the ObjectReconstructor, and allowing a common way for ssync.Sender to access DiskFileManager instances via its daemon's _df_router instance. Also, remove the use of FakeReplicator from the ssync test suite. It was not necessary and risked masking divergence between ssync and the replicator and reconstructor daemon implementations. Co-Author: Alistair Coles <alistair.coles@hpe.com> Closes-Bug: #1488608 Change-Id: Ic7a4c932b59158d21a5fb4de9ed3ed57f249d068
110 lines
4.2 KiB
Python
110 lines
4.2 KiB
Python
# Copyright (c) 2013 - 2015 OpenStack Foundation
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
# implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
import hashlib
|
|
import os
|
|
import shutil
|
|
import tempfile
|
|
import unittest
|
|
import time
|
|
|
|
from swift.common import utils
|
|
from swift.common.storage_policy import POLICIES
|
|
from swift.common.utils import Timestamp
|
|
|
|
|
|
def write_diskfile(df, timestamp, data='test data', frag_index=None,
                   commit=True, legacy_durable=False, extra_metadata=None):
    """Write *data* and object metadata to the diskfile *df*.

    :param df: the diskfile to populate
    :param timestamp: a Timestamp whose ``internal`` form is recorded as
        X-Timestamp (and names the legacy .durable file, if requested)
    :param data: the object body to write
    :param frag_index: if not None, recorded as the EC fragment index
    :param commit: if False, leave the diskfile uncommitted (not durable)
    :param legacy_durable: if True (and committing), create an empty
        ``<timestamp>.durable`` marker file instead of calling commit()
    :param extra_metadata: optional dict merged over the base metadata
    :returns: the metadata dict that was written via ``writer.put()``
    """
    with df.create() as wr:
        wr.write(data)
        meta = {}
        meta['ETag'] = hashlib.md5(data).hexdigest()
        meta['X-Timestamp'] = timestamp.internal
        meta['Content-Length'] = str(len(data))
        # caller-supplied metadata may override the base keys above...
        if extra_metadata:
            meta.update(extra_metadata)
        # ...but an explicit frag_index always wins
        if frag_index is not None:
            meta['X-Object-Sysmeta-Ec-Frag-Index'] = str(frag_index)
        wr.put(meta)
        if commit:
            if legacy_durable:
                # simulate the legacy on-disk layout: an empty .durable
                # marker file rather than a durable data file
                marker = os.path.join(df._datadir,
                                      timestamp.internal + '.durable')
                with open(marker, 'wb'):
                    pass
            else:
                wr.commit(timestamp)
        # when commit is False the diskfile is left non-durable
    return meta
|
|
|
|
|
|
class BaseTest(unittest.TestCase):
    """Shared fixture for ssync tests.

    Provides a temporary devices directory, a minimal daemon conf, and
    helpers for creating (optionally committed and opened) diskfiles.
    Subclasses are expected to assign ``self.daemon`` in their own setUp.
    """

    def setUp(self):
        self.device = 'dev'
        self.partition = '9'
        self.tmpdir = tempfile.mkdtemp()
        # sender side setup
        self.tx_testdir = os.path.join(self.tmpdir, 'tmp_test_ssync_sender')
        utils.mkdirs(os.path.join(self.tx_testdir, self.device))
        self.daemon_conf = dict(devices=self.tx_testdir, mount_check='false')
        # daemon will be set in subclass setUp
        self.daemon = None

    def tearDown(self):
        # best-effort cleanup of the temp devices tree
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def _make_diskfile(self, device='dev', partition='9',
                       account='a', container='c', obj='o', body='test',
                       extra_metadata=None, policy=None,
                       frag_index=None, timestamp=None, df_mgr=None,
                       commit=True):
        """Create a diskfile with *body* written at *timestamp*.

        Defaults to the legacy policy, the current time, and the daemon's
        policy-specific DiskFileManager. When committed, sanity checks
        that the file is readable and its dir listing matches *timestamp*.

        :returns: the (unopened) diskfile
        """
        policy = policy if policy else POLICIES.legacy
        if timestamp is None:
            timestamp = Timestamp(time.time())
        if df_mgr is None:
            # consistent with the reconstructor/replicator daemons
            df_mgr = self.daemon._df_router[policy]
        df = df_mgr.get_diskfile(
            device, partition, account, container, obj,
            policy=policy, frag_index=frag_index)
        write_diskfile(df, timestamp, data=body,
                       extra_metadata=extra_metadata, commit=commit)
        if commit:
            # when we write and commit stub data, sanity check it's readable
            # and not quarantined because of any validation check
            with df.open():
                self.assertEqual(''.join(df.reader()), body)
            # sanity checks
            files = os.listdir(df._datadir)
            self.assertTrue(files)
            for fname in files:
                self.assertTrue(fname.startswith(timestamp.internal))
        return df

    def _make_open_diskfile(self, device='dev', partition='9',
                            account='a', container='c', obj='o', body='test',
                            extra_metadata=None, policy=None,
                            frag_index=None, timestamp=None, df_mgr=None):
        """Like :meth:`_make_diskfile` but returns the diskfile opened."""
        df = self._make_diskfile(
            device=device, partition=partition, account=account,
            container=container, obj=obj, body=body,
            extra_metadata=extra_metadata, policy=policy,
            frag_index=frag_index, timestamp=timestamp, df_mgr=df_mgr)
        df.open()
        return df