4321bb0af6
Containers now have a storage policy index associated with them, stored in
the container_stat table. This index is only settable at container creation
time (PUT request) and cannot be changed without deleting and recreating the
container. This is because a container's policy index applies to all of its
objects, so changing it would require moving large amounts of object data
around. If a user wants to change the policy for data in a container, they
must create a new container with the desired policy and move the data over.

Keep status_changed_at up to date with status changes, in particular during
container recreation and replication. When a container-server receives a PUT
for a deleted database, an extra UPDATE is issued against the container_stat
table to note the X-Timestamp of the request. During replication, if
merge_timestamps causes a container's status to change (from DELETED to
ACTIVE or vice versa), the status_changed_at field is set to the current
time. Accurate reporting of status_changed_at is useful for container
replication forensics and allows resolution of "set on create" attributes
like the upcoming storage_policy_index.

Expose backend container info on deleted containers: include basic container
info in backend headers on 404 responses from the container server. Default
empty values are used as placeholders if the database does not exist.
Specifically, the X-Backend-Status-Changed-At, X-Backend-DELETE-Timestamp and
X-Backend-Storage-Policy-Index values will be needed by the reconciler to
deal with reconciling out-of-order object writes in the face of recently
deleted containers.

 * Add "status_changed_at" key to the response from ContainerBroker.get_info.
 * Add "Status Timestamp" field to swift.cli.info.print_db_info_metadata.
 * Add "status_changed_at" key to the response from AccountBroker.get_info.

DocImpact
Implements: blueprint storage-policies
Change-Id: Ie6d388f067f5b096b0f96faef151120ba23c8748
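The backend headers described above are what a consumer such as the reconciler would read. Below is a minimal, illustrative sketch of that, not part of this change: it assumes a generic HTTP client (requests) and a hypothetical container-server address, device, partition and container name; only the header names come from the commit message.

# Illustrative sketch only, not part of the file below: host, port, device,
# partition and container name are hypothetical.
import requests

resp = requests.head(
    'http://127.0.0.1:6001/sda1/0/AUTH_test/example-container')

# Even on a 404 (e.g. a recently deleted container) the container server
# now includes these backend headers, with empty/default placeholder
# values if the database does not exist.
status_changed_at = resp.headers.get('X-Backend-Status-Changed-At')
delete_timestamp = resp.headers.get('X-Backend-DELETE-Timestamp')
policy_index = resp.headers.get('X-Backend-Storage-Policy-Index')

if resp.status_code == 404 and policy_index is not None:
    # A deleted container still reports the policy index it last used,
    # which lets out-of-order object writes be reconciled against the
    # correct policy.
    print('deleted container last had policy index %s' % policy_index)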
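The bullet list in the commit message above also adds a "status_changed_at" key to ContainerBroker.get_info. A minimal sketch of reading it, assuming the post-change ContainerBroker from swift.container.backend and the initialize(put_timestamp, storage_policy_index) call pattern used by the migration test below; the DB path is hypothetical.

# Illustrative sketch only, not part of the test file below.
import time

from swift.common.utils import normalize_timestamp
from swift.container.backend import ContainerBroker

broker = ContainerBroker('/tmp/example.db', account='a', container='c')
broker.initialize(normalize_timestamp(time.time()), 0)

info = broker.get_info()
# 'status_changed_at' records when the container last flipped between
# DELETED and ACTIVE; the info CLI (swift.cli.info) prints it as the
# "Status Timestamp" field.
print(info['status_changed_at'], info['storage_policy_index'])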
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import mock
import time
import os
import random
from tempfile import mkdtemp
from shutil import rmtree

from swift.common.utils import normalize_timestamp
from swift.container import auditor
from test.unit import debug_logger, with_tempdir
from test.unit.container import test_backend


class FakeContainerBroker(object):
    # Stub broker used in place of the real ContainerBroker: databases
    # whose filenames start with 'fail' raise in get_info(), while
    # 'true*' databases audit cleanly.
    def __init__(self, path):
        self.path = path
        self.db_file = path
        self.file = os.path.basename(path)

    def is_deleted(self):
        return False

    def get_info(self):
        if self.file.startswith('fail'):
            raise ValueError
        if self.file.startswith('true'):
            return 'ok'


class TestAuditor(unittest.TestCase):

    def setUp(self):
        self.testdir = os.path.join(mkdtemp(), 'tmp_test_container_auditor')
        self.logger = debug_logger()
        rmtree(self.testdir, ignore_errors=1)
        os.mkdir(self.testdir)
        fnames = ['true1.db', 'true2.db', 'true3.db',
                  'fail1.db', 'fail2.db']
        for fn in fnames:
            with open(os.path.join(self.testdir, fn), 'w+') as f:
                f.write(' ')

    def tearDown(self):
        rmtree(os.path.dirname(self.testdir), ignore_errors=1)

    @mock.patch('swift.container.auditor.ContainerBroker', FakeContainerBroker)
    def test_run_forever(self):
        sleep_times = random.randint(5, 10)
        call_times = sleep_times - 1

        class FakeTime(object):
            def __init__(self):
                self.times = 0

            def sleep(self, sec):
                self.times += 1
                if self.times < sleep_times:
                    time.sleep(0.1)
                else:
                    # stop the run_forever loop by raising an error
                    raise ValueError()

            def time(self):
                return time.time()

        conf = {}
        test_auditor = auditor.ContainerAuditor(conf, logger=self.logger)

        with mock.patch('swift.container.auditor.time', FakeTime()):
            def fake_audit_location_generator(*args, **kwargs):
                files = os.listdir(self.testdir)
                return [(os.path.join(self.testdir, f), '', '') for f in files]

            with mock.patch('swift.container.auditor.audit_location_generator',
                            fake_audit_location_generator):
                self.assertRaises(ValueError, test_auditor.run_forever)
        self.assertEquals(test_auditor.container_failures, 2 * call_times)
        self.assertEquals(test_auditor.container_passes, 3 * call_times)

    @mock.patch('swift.container.auditor.ContainerBroker', FakeContainerBroker)
    def test_run_once(self):
        conf = {}
        test_auditor = auditor.ContainerAuditor(conf, logger=self.logger)

        def fake_audit_location_generator(*args, **kwargs):
            files = os.listdir(self.testdir)
            return [(os.path.join(self.testdir, f), '', '') for f in files]

        with mock.patch('swift.container.auditor.audit_location_generator',
                        fake_audit_location_generator):
            test_auditor.run_once()
        self.assertEquals(test_auditor.container_failures, 2)
        self.assertEquals(test_auditor.container_passes, 3)

    @mock.patch('swift.container.auditor.ContainerBroker', FakeContainerBroker)
    def test_container_auditor(self):
        conf = {}
        test_auditor = auditor.ContainerAuditor(conf, logger=self.logger)
        files = os.listdir(self.testdir)
        for f in files:
            path = os.path.join(self.testdir, f)
            test_auditor.container_audit(path)
        self.assertEquals(test_auditor.container_failures, 2)
        self.assertEquals(test_auditor.container_passes, 3)


class TestAuditorMigrations(unittest.TestCase):

    @with_tempdir
    def test_db_migration(self, tempdir):
        # An un-migrated (pre-storage-policy) container DB should gain the
        # storage_policy_index column after a single auditor pass.
        db_path = os.path.join(tempdir, 'sda', 'containers', '0', '0', '0',
                               'test.db')
        with test_backend.TestContainerBrokerBeforeSPI.old_broker() as \
                old_ContainerBroker:
            broker = old_ContainerBroker(db_path, account='a', container='c')
            broker.initialize(normalize_timestamp(0), -1)

        with broker.get() as conn:
            try:
                conn.execute('SELECT storage_policy_index '
                             'FROM container_stat')
            except Exception as err:
                self.assert_('no such column: storage_policy_index' in
                             str(err))
            else:
                self.fail('TestContainerBrokerBeforeSPI broker class '
                          'was already migrated')

        conf = {'devices': tempdir, 'mount_check': False}
        test_auditor = auditor.ContainerAuditor(conf, logger=debug_logger())
        test_auditor.run_once()

        broker = auditor.ContainerBroker(db_path, account='a', container='c')
        info = broker.get_info()
        expected = {
            'account': 'a',
            'container': 'c',
            'object_count': 0,
            'bytes_used': 0,
            'storage_policy_index': 0,
        }
        for k, v in expected.items():
            self.assertEqual(info[k], v)


if __name__ == '__main__':
    unittest.main()