From 8cdf0fdebe9eb782322fccfc11253dc959cf321d Mon Sep 17 00:00:00 2001
From: Clay Gerrard
Date: Mon, 27 Apr 2015 13:29:50 -0700
Subject: [PATCH] Fix account replication during pre-storage-policy upgrade

Old account schemas don't send the storage_policy_index key for container
rows during replication, and if the receiving end is already running an
upgraded server it fails with a KeyError.

Normally this would work itself out if the old schema received any updates
from the container layer, if a new container were created, or if it needed
a row sync from another account database - but if the account databases
have rows out of sync and there's otherwise no activity in the account,
there's nothing to force the old schemas to be upgraded.

Rather than force the old schema that already has a complete set of
container rows to migrate even in the absence of activity, we can just
fill in the default legacy value for the storage policy index, allow the
accounts to get back in sync, and migrate the next time a container update
occurs.

FWIW, I was never able to get a cluster upgrade stuck in this state
without some sort of account failure that forced the replicas' rows out of
sync (in my case I just unlinked a pending file and then made sure to
force all my account databases to commit pending files before upgrading -
leading to an upgraded cluster that absolutely needed account replication
to resolve a row mismatch for inactive accounts with old schemas).

Closes-Bug: #1424108
Change-Id: Iaf4ef834eb24f0e11a52cc22b93a864574fabf83
---
 swift/account/backend.py          |  1 +
 test/unit/account/test_backend.py | 41 ++++++++++++++++++++++++++++++-
 2 files changed, 41 insertions(+), 1 deletion(-)

diff --git a/swift/account/backend.py b/swift/account/backend.py
index 3ff42518d2..ec28394626 100644
--- a/swift/account/backend.py
+++ b/swift/account/backend.py
@@ -460,6 +460,7 @@ class AccountBroker(DatabaseBroker):
             max_rowid = -1
             curs = conn.cursor()
             for rec in item_list:
+                rec.setdefault('storage_policy_index', 0)  # legacy
                 record = [rec['name'], rec['put_timestamp'],
                           rec['delete_timestamp'], rec['object_count'],
                           rec['bytes_used'], rec['deleted'],
diff --git a/test/unit/account/test_backend.py b/test/unit/account/test_backend.py
index d231fea741..d262689e87 100644
--- a/test/unit/account/test_backend.py
+++ b/test/unit/account/test_backend.py
@@ -32,7 +32,7 @@ import random
 
 from swift.account.backend import AccountBroker
 from swift.common.utils import Timestamp
-from test.unit import patch_policies, with_tempdir
+from test.unit import patch_policies, with_tempdir, make_timestamp_iter
 from swift.common.db import DatabaseConnectionError
 from swift.common.storage_policy import StoragePolicy, POLICIES
 
@@ -1120,6 +1120,45 @@ class TestAccountBrokerBeforeSPI(TestAccountBroker):
             conn.execute('SELECT * FROM policy_stat')
             conn.execute('SELECT storage_policy_index FROM container')
 
+    @with_tempdir
+    def test_pre_storage_policy_replication(self, tempdir):
+        ts = make_timestamp_iter()
+
+        # make two account database "replicas"
+        old_broker = AccountBroker(os.path.join(tempdir, 'old_account.db'),
+                                   account='a')
+        old_broker.initialize(ts.next().internal)
+        new_broker = AccountBroker(os.path.join(tempdir, 'new_account.db'),
+                                   account='a')
+        new_broker.initialize(ts.next().internal)
+
+        # manually insert an existing row to avoid migration for old database
+        with old_broker.get() as conn:
+            conn.execute('''
+                INSERT INTO container (name, put_timestamp,
+                    delete_timestamp, object_count, bytes_used,
+                    deleted)
+                VALUES (?, ?, ?, ?, ?, ?)
+            ''', ('test_name', ts.next().internal, 0, 1, 2, 0))
+            conn.commit()
+
+        # get replication info and rows from old database
+        info = old_broker.get_info()
+        rows = old_broker.get_items_since(0, 10)
+
+        # "send" replication rows to new database
+        new_broker.merge_items(rows, info['id'])
+
+        # make sure "test_name" container is in new database
+        self.assertEqual(new_broker.get_info()['container_count'], 1)
+        for c in new_broker.list_containers_iter(1, None, None, None, None):
+            self.assertEqual(c, ('test_name', 1, 2, 0))
+
+        # full migration successful
+        with new_broker.get() as conn:
+            conn.execute('SELECT * FROM policy_stat')
+            conn.execute('SELECT storage_policy_index FROM container')
+
 
 def pre_track_containers_create_policy_stat(self, conn):
     """
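
For context, a minimal standalone sketch of what the one-line fix buys us.
It is not Swift code: the row values and the build_record() helper are made
up for illustration, and only the setdefault() call mirrors the patched
AccountBroker.merge_items.

# Standalone sketch -- not the actual AccountBroker.merge_items code.
# Rows replicated from a pre-storage-policy account schema carry no
# 'storage_policy_index' key, so an upgraded receiver that indexes the key
# directly raises KeyError. Defaulting the key to 0 (the legacy policy)
# lets those rows merge cleanly.

def build_record(rec):
    """Build the column list an upgraded receiver would insert for a row."""
    # The fix: treat a missing key as the legacy policy (index 0) instead
    # of requiring the sender's schema to have migrated already.
    rec.setdefault('storage_policy_index', 0)
    return [rec['name'], rec['put_timestamp'], rec['delete_timestamp'],
            rec['object_count'], rec['bytes_used'], rec['deleted'],
            rec['storage_policy_index']]

# A row as an upgraded (post-storage-policy) sender would replicate it:
upgraded_row = {'name': 'c1', 'put_timestamp': '0000000001.00000',
                'delete_timestamp': '0', 'object_count': 1, 'bytes_used': 2,
                'deleted': 0, 'storage_policy_index': 1}

# A row as an old-schema sender replicates it -- no storage_policy_index:
legacy_row = {'name': 'c2', 'put_timestamp': '0000000002.00000',
              'delete_timestamp': '0', 'object_count': 3, 'bytes_used': 4,
              'deleted': 0}

print(build_record(upgraded_row))  # keeps storage_policy_index == 1
print(build_record(legacy_row))    # fills in the legacy default, 0

Defaulting on the receiving side is deliberately conservative: it leaves the
sender's old schema untouched and lets the normal container-update path
trigger the real migration later, as described in the commit message.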