Merge "Add online data migrations for conductor version"
commit 45df95c51e
@@ -58,8 +58,8 @@ dbapi = db_api.get_instance()
 #               of migrated objects.
 #     """
 ONLINE_MIGRATIONS = (
-    # Added in Pike
-    # TODO(rloo): remove in Queens
+    # Added in Pike, modified in Queens
+    # TODO(rloo): remove in Rocky
     dbapi.backfill_version_column,
 )

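Note: the functions listed in ONLINE_MIGRATIONS follow the 2-tuple contract described in the commented sample above (total objects still needing migration, objects migrated by this call). As a rough illustration only -- not ironic's actual 'ironic-dbsync online_data_migrations' code, and with made-up names -- a batch runner over such a list could look like:

# Hypothetical batch runner over ONLINE_MIGRATIONS-style callables; each
# callable is assumed to take (context, max_count) and to return the
# (total_left, migrated) 2-tuple described in the sample docstring above.
def run_online_migrations(context, migrations, batch_size=50):
    """Run each migration in batches until it reports no more work."""
    for migrate in migrations:
        while True:
            total_left, migrated = migrate(context, batch_size)
            if migrated == 0:
                # Either everything is done (total_left == 0) or this
                # migration cannot make further progress right now.
                break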
@@ -482,6 +482,7 @@ class Connection(object):
                        'hostname': the unique hostname which identifies
                                    this Conductor service.
                        'drivers': a list of supported drivers.
+                       'version': the version of the object.Conductor
                       }
        :param update_existing: When false, registration will raise an
                                exception when a conflicting online record
@@ -899,14 +900,20 @@ class Connection(object):

     @abc.abstractmethod
     def backfill_version_column(self, max_count):
-        """Backfill the version column with Ocata versions.
+        """Backfill the Conductor version column with Pike version.

-        The version column was added to all the resource tables in this Pike
+        The version column was added to all the resource tables in the Pike
         release (via 'ironic-dbsync upgrade'). After upgrading (from Ocata to
-        Pike), the 'ironic-dbsync online_data_migrations' command will invoke
-        this method to populate (backfill) the version columns. The version
-        used will be the object version from the pinning set in config (i.e.
-        prior to this column being added).
+        Pike), the 'ironic-dbsync online_data_migrations' command would have
+        populated (backfilled) the version column for all objects.
+
+        Unfortunately, in the Pike release, we forgot to set the value for the
+        conductor's version column. For the Queens release, we are setting
+        the conductor version, however, we still need to backfill in case new
+        conductors were added between the time the operator ran Pike's
+        'ironic-dbsync online_data_migrations' and their upgrade to Queens.
+        The version used will be the conductor object version from the Pike
+        release.

        :param max_count: The maximum number of objects to migrate. Must be
                          >= 0. If zero, all the objects will be migrated.
@@ -914,4 +921,4 @@ class Connection(object):
                  migrated (at the beginning of this call) and 2. the number
                  of migrated objects.
        """
-        # TODO(rloo) Delete this in Queens cycle.
+        # TODO(rloo) Delete this in Rocky cycle.
@@ -1160,13 +1160,20 @@ class Connection(api.Connection):

    @oslo_db_api.retry_on_deadlock
    def backfill_version_column(self, context, max_count):
-        """Backfill the version column with Ocata versions.
+        """Backfill the Conductor version column with Pike version.

-        The version column was added to all the resource tables in this Pike
+        The version column was added to all the resource tables in the Pike
         release (via 'ironic-dbsync upgrade'). After upgrading (from Ocata to
-        Pike), the 'ironic-dbsync online_data_migrations' command will invoke
-        this method to populate (backfill) the version columns. The version
-        used will be the object version prior to this column being added.
+        Pike), the 'ironic-dbsync online_data_migrations' command would have
+        populated (backfilled) the version column for all objects.
+
+        Unfortunately, in the Pike release, we forgot to set the value for the
+        conductor's version column. For the Queens release, we are setting
+        the conductor version, however, we still need to backfill in case new
+        conductors were added between the time the operator ran Pike's
+        'ironic-dbsync online_data_migrations' and their upgrade to Queens.
+        The version used will be the conductor object version from the Pike
+        release.

        :param context: the admin context (not used)
        :param max_count: The maximum number of objects to migrate. Must be
@@ -1175,15 +1182,17 @@ class Connection(api.Connection):
                  migrated (at the beginning of this call) and 2. the number
                  of migrated objects.
        """
-        # TODO(rloo): Delete this in Queens cycle.
-        prior_release = '7.0'
+        # TODO(rloo): Delete this in Rocky cycle.
+        prior_release = 'pike'
         mapping = release_mappings.RELEASE_MAPPING[prior_release]['objects']
         total_to_migrate = 0
         total_migrated = 0

-        # backfill only objects that were in the prior release
-        sql_models = [model for model in models.Base.__subclasses__()
-                      if model.__name__ in mapping]
+        # backfill only the Conductor.
+        # NOTE(rloo) This code was used in Pike to backfill all the objects.
+        # To make it easier to review, etc., we are re-using that code with
+        # minimal code changes to only backfill the 'Conductor' object.
+        sql_models = [models.Conductor]
         for model in sql_models:
             query = model_query(model).filter(model.version.is_(None))
             total_to_migrate += query.count()
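The value written by the backfill comes from the RELEASE_MAPPING lookup shown above. As a small illustrative sketch only (the mapping contents and the '1.0' version are placeholders, not ironic's real release_mappings data), the lookup amounts to:

# Placeholder stand-in for ironic.common.release_mappings.RELEASE_MAPPING;
# the object version '1.0' below is illustrative only.
RELEASE_MAPPING = {
    'pike': {'objects': {'Conductor': ['1.0']}},
}

prior_release = 'pike'
mapping = RELEASE_MAPPING[prior_release]['objects']
# Rows whose version column is NULL are updated to the first version listed
# for the model's class name in the prior (Pike) release.
backfill_version = mapping['Conductor'][0]
assert backfill_version == '1.0'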
@@ -1211,6 +1220,9 @@ class Connection(api.Connection):
             num_migrated = 0
             with _session_for_write():
                 query = model_query(model).filter(model.version.is_(None))
+                # NOTE(rloo) Caution here; after doing query.count(), it is
+                #            possible that the value is different in the
+                #            next invocation of the query.
                 if max_to_migrate < query.count():
                     # Only want to update max_to_migrate objects; cannot use
                     # sql's limit(), so we generate a new query with
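The NOTE added above, together with the existing comment about sql's limit(), reflects a general SQLAlchemy constraint: a bulk UPDATE cannot portably be combined with LIMIT, so the rows to touch are selected first and the UPDATE is filtered by their ids. The following is a self-contained sketch of that pattern using plain SQLAlchemy with an illustrative table and version value; it is not ironic's model_query helper code.

# Sketch of the "collect ids first, then update by id" pattern, assuming
# SQLAlchemy 1.4+; the table layout and the '1.0' version are illustrative.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Conductor(Base):
    __tablename__ = 'conductors'
    id = Column(Integer, primary_key=True)
    version = Column(String(15), nullable=True)


engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([Conductor(version=None) for _ in range(5)])
    session.commit()

    max_to_migrate = 2
    query = session.query(Conductor).filter(Conductor.version.is_(None))
    # UPDATE ... LIMIT is not portable, so take the ids of the first
    # max_to_migrate rows and update only those; re-checking IS NULL guards
    # against rows migrated concurrently since the ids were read.
    ids = [c.id for c in query.limit(max_to_migrate)]
    migrated = (session.query(Conductor)
                .filter(Conductor.id.in_(ids), Conductor.version.is_(None))
                .update({Conductor.version: '1.0'},
                        synchronize_session=False))
    session.commit()
    print(migrated)  # 2 of the 5 NULL-version rows are now backfilled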
@@ -57,81 +57,62 @@ class BackfillVersionTestCase(base.DbTestCase):
         super(BackfillVersionTestCase, self).setUp()
         self.context = context.get_admin_context()
         self.dbapi = db_api.get_instance()
-        obj_mapping = release_mappings.RELEASE_MAPPING['ocata']['objects']
-        self.node_ver = obj_mapping['Node'][0]
-        self.chassis_ver = obj_mapping['Chassis'][0]
+        obj_mapping = release_mappings.RELEASE_MAPPING['pike']['objects']
+        self.conductor_ver = obj_mapping['Conductor'][0]

     def test_empty_db(self):
         self.assertEqual((0, 0),
                          self.dbapi.backfill_version_column(self.context, 10))

     def test_version_exists(self):
-        utils.create_test_node()
+        utils.create_test_conductor()
         self.assertEqual((0, 0),
                          self.dbapi.backfill_version_column(self.context, 10))

-    def test_one_node(self):
-        node = utils.create_test_node(version=None)
-        self.assertIsNone(node.version)
-        node = self.dbapi.get_node_by_uuid(node.uuid)
-        self.assertIsNone(node.version)
+    def test_one_conductor(self):
+        conductors = self._create_conductors(1)
         self.assertEqual((1, 1),
                          self.dbapi.backfill_version_column(self.context, 10))
-        res = self.dbapi.get_node_by_uuid(node.uuid)
-        self.assertEqual(self.node_ver, res.version)
+        res = self.dbapi.get_conductor(conductors[0])
+        self.assertEqual(self.conductor_ver, res.version)

     def test_max_count_zero(self):
-        orig_node = utils.create_test_node(version=None)
-        orig_chassis = utils.create_test_chassis(version=None)
-        self.assertIsNone(orig_node.version)
-        self.assertIsNone(orig_chassis.version)
+        conductors = self._create_conductors(2)
         self.assertEqual((2, 2),
                          self.dbapi.backfill_version_column(self.context, 0))
-        node = self.dbapi.get_node_by_uuid(orig_node.uuid)
-        self.assertEqual(self.node_ver, node.version)
-        chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid)
-        self.assertEqual(self.chassis_ver, chassis.version)
+        for hostname in conductors:
+            conductor = self.dbapi.get_conductor(hostname)
+            self.assertEqual(self.conductor_ver, conductor.version)

-    def test_no_version_max_count_1(self):
-        orig_node = utils.create_test_node(version=None)
-        orig_chassis = utils.create_test_chassis(version=None)
-        self.assertIsNone(orig_node.version)
-        self.assertIsNone(orig_chassis.version)
-        self.assertEqual((2, 1),
-                         self.dbapi.backfill_version_column(self.context, 1))
-        node = self.dbapi.get_node_by_uuid(orig_node.uuid)
-        chassis = self.dbapi.get_chassis_by_uuid(orig_chassis.uuid)
-        self.assertTrue(node.version is None or chassis.version is None)
-        self.assertTrue(node.version == self.node_ver or
-                        chassis.version == self.chassis_ver)
+    def _create_conductors(self, num, version=None):
+        conductors = []
+        for i in range(0, num):
+            conductor = utils.create_test_conductor(
+                version=version,
+                hostname='test_name_%d' % i,
+                uuid=uuidutils.generate_uuid())
+            conductors.append(conductor.hostname)
+        for hostname in conductors:
+            conductor = self.dbapi.get_conductor(hostname)
+            self.assertEqual(version, conductor.version)
+        return conductors

-    def _create_nodes(self, num_nodes, version=None):
-        nodes = []
-        for i in range(0, num_nodes):
-            node = utils.create_test_node(version=version,
-                                          uuid=uuidutils.generate_uuid())
-            nodes.append(node.uuid)
-        for uuid in nodes:
-            node = self.dbapi.get_node_by_uuid(uuid)
-            self.assertIsNone(node.version)
-        return nodes
-
-    def test_no_version_max_count_2_some_nodes(self):
-        nodes = self._create_nodes(5)
+    def test_no_version_max_count_2_some_conductors(self):
+        conductors = self._create_conductors(5)

         self.assertEqual((5, 2),
                          self.dbapi.backfill_version_column(self.context, 2))
         self.assertEqual((3, 3),
                          self.dbapi.backfill_version_column(self.context, 10))
-        for uuid in nodes:
-            node = self.dbapi.get_node_by_uuid(uuid)
-            self.assertEqual(self.node_ver, node.version)
+        for hostname in conductors:
+            conductor = self.dbapi.get_conductor(hostname)
+            self.assertEqual(self.conductor_ver, conductor.version)

-    def test_no_version_max_count_same_nodes(self):
-        nodes = self._create_nodes(5)
+    def test_no_version_max_count_same(self):
+        conductors = self._create_conductors(5)

         self.assertEqual((5, 5),
                          self.dbapi.backfill_version_column(self.context, 5))
-        for uuid in nodes:
-            node = self.dbapi.get_node_by_uuid(uuid)
-            self.assertEqual(self.node_ver, node.version)
+        for hostname in conductors:
+            conductor = self.dbapi.get_conductor(hostname)
+            self.assertEqual(self.conductor_ver, conductor.version)
@@ -368,6 +368,23 @@ def get_test_conductor(**kw):
     }


+def create_test_conductor(**kw):
+    """Create test conductor entry in DB and return Conductor DB object.
+
+    Function to be used to create test Conductor objects in the database.
+
+    :param kw: kwargs with overriding values for conductor's attributes.
+    :returns: Test Conductor DB object.
+
+    """
+    conductor = get_test_conductor(**kw)
+    # Let DB generate ID if it isn't specified explicitly
+    if 'id' not in kw:
+        del conductor['id']
+    dbapi = db_api.get_instance()
+    return dbapi.register_conductor(conductor)
+
+
 def get_test_ucs_info():
     return {
         "ucs_username": "admin",
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    The ``conductors`` database table's ``version`` column is
+    populated as part of the data migration (via the command
+    ``ironic-dbsync online_data_migrations``).