Locality support for clusters
In order to allow the instances of a cluster to be on the same hypervisor (affinity) or on different hypervisors (anti-affinity), a new argument, 'locality', needed to be added to the Trove cluster-create API. This changeset addresses the Trove server part of the feature: 'locality' can now be added to the ReST payload of a cluster-create command, and it is passed along as a scheduler hint to Nova.

The cluster scenario tests were enhanced to verify that 'affinity' works. Testing that 'anti-affinity' fails proved too time-consuming, since at present the only way for a cluster to 'fail' is to time out (and that takes 10 hours). The server group is checked to make sure it is created properly, and that it has been deleted after all the related instances are gone.

DocImpact: New functionality
Partially implements: blueprint replication-cluster-locality
Depends-On: Ie46cfa69ffe6a64760aca38c495563f5724bd0d0
Change-Id: I657bf5c023d0257e462cc39f57c16eb6ee83807a
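For illustration only, here is a minimal sketch of a cluster-create request body using the new field. The surrounding keys mirror the payload exercised by the unit tests in this change (mongodb 2.4.10, flavorRef "7" are just those test placeholders); only 'locality' is new, and it must be 'affinity' or 'anti-affinity':

    # Illustration only: a cluster-create body with the new field.
    # Keys mirror the payload used by the unit tests in this change;
    # the datastore/flavor values are just those test placeholders.
    request_body = {
        "cluster": {
            "name": "products",
            "datastore": {
                "type": "mongodb",
                "version": "2.4.10"
            },
            "instances": [
                {"flavorRef": "7", "volume": {"size": 1}},
                {"flavorRef": "7", "volume": {"size": 1}},
                {"flavorRef": "7", "volume": {"size": 1}},
            ],
            # must be 'affinity' or 'anti-affinity'
            "locality": "anti-affinity"
        }
    }

Omitting 'locality' keeps the previous behavior; the field is optional and is only validated when present.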
parent: ff49045744
commit: 51f2655770
@@ -0,0 +1,7 @@
+---
+features:
+  - A locality flag was added to the trove ReST API to
+    allow a user to specify whether the instances of a
+    cluster should be on the same hypervisor (affinity)
+    or on different hypervisors (anti-affinity).
@@ -24,6 +24,7 @@ from trove.common.i18n import _
 from trove.common.notification import DBaaSClusterGrow, DBaaSClusterShrink
 from trove.common.notification import StartNotification
 from trove.common import remote
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import strategy
 from trove.common import utils
 from trove.datastore import models as datastore_models
@@ -89,6 +90,9 @@ class Cluster(object):
         self.ds = (datastore_models.Datastore.
                    load(self.ds_version.datastore_id))
         self._db_instances = None
+        self._server_group = None
+        self._server_group_loaded = False
+        self._locality = None
 
     @classmethod
     def get_guest(cls, instance):
@@ -198,13 +202,39 @@ class Cluster(object):
         return inst_models.Instances.load_all_by_cluster_id(
             self.context, self.db_info.id, load_servers=False)
 
+    @property
+    def server_group(self):
+        # The server group could be empty, so we need a flag to cache it
+        if not self._server_group_loaded and self.instances:
+            self._server_group = self.instances[0].server_group
+            self._server_group_loaded = True
+        return self._server_group
+
+    @property
+    def locality(self):
+        if not self._locality:
+            if self.server_group:
+                self._locality = srv_grp.ServerGroup.get_locality(
+                    self._server_group)
+        return self._locality
+
+    @locality.setter
+    def locality(self, value):
+        """This is to facilitate the fact that the server group may not be
+        set up before the create command returns.
+        """
+        self._locality = value
+
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
+        locality = srv_grp.ServerGroup.build_scheduler_hint(
+            context, locality, name)
         api_strategy = strategy.load_api_strategy(datastore_version.manager)
         return api_strategy.cluster_class.create(context, name, datastore,
                                                  datastore_version, instances,
-                                                 extended_properties)
+                                                 extended_properties,
+                                                 locality)
 
     def validate_cluster_available(self, valid_states=[ClusterTasks.NONE]):
         if self.db_info.task_status not in valid_states:
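The srv_grp.ServerGroup helpers referenced above are provided by the dependent change (see Depends-On). As a rough sketch of the behavior this diff relies on — the helper names and call sites are real, but the bodies below are assumptions inferred from how they are used here — locality is translated into a Nova server group and the standard 'group' scheduler hint:

    # Assumed sketch of the server-group helpers used above; the real
    # implementations live in the Depends-On change. Nova's scheduler
    # accepts a server-group hint of the form {'group': <group-id>},
    # which is what gets threaded through to Instance.create(...,
    # locality=...) in the strategy files below.
    def build_scheduler_hint(context, locality, name):
        # Create a server group with the requested policy for a new cluster.
        if not locality:
            return None
        nova_client = remote.create_nova_client(context)
        server_group = nova_client.server_groups.create(
            name="locality_%s" % name, policies=[locality])
        return {'group': server_group.id}

    def convert_to_hint(server_group):
        # Reuse the cluster's existing group, e.g. on cluster grow.
        return {'group': server_group.id} if server_group else None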
@@ -224,6 +254,11 @@ class Cluster(object):
 
         self.update_db(task_status=ClusterTasks.DELETING)
 
+        # we force the server-group delete here since we need to load the
+        # group while the instances still exist. Also, since the instances
+        # take a while to be removed they might not all be gone even if we
+        # do it after the delete.
+        srv_grp.ServerGroup.delete(self.context, self.server_group, force=True)
         for db_inst in db_insts:
             instance = inst_models.load_any_instance(self.context, db_inst.id)
             instance.delete()
@@ -261,7 +296,7 @@ class Cluster(object):
 
     @staticmethod
     def load_instance(context, cluster_id, instance_id):
-        return inst_models.load_instance_with_guest(
+        return inst_models.load_instance_with_info(
             inst_models.DetailInstance, context, instance_id, cluster_id)
 
     @staticmethod
@@ -172,12 +172,24 @@ class ClusterController(wsgi.Controller):
                              "nics": nics,
                              "availability_zone": availability_zone})
 
+        locality = body['cluster'].get('locality')
+        if locality:
+            locality_domain = ['affinity', 'anti-affinity']
+            locality_domain_msg = ("Invalid locality '%s'. "
+                                   "Must be one of ['%s']" %
+                                   (locality,
+                                    "', '".join(locality_domain)))
+            if locality not in locality_domain:
+                raise exception.BadRequest(msg=locality_domain_msg)
+
         context.notification = notification.DBaaSClusterCreate(context,
                                                                request=req)
         with StartNotification(context, name=name, datastore=datastore.name,
                                datastore_version=datastore_version.name):
             cluster = models.Cluster.create(context, name, datastore,
                                             datastore_version, instances,
-                                            extended_properties)
+                                            extended_properties,
+                                            locality)
+        cluster.locality = locality
         view = views.load_view(cluster, req=req, load_servers=False)
         return wsgi.Result(view.data(), 200)
@@ -53,6 +53,8 @@ class ClusterView(object):
         if extended_properties:
             cluster_dict["extended_properties"] = extended_properties
 
+        if self.cluster.locality:
+            cluster_dict['locality'] = self.cluster.locality
         LOG.debug(cluster_dict)
         return {"cluster": cluster_dict}
 
@@ -255,7 +255,8 @@ cluster = {
                         "modules": module_list,
                     }
                 }
-            }
+            },
+            "locality": non_empty_string
         }
     }
 }
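The new 'locality' key reuses the existing non_empty_string schema element, so schema-level validation only enforces a non-empty printable string; the affinity/anti-affinity domain check happens in the controller (service.py hunk above). A standalone sketch of the schema behavior, with the pattern taken from the unit tests in this change (the rest of the inline schema is an assumption mirroring non_empty_string):

    # Sketch: what the schema does (and does not) catch for 'locality'.
    import jsonschema

    locality_schema = {
        "type": "string",
        "minLength": 1,
        "pattern": "^.*[0-9a-zA-Z]+.*$",
    }
    validator = jsonschema.Draft4Validator(locality_schema)
    print(validator.is_valid("anti-affinity"))  # True
    print(validator.is_valid("$%^&"))           # False (no alphanumerics)
    print(validator.is_valid("bogus"))          # True here; the controller
                                                # rejects it with BadRequest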
@@ -19,6 +19,7 @@ from trove.cluster import models
 from trove.cluster.tasks import ClusterTasks
 from trove.cluster.views import ClusterView
 from trove.common import cfg
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import base
 from trove.common.strategies.cluster.experimental.cassandra.taskmanager import (
     CassandraClusterTasks)
@@ -81,7 +82,7 @@ class CassandraCluster(models.Cluster):
 
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
         LOG.debug("Processing a request for creating a new cluster.")
 
         # Updating Cluster Task.
@@ -92,7 +93,8 @@ class CassandraCluster(models.Cluster):
 
         cls._create_cluster_instances(
             context, db_info.id, db_info.name,
-            datastore, datastore_version, instances, extended_properties)
+            datastore, datastore_version, instances, extended_properties,
+            locality)
 
         # Calling taskmanager to further proceed for cluster-configuration.
         task_api.load(context, datastore_version.manager).create_cluster(
@@ -103,7 +105,8 @@ class CassandraCluster(models.Cluster):
     @classmethod
     def _create_cluster_instances(
             cls, context, cluster_id, cluster_name,
-            datastore, datastore_version, instances, extended_properties=None):
+            datastore, datastore_version, instances, extended_properties,
+            locality):
         LOG.debug("Processing a request for new cluster instances.")
 
         cassandra_conf = CONF.get(datastore_version.manager)
@@ -151,7 +154,8 @@ class CassandraCluster(models.Cluster):
                 nics=instance.get('nics', None),
                 availability_zone=instance_az,
                 configuration_id=None,
-                cluster_config=member_config)
+                cluster_config=member_config,
+                locality=locality)
 
             new_instances.append(new_instance)
 
@@ -173,9 +177,10 @@ class CassandraCluster(models.Cluster):
 
         db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
 
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         new_instances = self._create_cluster_instances(
             context, db_info.id, db_info.name, datastore, datastore_version,
-            instances)
+            instances, None, locality)
 
         task_api.load(context, datastore_version.manager).grow_cluster(
             db_info.id, [instance.id for instance in new_instances])
@@ -22,6 +22,7 @@ from trove.cluster.views import ClusterView
 from trove.common import cfg
 from trove.common import exception
 from trove.common import remote
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import base as cluster_base
 from trove.extensions.mgmt.clusters.views import MgmtClusterView
 from trove.instance.models import DBInstance
@@ -115,7 +116,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
 
     @staticmethod
    def _create_instances(context, db_info, datastore, datastore_version,
-                          instances):
+                          instances, extended_properties, locality):
         member_config = {"id": db_info.id,
                          "instance_type": "member"}
         name_index = 1
@@ -137,13 +138,14 @@ class GaleraCommonCluster(cluster_models.Cluster):
                     'availability_zone', None),
                 nics=instance.get('nics', None),
                 configuration_id=None,
-                cluster_config=member_config
+                cluster_config=member_config,
+                locality=locality
                 )
             for instance in instances]
 
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
         LOG.debug("Initiating Galera cluster creation.")
         cls._validate_cluster_instances(context, instances, datastore,
                                         datastore_version)
@@ -154,7 +156,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
             task_status=ClusterTasks.BUILDING_INITIAL)
 
         cls._create_instances(context, db_info, datastore, datastore_version,
-                              instances)
+                              instances, extended_properties, locality)
 
         # Calling taskmanager to further proceed for cluster-configuration
         task_api.load(context, datastore_version.manager).create_cluster(
@@ -187,8 +189,10 @@ class GaleraCommonCluster(cluster_models.Cluster):
         for instance in instances:
             instance["nics"] = interface_ids
 
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         new_instances = self._create_instances(
-            context, db_info, datastore, datastore_version, instances)
+            context, db_info, datastore, datastore_version, instances,
+            None, locality)
 
         task_api.load(context, datastore_version.manager).grow_cluster(
             db_info.id, [instance.id for instance in new_instances])
@@ -25,6 +25,7 @@ from trove.common.i18n import _
 from trove.common.notification import DBaaSClusterGrow
 from trove.common.notification import StartNotification
 from trove.common import remote
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import base
 from trove.common import utils
 from trove.datastore import models as datastore_models
@@ -57,7 +58,7 @@ class MongoDbCluster(models.Cluster):
 
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
 
         # TODO(amcreynolds): consider moving into CONF and even supporting
         # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
@@ -144,7 +145,8 @@ class MongoDbCluster(models.Cluster):
                 availability_zone=azs[i],
                 nics=nics[i],
                 configuration_id=None,
-                cluster_config=member_config)
+                cluster_config=member_config,
+                locality=locality)
 
         for i in range(1, num_configsvr + 1):
             instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
@@ -157,7 +159,8 @@ class MongoDbCluster(models.Cluster):
                 availability_zone=None,
                 nics=None,
                 configuration_id=None,
-                cluster_config=configsvr_config)
+                cluster_config=configsvr_config,
+                locality=locality)
 
         for i in range(1, num_mongos + 1):
             instance_name = "%s-%s-%s" % (name, "mongos", str(i))
@@ -170,7 +173,8 @@ class MongoDbCluster(models.Cluster):
                 availability_zone=None,
                 nics=None,
                 configuration_id=None,
-                cluster_config=mongos_config)
+                cluster_config=mongos_config,
+                locality=locality)
 
         task_api.load(context, datastore_version.manager).create_cluster(
             db_info.id)
@@ -276,6 +280,7 @@ class MongoDbCluster(models.Cluster):
                          "instance_type": "member",
                          "replica_set_name": new_replica_set_name,
                          "key": key}
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         for i in range(1, num_members_per_shard + 1):
             instance_name = "%s-%s-%s" % (self.name, new_replica_set_name,
                                           str(i))
@@ -288,7 +293,8 @@ class MongoDbCluster(models.Cluster):
                 availability_zone=None,
                 nics=None,
                 configuration_id=None,
-                cluster_config=member_config)
+                cluster_config=member_config,
+                locality=locality)
 
         self.update_db(task_status=ClusterTasks.ADDING_SHARD)
         manager.mongodb_add_shard_cluster(
@@ -316,12 +322,13 @@ class MongoDbCluster(models.Cluster):
         self._check_instances(self.context, query_routers,
                               self.datastore_version)
         # all checks are done before any instances are created
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         instance_ids = []
         for shard in shards:
-            instance_ids.extend(self._create_shard_instances(shard))
+            instance_ids.extend(self._create_shard_instances(shard, locality))
         if query_routers:
             instance_ids.extend(
-                self._create_query_router_instances(query_routers)
+                self._create_query_router_instances(query_routers, locality)
             )
 
         self.update_db(task_status=ClusterTasks.GROWING_CLUSTER)
@@ -400,7 +407,7 @@ class MongoDbCluster(models.Cluster):
         self.manager.shrink_cluster(self.id, instance_ids)
 
     def _create_instances(self, instances, cluster_config,
-                          default_name_tag, key=None):
+                          default_name_tag, locality, key=None):
         """Loop through the instances and create them in this cluster."""
         cluster_config['id'] = self.id
         if CONF.get(self.datastore_version.manager).cluster_secure:
@@ -418,12 +425,13 @@ class MongoDbCluster(models.Cluster):
                 instance['volume_size'], None,
                 availability_zone=instance.get('availability_zone', None),
                 nics=instance.get('nics', None),
-                cluster_config=cluster_config
+                cluster_config=cluster_config,
+                locality=locality
             )
             instance_ids.append(new_instance.id)
         return instance_ids
 
-    def _create_shard_instances(self, instances,
+    def _create_shard_instances(self, instances, locality,
                                 replica_set_name=None, key=None):
         """Create the instances for a new shard in the cluster."""
         shard_id = utils.generate_uuid()
@@ -433,13 +441,13 @@ class MongoDbCluster(models.Cluster):
                           'instance_type': 'member',
                           'replica_set_name': replica_set_name}
         return self._create_instances(instances, cluster_config,
-                                      replica_set_name, key=key)
+                                      replica_set_name, locality, key=key)
 
-    def _create_query_router_instances(self, instances, key=None):
+    def _create_query_router_instances(self, instances, locality, key=None):
         """Create the instances for the new query router."""
         cluster_config = {'instance_type': 'query_router'}
         return self._create_instances(instances, cluster_config,
-                                      'mongos', key=key)
+                                      'mongos', locality, key=key)
 
     def _prep_resize(self):
         """Get information about the cluster's current state."""
@@ -21,6 +21,7 @@ from trove.cluster.views import ClusterView
 from trove.common import cfg
 from trove.common import exception
 from trove.common import remote
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import base
 from trove.extensions.mgmt.clusters.views import MgmtClusterView
 from trove.instance import models as inst_models
@@ -49,7 +50,7 @@ class RedisCluster(models.Cluster):
 
     @staticmethod
     def _create_instances(context, db_info, datastore, datastore_version,
-                          instances):
+                          instances, extended_properties, locality):
         Redis_conf = CONF.get(datastore_version.manager)
         num_instances = len(instances)
         total_volume_allocation = 0
@@ -103,13 +104,14 @@ class RedisCluster(models.Cluster):
                 configuration_id=None,
                 cluster_config={
                     "id": db_info.id,
-                    "instance_type": "member"}
+                    "instance_type": "member"},
+                locality=locality
                 )
             for instance in instances]
 
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
         LOG.debug("Initiating cluster creation.")
 
         # Updating Cluster Task
@@ -120,7 +122,7 @@ class RedisCluster(models.Cluster):
             task_status=ClusterTasks.BUILDING_INITIAL)
 
         cls._create_instances(context, db_info, datastore, datastore_version,
-                              instances)
+                              instances, extended_properties, locality)
 
         # Calling taskmanager to further proceed for cluster-configuration
         task_api.load(context, datastore_version.manager).create_cluster(
@@ -140,9 +142,10 @@ class RedisCluster(models.Cluster):
 
         db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
 
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         new_instances = self._create_instances(context, db_info,
                                                datastore, datastore_version,
-                                               instances)
+                                               instances, None, locality)
 
         task_api.load(context, datastore_version.manager).grow_cluster(
             db_info.id, [instance.id for instance in new_instances])
@@ -20,6 +20,7 @@ from trove.cluster.views import ClusterView
 from trove.common import cfg
 from trove.common import exception
 from trove.common import remote
+from trove.common import server_group as srv_grp
 from trove.common.strategies.cluster import base
 from trove.common import utils
 from trove.extensions.mgmt.clusters.views import MgmtClusterView
@@ -70,7 +71,8 @@ class VerticaCluster(models.Cluster):
 
     @staticmethod
     def _create_instances(context, db_info, datastore, datastore_version,
-                          instances, new_cluster):
+                          instances, extended_properties, locality,
+                          new_cluster=True):
         vertica_conf = CONF.get(datastore_version.manager)
         num_instances = len(instances)
 
@@ -78,8 +80,8 @@ class VerticaCluster(models.Cluster):
         num_existing = len(existing)
 
         # Matching number of instances with configured cluster_member_count
-        if new_cluster \
-                and num_instances != vertica_conf.cluster_member_count:
+        if (new_cluster and
+                num_instances != vertica_conf.cluster_member_count):
             raise exception.ClusterNumInstancesNotSupported(
                 num_instances=vertica_conf.cluster_member_count)
 
@@ -140,13 +142,14 @@ class VerticaCluster(models.Cluster):
                 nics=nics[i],
                 availability_zone=azs[i],
                 configuration_id=None,
-                cluster_config=member_config)
+                cluster_config=member_config,
+                locality=locality)
             )
         return minstances
 
     @classmethod
     def create(cls, context, name, datastore, datastore_version,
-               instances, extended_properties):
+               instances, extended_properties, locality):
         LOG.debug("Initiating cluster creation.")
 
         vertica_conf = CONF.get(datastore_version.manager)
@@ -163,7 +166,8 @@ class VerticaCluster(models.Cluster):
             task_status=ClusterTasks.BUILDING_INITIAL)
 
         cls._create_instances(context, db_info, datastore, datastore_version,
-                              instances, new_cluster=True)
+                              instances, extended_properties, locality,
+                              new_cluster=True)
         # Calling taskmanager to further proceed for cluster-configuration
         task_api.load(context, datastore_version.manager).create_cluster(
             db_info.id)
@@ -196,8 +200,10 @@ class VerticaCluster(models.Cluster):
 
         db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
 
+        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
         new_instances = self._create_instances(context, db_info, datastore,
                                                datastore_version, instances,
+                                               None, locality,
                                                new_cluster=False)
 
         task_api.load(context, datastore_version.manager).grow_cluster(
@@ -40,6 +40,16 @@ class ClusterActionsGroup(TestGroup):
         """Create a cluster."""
         self.test_runner.run_cluster_create()
 
+    @test(depends_on=[cluster_create])
+    def cluster_list(self):
+        """List the clusters."""
+        self.test_runner.run_cluster_list()
+
+    @test(depends_on=[cluster_create])
+    def cluster_show(self):
+        """Show a cluster."""
+        self.test_runner.run_cluster_show()
+
     @test(depends_on=[cluster_create])
     def add_initial_cluster_data(self):
         """Add data to cluster."""
@@ -61,7 +71,8 @@ class ClusterActionsGroup(TestGroup):
         self.test_runner.run_verify_cluster_root_enable()
 
     @test(depends_on=[cluster_create],
-          runs_after=[verify_initial_cluster_data, verify_cluster_root_enable])
+          runs_after=[verify_initial_cluster_data, verify_cluster_root_enable,
+                      cluster_list, cluster_show])
     def cluster_grow(self):
         """Grow cluster."""
         self.test_runner.run_cluster_grow()
@@ -41,8 +41,13 @@ class ClusterActionsRunner(TestRunner):
     def __init__(self):
         super(ClusterActionsRunner, self).__init__()
 
+        self.cluster_name = 'test_cluster'
         self.cluster_id = 0
+        self.cluster_inst_ids = None
+        self.cluster_count_before_create = None
+        self.srv_grp_id = None
         self.current_root_creds = None
+        self.locality = 'affinity'
 
     @property
     def is_using_existing_cluster(self):
@@ -52,9 +57,15 @@ class ClusterActionsRunner(TestRunner):
     def has_do_not_delete_cluster(self):
         return self.has_env_flag(self.DO_NOT_DELETE_CLUSTER_FLAG)
 
+    @property
+    def min_cluster_node_count(self):
+        return 2
+
     def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING',
                            expected_instance_states=['BUILD', 'ACTIVE'],
                            expected_http_code=200):
+        self.cluster_count_before_create = len(
+            self.auth_client.clusters.list())
         if not num_nodes:
             num_nodes = self.min_cluster_node_count
 
@@ -64,15 +75,11 @@ class ClusterActionsRunner(TestRunner):
             volume_size=self.instance_info.volume['size'])] * num_nodes
 
         self.cluster_id = self.assert_cluster_create(
-            'test_cluster', instances_def, expected_task_name,
-            expected_instance_states, expected_http_code)
-
-    @property
-    def min_cluster_node_count(self):
-        return 2
+            self.cluster_name, instances_def, self.locality,
+            expected_task_name, expected_instance_states, expected_http_code)
 
     def assert_cluster_create(
-            self, cluster_name, instances_def, expected_task_name,
+            self, cluster_name, instances_def, locality, expected_task_name,
             expected_instance_states, expected_http_code):
         self.report.log("Testing cluster create: %s" % cluster_name)
 
@@ -86,8 +93,11 @@ class ClusterActionsRunner(TestRunner):
         cluster = self.auth_client.clusters.create(
             cluster_name, self.instance_info.dbaas_datastore,
             self.instance_info.dbaas_datastore_version,
-            instances=instances_def)
-        self._assert_cluster_action(cluster.id, expected_task_name,
+            instances=instances_def, locality=locality)
+        self._assert_cluster_values(cluster, expected_task_name)
+        # Don't give an expected task here or it will do a 'get' on
+        # the cluster. We tested the cluster values above.
+        self._assert_cluster_action(cluster.id, None,
                                     expected_http_code)
         cluster_instances = self._get_cluster_instances(cluster.id)
         self.assert_all_instance_states(
@@ -95,6 +105,13 @@ class ClusterActionsRunner(TestRunner):
         # Create the helper user/database on the first node.
         # The cluster should handle the replication itself.
         self.create_test_helper_on_instance(cluster_instances[0])
+        # make sure the server_group was created
+        self.cluster_inst_ids = [inst.id for inst in cluster_instances]
+        for id in self.cluster_inst_ids:
+            srv_grp_id = self.assert_server_group_exists(id)
+            if self.srv_grp_id and self.srv_grp_id != srv_grp_id:
+                self.fail("Found multiple server groups for cluster")
+            self.srv_grp_id = srv_grp_id
 
         cluster_id = cluster.id
 
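The assert_server_group_exists/assert_server_group_gone helpers above come from the dependent test-support change. Conceptually, verifying 'affinity' boils down to checking that every server of the cluster is a member of a single Nova server group with the expected policy; a hedged sketch of that idea using python-novaclient directly (the 'nova' client and 'server_ids' wiring are assumptions, not how the scenario framework is actually plumbed):

    # Conceptual sketch only; the scenario tests use helper assertions
    # from the dependent change rather than this direct novaclient code.
    def find_cluster_server_group(nova, server_ids):
        # One server group should contain every server of the cluster.
        groups = [sg for sg in nova.server_groups.list()
                  if set(server_ids) <= set(sg.members)]
        assert len(groups) == 1, "expected exactly one server group"
        assert 'affinity' in groups[0].policies, "unexpected policy"
        return groups[0].id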
@@ -103,7 +120,6 @@ class ClusterActionsRunner(TestRunner):
         # it may take up to the periodic task interval until the task name
         # gets updated in the Trove database.
         self._assert_cluster_states(cluster_id, ['NONE'])
-        self._assert_cluster_response(cluster_id, 'NONE')
 
         return cluster_id
 
@@ -112,7 +128,26 @@ class ClusterActionsRunner(TestRunner):
             cluster_id = os.environ.get(self.USE_CLUSTER_ID_FLAG)
             return self.auth_client.clusters.get(cluster_id)
 
         return None
 
+    def run_cluster_list(self, expected_http_code=200):
+        self.assert_cluster_list(
+            self.cluster_count_before_create + 1,
+            expected_http_code)
+
+    def assert_cluster_list(self, expected_count,
+                            expected_http_code):
+        count = len(self.auth_client.clusters.list())
+        self.assert_client_code(expected_http_code)
+        self.assert_equal(expected_count, count, "Unexpected cluster count")
+
+    def run_cluster_show(self, expected_http_code=200,
+                         expected_task_name='NONE'):
+        self.assert_cluster_show(
+            self.cluster_id, expected_task_name, expected_http_code)
+
+    def assert_cluster_show(self, cluster_id, expected_task_name,
+                            expected_http_code):
+        self._assert_cluster_response(cluster_id, expected_task_name)
+
     def run_cluster_root_enable(self, expected_task_name=None,
                                 expected_http_code=200):
@@ -267,11 +302,15 @@ class ClusterActionsRunner(TestRunner):
         cluster_instances = self._get_cluster_instances(cluster_id)
 
         self.auth_client.clusters.delete(cluster_id)
+        # Since the server_group is removed right at the beginning of the
+        # cluster delete process we can't check for locality anymore.
         self._assert_cluster_action(cluster_id, expected_task_name,
-                                    expected_http_code)
+                                    expected_http_code, check_locality=False)
 
         self.assert_all_gone(cluster_instances, expected_last_instance_state)
         self._assert_cluster_gone(cluster_id)
+        # make sure the server group is gone too
+        self.assert_server_group_gone(self.srv_grp_id)
 
     def _get_cluster_instances(self, cluster_id):
         cluster = self.auth_client.clusters.get(cluster_id)
@@ -279,11 +318,13 @@ class ClusterActionsRunner(TestRunner):
                 for instance in cluster.instances]
 
     def _assert_cluster_action(
-            self, cluster_id, expected_state, expected_http_code):
+            self, cluster_id, expected_task_name, expected_http_code,
+            check_locality=True):
         if expected_http_code is not None:
             self.assert_client_code(expected_http_code)
-        if expected_state:
-            self._assert_cluster_response(cluster_id, expected_state)
+        if expected_task_name:
+            self._assert_cluster_response(cluster_id, expected_task_name,
+                                          check_locality=check_locality)
 
     def _assert_cluster_states(self, cluster_id, expected_states,
                                fast_fail_status=None):
@@ -314,8 +355,15 @@ class ClusterActionsRunner(TestRunner):
                             % (cluster_id, task))
             return task_name == task
 
-    def _assert_cluster_response(self, cluster_id, expected_state):
+    def _assert_cluster_response(self, cluster_id, expected_task_name,
+                                 expected_http_code=200, check_locality=True):
         cluster = self.auth_client.clusters.get(cluster_id)
+        self.assert_client_code(expected_http_code)
+        self._assert_cluster_values(cluster, expected_task_name,
+                                    check_locality=check_locality)
+
+    def _assert_cluster_values(self, cluster, expected_task_name,
+                               check_locality=True):
         with TypeCheck('Cluster', cluster) as check:
             check.has_field("id", six.string_types)
             check.has_field("name", six.string_types)
@@ -324,13 +372,18 @@ class ClusterActionsRunner(TestRunner):
             check.has_field("links", list)
             check.has_field("created", six.text_type)
             check.has_field("updated", six.text_type)
+            if check_locality:
+                check.has_field("locality", six.text_type)
             for instance in cluster.instances:
                 isinstance(instance, dict)
                 self.assert_is_not_none(instance['id'])
                 self.assert_is_not_none(instance['links'])
                 self.assert_is_not_none(instance['name'])
-        self.assert_equal(expected_state, cluster.task['name'],
+        self.assert_equal(expected_task_name, cluster.task['name'],
                           'Unexpected cluster task name')
+        if check_locality:
+            self.assert_equal(self.locality, cluster.locality,
+                              "Unexpected cluster locality")
 
     def _assert_cluster_gone(self, cluster_id):
         t0 = timer.time()
@@ -53,7 +53,7 @@ class ClusterTest(trove_testtools.TestCase):
         CassandraCluster._create_cluster_instances(
             self.context, 'test_cluster_id', 'test_cluster',
             datastore, datastore_version,
-            test_instances)
+            test_instances, None, None)
 
         check_quotas.assert_called_once_with(
             ANY, instances=num_instances, volumes=get_vol_size.return_value)
@@ -61,6 +61,7 @@ class ClusterTest(trove_testtools.TestCase):
         self.cluster = mongodb_api.MongoDbCluster(self.context, self.db_info,
                                                   self.datastore,
                                                   self.datastore_version)
+        self.cluster._server_group_loaded = True
         self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
                           {'volume_size': 1, 'flavor_id': '1234'},
                           {'volume_size': 1, 'flavor_id': '1234'}]
@@ -80,7 +81,7 @@ class ClusterTest(trove_testtools.TestCase):
                           self.datastore,
                           self.datastore_version,
                           [],
-                          None
+                          None, None
                           )
 
     def test_create_unequal_flavors(self):
@@ -93,7 +94,7 @@ class ClusterTest(trove_testtools.TestCase):
                           self.datastore,
                           self.datastore_version,
                           instances,
-                          None
+                          None, None
                           )
 
     @patch.object(remote, 'create_nova_client')
@@ -110,7 +111,7 @@ class ClusterTest(trove_testtools.TestCase):
                           self.datastore,
                           self.datastore_version,
                           instances,
-                          None
+                          None, None
                           )
 
     @patch.object(remote, 'create_nova_client')
@@ -140,7 +141,7 @@ class ClusterTest(trove_testtools.TestCase):
                           self.datastore,
                           self.datastore_version,
                           instances,
-                          None
+                          None, None
                           )
 
     @patch('trove.cluster.models.LOG')
@@ -37,6 +37,7 @@ class TestClusterController(TestCase):
     def setUp(self):
         super(TestClusterController, self).setUp()
         self.controller = ClusterController()
+        self.locality = 'anti-affinity'
        instances = [
             {
                 "flavorRef": "7",
@@ -57,7 +58,8 @@ class TestClusterController(TestCase):
                     "type": "mongodb",
                     "version": "2.4.10"
                 },
-                "instances": instances
+                "instances": instances,
+                "locality": self.locality,
             }
         }
         self.add_shard = {
@@ -112,6 +114,20 @@ class TestClusterController(TestCase):
         self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
         self.assertIn("type", error_paths)
 
+    def test_validate_create_bad_locality(self):
+        body = self.cluster
+        body['cluster']['locality'] = "$%^&"
+        schema = self.controller.get_schema('create', body)
+        validator = jsonschema.Draft4Validator(schema)
+        self.assertFalse(validator.is_valid(body))
+        errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
+        error_messages = [error.message for error in errors]
+        error_paths = [error.path.pop() for error in errors]
+        self.assertThat(len(errors), Is(1))
+        self.assertIn("'$%^&' does not match '^.*[0-9a-zA-Z]+.*$'",
+                      error_messages)
+        self.assertIn("locality", error_paths)
+
     @patch.object(Cluster, 'create')
     @patch.object(datastore_models, 'get_datastore_version')
     def test_create_clusters_disabled(self,
@@ -176,7 +192,8 @@ class TestClusterController(TestCase):
         self.controller.create(req, body, tenant_id)
         mock_cluster_create.assert_called_with(context, 'products',
                                                datastore, datastore_version,
-                                               instances, {})
+                                               instances, {},
+                                               self.locality)
 
     @patch.object(Cluster, 'load')
     def test_show_cluster(self,
@@ -193,6 +210,7 @@ class TestClusterController(TestCase):
         mock_cluster.instances_without_server = []
         mock_cluster.datastore_version.manager = 'mongodb'
         mock_cluster_load.return_value = mock_cluster
+        mock_cluster.locality = self.locality
 
         self.controller.show(req, tenant_id, id)
         mock_cluster_load.assert_called_with(context, id)
@@ -19,6 +19,7 @@ from trove.cluster import models
 from trove.common.strategies.cluster.experimental.mongodb.api import (
     MongoDbCluster)
 from trove.datastore import models as datastore_models
+from trove.instance import models as instance_models
 from trove.tests.unittests import trove_testtools
 
 
@@ -27,12 +28,20 @@ class TestClusterModel(trove_testtools.TestCase):
     @patch.object(datastore_models.Datastore, 'load')
     @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
     @patch.object(models.DBCluster, 'find_by')
-    def test_load(self, mock_find_by, mock_load_dsv_by_uuid, mock_ds_load):
+    @patch.object(instance_models.Instances, 'load_all_by_cluster_id')
+    def test_load(self, mock_inst_load, mock_find_by,
+                  mock_load_dsv_by_uuid, mock_ds_load):
         context = trove_testtools.TroveTestContext(self)
         id = Mock()
+        inst_mock = Mock()
+        server_group = Mock()
+        inst_mock.server_group = server_group
+        mock_inst_load.return_value = [inst_mock]
 
         dsv = Mock()
         dsv.manager = 'mongodb'
         mock_load_dsv_by_uuid.return_value = dsv
         cluster = models.Cluster.load(context, id)
         self.assertIsInstance(cluster, MongoDbCluster)
+        self.assertEqual(server_group, cluster.server_group,
+                         "Unexpected server group")
@@ -157,7 +157,7 @@ class TestClusterController(trove_testtools.TestCase):
         self.controller.create(req, body, tenant_id)
         mock_cluster_create.assert_called_with(context, 'products',
                                                datastore, datastore_version,
-                                               instances, {})
+                                               instances, {}, None)
 
     @patch.object(Cluster, 'load')
     def test_show_cluster(self,
@@ -190,7 +190,7 @@ class TestClusterController(trove_testtools.TestCase):
         self.controller.create(req, body, tenant_id)
         mock_cluster_create.assert_called_with(context, 'products',
                                                datastore, datastore_version,
-                                               instances, {})
+                                               instances, {}, None)
 
     @patch.object(Cluster, 'load')
     def test_show_cluster(self,
@@ -157,7 +157,7 @@ class TestClusterController(trove_testtools.TestCase):
         self.controller.create(req, body, tenant_id)
         mock_cluster_create.assert_called_with(context, 'products',
                                                datastore, datastore_version,
-                                               instances, {})
+                                               instances, {}, None)
 
     @patch.object(Cluster, 'load')
     def test_show_cluster(self,
@@ -33,6 +33,7 @@ class ClusterViewTest(trove_testtools.TestCase):
 
     def setUp(self):
         super(ClusterViewTest, self).setUp()
+        self.locality = 'anti-affinity'
         self.cluster = Mock()
         self.cluster.created = 'Yesterday'
         self.cluster.updated = 'Now'
@@ -46,6 +47,7 @@ class ClusterViewTest(trove_testtools.TestCase):
         self.cluster.instances[0].volume.size = 1
         self.cluster.instances[0].slave_of_id = None
         self.cluster.instances[0].slaves = None
+        self.cluster.locality = self.locality
 
     def tearDown(self):
         super(ClusterViewTest, self).tearDown()
@@ -64,6 +66,7 @@ class ClusterViewTest(trove_testtools.TestCase):
         self.assertEqual(self.cluster.name, result['cluster']['name'])
         self.assertEqual(self.cluster.datastore_version.name,
                          result['cluster']['datastore']['version'])
+        self.assertEqual(self.locality, result['cluster']['locality'])
 
     @patch.object(ClusterView, 'build_instances', return_value=('10.0.0.1',
                                                                 []))
@@ -65,6 +65,7 @@ class ClusterTest(trove_testtools.TestCase):
         self.datastore_version = self.dv
         self.cluster = galera_api.GaleraCommonCluster(
             self.context, self.db_info, self.datastore, self.datastore_version)
+        self.cluster._server_group_loaded = True
         self.instances = [
             {'volume_size': 1, 'flavor_id': '1234',
              'nics': [{"net-id": "foo-bar"}]},
@@ -83,7 +84,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            [], {},
+            [], {}, None
         )
 
     def test_create_flavor_not_specified(self):
@@ -95,7 +96,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(remote, 'create_nova_client')
@@ -115,7 +116,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(remote, 'create_nova_client')
@@ -131,7 +132,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(remote, 'create_nova_client')
@@ -151,7 +152,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(remote, 'create_nova_client')
@@ -183,7 +184,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(remote, 'create_nova_client')
@@ -198,7 +199,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {}
+            instances, {}, None
         )
 
     @patch.object(inst_models.DBInstance, 'find_all')
@@ -218,7 +219,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {})
+            instances, {}, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             mock_db_create.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)
@@ -240,7 +241,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {})
+            instances, {}, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             mock_db_create.return_value.id)
         self.assertEqual(4, mock_ins_create.call_count)
@@ -278,7 +279,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            instances, {})
+            instances, {}, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             mock_db_create.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)
@@ -50,6 +50,7 @@ class MongoDBClusterTest(trove_testtools.TestCase):
         self.cluster = api.MongoDbCluster(self.context, self.db_info,
                                           self.datastore,
                                           self.datastore_version)
+        self.cluster._server_group_loaded = True
         self.manager = mock.Mock()
         self.cluster.manager = self.manager
         self.volume_support = CONF.get('mongodb').volume_support
@@ -83,8 +84,9 @@ class MongoDBClusterTest(trove_testtools.TestCase):
 
         self.assertEqual(mock_prep_resize.called, True)
         mock_create_shard_instances.assert_called_with([instance1, instance2,
-                                                        instance3])
-        mock_create_query_router_instances.assert_called_with([instance4])
+                                                        instance3], None)
+        mock_create_query_router_instances.assert_called_with([instance4],
+                                                              None)
         mock_update_db.assert_called_with(
             task_status=tasks.ClusterTasks.GROWING_CLUSTER
         )
@@ -72,6 +72,7 @@ class ClusterTest(trove_testtools.TestCase):
         self.cluster = redis_api.RedisCluster(self.context, self.db_info,
                                               self.datastore,
                                               self.datastore_version)
+        self.cluster._server_group_loaded = True
         self.instances_w_volumes = [{'volume_size': 1,
                                      'flavor_id': '1234'}] * 3
         self.instances_no_volumes = [{'flavor_id': '1234'}] * 3
@@ -93,7 +94,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             self.instances_w_volumes,
-            {})
+            {}, None)
 
     @patch.object(remote, 'create_nova_client')
     @patch.object(redis_api, 'CONF')
@@ -107,7 +108,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             self.instances_no_volumes,
-            {})
+            {}, None)
 
     @patch.object(remote, 'create_nova_client')
     @patch.object(redis_api, 'CONF')
@@ -124,7 +125,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             self.instances_w_volumes,
-            {})
+            {}, None)
 
     @patch.object(remote, 'create_nova_client')
     @patch.object(redis_api, 'CONF')
@@ -153,7 +154,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             self.instances_no_volumes,
-            {})
+            {}, None)
 
     @patch.object(redis_api, 'CONF')
     @patch.object(inst_models.Instance, 'create')
@@ -169,7 +170,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            self.instances_w_volumes, {})
+            self.instances_w_volumes, {}, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             self.dbcreate_mock.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)
@@ -201,7 +202,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            self.instances_no_volumes, {})
+            self.instances_no_volumes, {}, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             self.dbcreate_mock.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)
@@ -83,7 +83,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.cluster_name,
             self.datastore,
             self.datastore_version,
-            [], None)
+            [], None, None)
 
     @patch.object(DBCluster, 'create')
     @patch.object(inst_models.DBInstance, 'find_all')
@@ -97,7 +97,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(DBCluster, 'create')
@@ -120,7 +120,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(DBCluster, 'create')
@@ -139,7 +139,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(DBCluster, 'create')
@@ -164,7 +164,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(DBCluster, 'create')
@@ -201,7 +201,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(DBCluster, 'create')
@@ -220,7 +220,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None
+            None, None
         )
 
     @patch.object(inst_models.DBInstance, 'find_all')
@@ -239,7 +239,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None)
+            None, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             mock_db_create.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)
@@ -278,7 +278,7 @@ class ClusterTest(trove_testtools.TestCase):
             self.datastore,
             self.datastore_version,
             instances,
-            None)
+            None, None)
         mock_task_api.return_value.create_cluster.assert_called_with(
             mock_db_create.return_value.id)
         self.assertEqual(3, mock_ins_create.call_count)