Merge "Configuration Groups for MongoDB"
commit cd70460260
@@ -39,3 +39,14 @@ class MySQLConfParser(object):
config_dict = self.CODEC.deserialize(self.config)
mysqld_section_dict = config_dict['mysqld']
return mysqld_section_dict.items()

class MongoDBConfParser(object):

CODEC = stream_codecs.SafeYamlCodec(default_flow_style=False)

def __init__(self, config):
self.config = config

def parse(self):
return self.CODEC.deserialize(self.config).items()
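
A minimal usage sketch (illustrative only, not part of the change) of the new parser, assuming the YAML codec above deserializes to a plain dict:

sample_config = "net:\n  port: 27017\nsecurity:\n  authorization: enabled\n"
parser = MongoDBConfParser(sample_config)
# parse() deserializes the YAML text and returns its top-level (key, value) pairs.
for key, value in parser.parse():
    print(key, value)  # e.g. ('net', {'port': 27017})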

@@ -16,6 +16,7 @@
import abc
import ast
import csv
import json
import six
import StringIO
import yaml
@@ -344,3 +345,12 @@ class PropertiesCodec(StreamCodec):
container.append(item)

return container

class JsonCodec(StreamCodec):

def serialize(self, dict_data):
return json.dumps(dict_data)

def deserialize(self, stream):
return json.load(StringIO.StringIO(stream))
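
For illustration (not part of the diff), a round trip through the new JsonCodec:

codec = JsonCodec()
text = codec.serialize({'username': 'os_admin', 'password': 'secret'})
# deserialize() parses the JSON text back into the original dict.
assert codec.deserialize(text) == {'username': 'os_admin', 'password': 'secret'}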

@@ -31,6 +31,7 @@ ENV = utils.ENV

# TODO(cp16net) Maybe this should be moved to a config dict
SERVICE_PARSERS = {
'mongodb': configurations.MongoDBConfParser,
'mysql': configurations.MySQLConfParser,
'percona': configurations.MySQLConfParser,
'redis': configurations.RedisConfParser,

@@ -443,14 +443,15 @@ class OneFileOverrideStrategy(ConfigurationOverrideStrategy):
self._regenerate_base_configuration()

def remove(self, group_name, change_id=None):
self._import_strategy.remove(group_name, change_id=change_id)
self._regenerate_base_configuration()
if not self._import_strategy.has_revisions:
# The base revision file is no longer needed if there are no
# overrides. It will be regenerated based on the current
# configuration file on the first 'apply()'.
operating_system.remove(self._base_revision_file, force=True,
as_root=self._requires_root)
if self._import_strategy.has_revisions:
self._import_strategy.remove(group_name, change_id=change_id)
self._regenerate_base_configuration()
if not self._import_strategy.has_revisions:
# The base revision file is no longer needed if there are no
# overrides. It will be regenerated based on the current
# configuration file on the first 'apply()'.
operating_system.remove(self._base_revision_file, force=True,
as_root=self._requires_root)

def _regenerate_base_configuration(self):
"""Gather all configuration changes and apply them in order on the base

@@ -40,6 +40,49 @@ def update_dict(updates, target):
return target

def expand_dict(target, namespace_sep='.'):
"""Expand a flat dict to a nested one.
This is an inverse of 'flatten_dict'.

:seealso: flatten_dict
"""
nested = {}
for k, v in target.items():
sub = nested
keys = k.split(namespace_sep)
for key in keys[:-1]:
sub = sub.setdefault(key, {})
sub[keys[-1]] = v

return nested

def flatten_dict(target, namespace_sep='.'):
"""Flatten a nested dict.
Return a one-level dict with all sub-level keys joined by a namespace
separator.

The following nested dict:
{'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}

would be flattened to:
{'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}
"""
def flatten(target, keys, namespace_sep):
flattened = {}
if isinstance(target, collections.Mapping):
for k, v in target.items():
flattened.update(
flatten(v, keys + [k], namespace_sep))
else:
ns = namespace_sep.join(keys)
flattened[ns] = target

return flattened

return flatten(target, [], namespace_sep)
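
A short round-trip sketch (illustrative only) based on the docstring example above:

nested = {'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}}
flat = flatten_dict(nested)
# flat == {'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10}
assert expand_dict(flat) == nested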

def build_file_path(base_dir, base_name, *extensions):
"""Build a path to a file in a given directory.
The file may have an extension(s).
@@ -17,7 +17,6 @@ import os

from oslo_log import log as logging
from oslo_service import periodic_task
from oslo_utils import netutils

from trove.common import cfg
from trove.common import exception
@@ -39,14 +38,13 @@ MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):

def __init__(self):
self.status = service.MongoDBAppStatus()
self.app = service.MongoDBApp(self.status)
self.app = service.MongoDBApp()
super(Manager, self).__init__(CONF)

@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the MongoDB service."""
self.status.update()
self.app.status.update()

def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
@@ -60,7 +58,7 @@ class Manager(periodic_task.PeriodicTasks):

LOG.debug("Preparing MongoDB instance.")

self.status.begin_install()
self.app.status.begin_install()
self.app.install_if_needed(packages)
self.app.wait_for_start()
self.app.stop_db()
@@ -81,69 +79,46 @@ class Manager(periodic_task.PeriodicTasks):
LOG.debug("Mounted the volume %(path)s as %(mount)s." %
{'path': device_path, "mount": mount_point})

self.app.secure(cluster_config)
conf_changes = self.get_config_changes(cluster_config, mount_point)
config_contents = self.app.update_config_contents(
config_contents, conf_changes)
if cluster_config is None:
self.app.start_db_with_conf_changes(config_contents)
if backup_info:
self._perform_restore(backup_info, context,
mount_point, self.app)
if service.MongoDBAdmin().is_root_enabled():
self.status.report_root('root')
elif root_password:
LOG.debug('Root password provided. Enabling root.')
service.MongoDBAdmin().enable_root(root_password)
if config_contents:
# Save resolved configuration template first.
self.app.configuration_manager.save_configuration(config_contents)

# Apply guestagent specific configuration changes.
self.app.apply_initial_guestagent_configuration(
cluster_config, mount_point)

if not cluster_config:
# Create the Trove admin user.
self.app.secure()

# Don't start mongos until add_config_servers is invoked.
if not self.app.is_query_router:
self.app.start_db(update_db=False)

if not cluster_config and backup_info:
self._perform_restore(backup_info, context, mount_point, self.app)
if service.MongoDBAdmin().is_root_enabled():
self.app.status.report_root('root')

if not cluster_config and root_password:
LOG.debug('Root password provided. Enabling root.')
service.MongoDBAdmin().enable_root(root_password)

if not cluster_config:
if databases:
self.create_database(context, databases)
if users:
self.create_user(context, users)

if cluster_config:
self.app.status.set_status(
ds_instance.ServiceStatuses.BUILD_PENDING)
else:
if cluster_config['instance_type'] == "query_router":
self.app.reset_configuration({'config_contents':
config_contents})
self.app.write_mongos_upstart()
self.app.status.is_query_router = True
# don't start mongos until add_config_servers is invoked
self.app.status.set_status(
ds_instance.ServiceStatuses.RUNNING)

elif cluster_config['instance_type'] == "config_server":
self.app.status.is_config_server = True
self.app.start_db_with_conf_changes(config_contents)

elif cluster_config['instance_type'] == "member":
self.app.start_db_with_conf_changes(config_contents)

else:
LOG.error(_("Bad cluster configuration; instance type "
"given as %s.") % cluster_config['instance_type'])
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
return

self.status.set_status(ds_instance.ServiceStatuses.BUILD_PENDING)
LOG.info(_('Completed setup of MongoDB database instance.'))

def get_config_changes(self, cluster_config, mount_point=None):
LOG.debug("Getting configuration changes.")
config_changes = {}
# todo mvandijk: uncomment the following when auth is being enabled
# config_changes['auth'] = 'true'
config_changes['bind_ip'] = ','.join([netutils.get_my_ipv4(),
'127.0.0.1'])
if cluster_config is not None:
# todo mvandijk: uncomment the following when auth is being enabled
# config_changes['keyFile'] = self.app.get_key_file()
if cluster_config["instance_type"] == "config_server":
config_changes["configsvr"] = "true"
elif cluster_config["instance_type"] == "member":
config_changes["replSet"] = cluster_config["replica_set_name"]
if (mount_point is not None and
(cluster_config is None or
cluster_config['instance_type'] != "query_router")):
config_changes['dbpath'] = mount_point

return config_changes

def restart(self, context):
LOG.debug("Restarting MongoDB.")
self.app.restart()
@@ -260,13 +235,14 @@ class Manager(periodic_task.PeriodicTasks):

def update_overrides(self, context, overrides, remove=False):
LOG.debug("Updating overrides.")
raise exception.DatastoreOperationNotSupported(
operation='update_overrides', datastore=MANAGER)
if remove:
self.app.remove_overrides()
else:
self.app.update_overrides(context, overrides, remove)

def apply_overrides(self, context, overrides):
LOG.debug("Applying overrides.")
raise exception.DatastoreOperationNotSupported(
operation='apply_overrides', datastore=MANAGER)
LOG.debug("Overrides will be applied after restart.")
pass

def get_replication_snapshot(self, context, snapshot_info,
replica_source_config=None):
@@ -320,7 +296,7 @@ class Manager(periodic_task.PeriodicTasks):
self.app.add_members(members)
LOG.debug("add_members call has finished.")
except Exception:
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
self.app.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise

def add_config_servers(self, context, config_servers):
@@ -330,7 +306,7 @@ class Manager(periodic_task.PeriodicTasks):
self.app.add_config_servers(config_servers)
LOG.debug("add_config_servers call has finished.")
except Exception:
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
self.app.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise

def add_shard(self, context, replica_set_name, replica_set_member):
@@ -341,14 +317,14 @@ class Manager(periodic_task.PeriodicTasks):
self.app.add_shard(replica_set_name, replica_set_member)
LOG.debug("add_shard call has finished.")
except Exception:
self.status.set_status(ds_instance.ServiceStatuses.FAILED)
self.app.status.set_status(ds_instance.ServiceStatuses.FAILED)
raise

def cluster_complete(self, context):
# Now that cluster creation is complete, start status checks
LOG.debug("Cluster creation complete, starting status checks.")
status = self.status._get_actual_db_status()
self.status.set_status(status)
status = self.app.status._get_actual_db_status()
self.app.status.set_status(status)

def get_key(self, context):
# Return the cluster key
@@ -13,10 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import json
import os
import re
import tempfile

from oslo_log import log as logging
from oslo_utils import netutils
@@ -28,7 +25,11 @@ from trove.common.exception import ProcessExecutionError
from trove.common.i18n import _
from trove.common import instance as ds_instance
from trove.common import pagination
from trove.common.stream_codecs import JsonCodec, SafeYamlCodec
from trove.common import utils as utils
from trove.guestagent.common.configuration import ConfigurationManager
from trove.guestagent.common.configuration import OneFileOverrideStrategy
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.experimental.mongodb import system
from trove.guestagent.datastore import service
@@ -39,6 +40,10 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONFIG_FILE = (operating_system.
file_discovery(system.CONFIG_CANDIDATES))

# Configuration group for clustering-related settings.
CNF_CLUSTER = 'clustering'

MONGODB_PORT = CONF.mongodb.mongodb_port
CONFIGSVR_PORT = CONF.mongodb.configsvr_port
IGNORED_DBS = CONF.mongodb.ignore_dbs
@@ -48,9 +53,34 @@ IGNORED_USERS = CONF.mongodb.ignore_users
class MongoDBApp(object):
"""Prepares DBaaS on a Guest container."""

def __init__(self, status):
@classmethod
def _init_overrides_dir(cls):
"""Initialize a directory for configuration overrides.
"""
revision_dir = guestagent_utils.build_file_path(
os.path.dirname(system.MONGO_USER),
ConfigurationManager.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR)

if not os.path.exists(revision_dir):
operating_system.create_directory(
revision_dir,
user=system.MONGO_USER, group=system.MONGO_USER,
force=True, as_root=True)

return revision_dir

def __init__(self):
self.state_change_wait_time = CONF.state_change_wait_time
self.status = status

revision_dir = self._init_overrides_dir()
self.configuration_manager = ConfigurationManager(
CONFIG_FILE, system.MONGO_USER, system.MONGO_USER,
SafeYamlCodec(default_flow_style=False),
requires_root=True,
override_strategy=OneFileOverrideStrategy(revision_dir))

self.is_query_router = False
self.status = MongoDBAppStatus()

def install_if_needed(self, packages):
"""Prepare the guest machine with a MongoDB installation."""
@@ -61,7 +91,7 @@ class MongoDBApp(object):
LOG.info(_("Finished installing MongoDB server."))

def _get_service(self):
if self.status._is_query_router() is True:
if self.is_query_router:
return (operating_system.
service_discovery(system.MONGOS_SERVICE_CANDIDATES))
else:
@@ -151,75 +181,141 @@ class MongoDBApp(object):
raise RuntimeError("Could not start MongoDB.")
LOG.debug('MongoDB started successfully.')

def update_overrides(self, context, overrides, remove=False):
if overrides:
self.configuration_manager.apply_user_override(overrides)

def remove_overrides(self):
self.configuration_manager.remove_user_override()

def start_db_with_conf_changes(self, config_contents):
LOG.info(_("Starting MongoDB with configuration changes."))
LOG.info(_("Configuration contents:\n %s.") % config_contents)
LOG.info(_('Starting MongoDB with configuration changes.'))
if self.status.is_running:
LOG.error(_("Cannot start MongoDB with configuration changes. "
"MongoDB state == %s.") % self.status)
raise RuntimeError("MongoDB is not stopped.")
self._write_config(config_contents)
format = 'Cannot start_db_with_conf_changes because status is %s.'
LOG.debug(format, self.status)
raise RuntimeError(format % self.status)
LOG.info(_("Initiating config."))
self.configuration_manager.save_configuration(config_contents)
# The configuration template has to be updated with
# guestagent-controlled settings.
self.apply_initial_guestagent_configuration(
None, mount_point=system.MONGODB_MOUNT_POINT)
self.start_db(True)

def reset_configuration(self, configuration):
config_contents = configuration['config_contents']
LOG.info(_("Resetting configuration."))
self._write_config(config_contents)
config_contents = configuration['config_contents']
self.configuration_manager.save_configuration(config_contents)

def update_config_contents(self, config_contents, parameters):
LOG.info(_("Updating configuration contents."))
if not config_contents:
config_contents = self._read_config()
def apply_initial_guestagent_configuration(
self, cluster_config, mount_point=None):
LOG.debug("Applying initial configuration.")

contents = self._delete_config_parameters(config_contents,
parameters.keys())
for param, value in parameters.items():
if param and value:
contents = self._add_config_parameter(contents,
param, value)
return contents
# todo mvandijk: enable authorization.
# 'security.authorization': True
self.configuration_manager.apply_system_override(
{'processManagement.fork': False,
'processManagement.pidFilePath': system.MONGO_PID_FILE,
'systemLog.destination': 'file',
'systemLog.path': system.MONGO_LOG_FILE,
'systemLog.logAppend': True
})

def _write_config(self, config_contents):
"""
Update contents of MongoDB configuration file
"""
LOG.info(_("Updating MongoDB config."))
if config_contents:
LOG.info(_("Writing %s.") % system.TMP_CONFIG)
try:
with open(system.TMP_CONFIG, 'w') as t:
t.write(config_contents)
if mount_point:
self.configuration_manager.apply_system_override(
{'storage.dbPath': mount_point})

LOG.info(_("Moving %(a)s to %(b)s.")
% {'a': system.TMP_CONFIG, 'b': CONFIG_FILE})
operating_system.move(system.TMP_CONFIG, CONFIG_FILE,
as_root=True)
except Exception:
os.unlink(system.TMP_CONFIG)
raise
if cluster_config is not None:
self._configure_as_cluster_instance(cluster_config)
else:
LOG.debug("Empty config_contents. Do nothing.")
self._configure_network(MONGODB_PORT)

def _read_config(self):
try:
with open(CONFIG_FILE, 'r') as f:
return f.read()
except IOError:
LOG.info(_("Config file %s not found.") % CONFIG_FILE)
return ''
def _configure_as_cluster_instance(self, cluster_config):
"""Configure this guest as a cluster instance and return its
new status.
"""
if cluster_config['instance_type'] == "query_router":
self._configure_as_query_router()
elif cluster_config["instance_type"] == "config_server":
self._configure_as_config_server()
elif cluster_config["instance_type"] == "member":
self._configure_as_cluster_member(
cluster_config['replica_set_name'])
else:
LOG.error(_("Bad cluster configuration; instance type "
"given as %s.") % cluster_config['instance_type'])
return ds_instance.ServiceStatuses.FAILED

def _delete_config_parameters(self, config_contents, parameters):
if not config_contents:
return None
if 'key' in cluster_config:
self._configure_cluster_security(cluster_config['key'])

params_as_string = '|'.join(parameters)
p = re.compile("\\s*#?\\s*(%s)\\s*=" % params_as_string)
contents_as_list = config_contents.splitlines()
filtered = filter(lambda line: not p.match(line), contents_as_list)
return '\n'.join(filtered)
def _configure_as_query_router(self):
LOG.info(_("Configuring instance as a cluster query router."))
self.is_query_router = True

def _add_config_parameter(self, config_contents, parameter, value):
return (config_contents or '') + "\n%s = %s" % (parameter, value)
# Write the 'mongos' upstart script.
# FIXME(pmalik): The control script should really be written in the
# elements.
# The guestagent will choose the right daemon ('mongod' or 'mongos')
# based on the 'cluster_config' values.
upstart_contents = (system.MONGOS_UPSTART_CONTENTS.
format(config_file_placeholder=CONFIG_FILE))
operating_system.write_file(system.MONGOS_UPSTART, upstart_contents,
as_root=True)

# FIXME(pmalik): We should really have a separate configuration
# template for the 'mongos' process.
# Remove all storage configurations from the template.
# They apply only to 'mongod' processes.
# Already applied overrides will be integrated into the base file and
# their current groups removed.
config = guestagent_utils.expand_dict(
self.configuration_manager.parse_configuration())
if 'storage' in config:
LOG.debug("Removing 'storage' directives from the configuration "
"template.")
del config['storage']
self.configuration_manager.save_configuration(
guestagent_utils.flatten_dict(config))

# Apply 'mongos' configuration.
self._configure_network(MONGODB_PORT)
self.configuration_manager.apply_system_override(
{'sharding.configDB': ''}, CNF_CLUSTER)

def _configure_as_config_server(self):
LOG.info(_("Configuring instance as a cluster config server."))
self._configure_network(CONFIGSVR_PORT)
self.configuration_manager.apply_system_override(
{'sharding.clusterRole': 'configsvr'}, CNF_CLUSTER)

def _configure_as_cluster_member(self, replica_set_name):
LOG.info(_("Configuring instance as a cluster member."))
self._configure_network(MONGODB_PORT)
self.configuration_manager.apply_system_override(
{'replication.replSetName': replica_set_name}, CNF_CLUSTER)

def _configure_cluster_security(self, key_value):
"""Force cluster key-file-based authentication.
"""
# Store the cluster member authentication key.
self.store_key(key_value)

self.configuration_manager.apply_system_override(
{'security.clusterAuthMode': 'keyFile',
'security.keyFile': self.get_key_file()}, CNF_CLUSTER)

def _configure_network(self, port=None):
"""Make the service accessible at a given (or default if not) port.
"""
instance_ip = netutils.get_my_ipv4()
bind_interfaces_string = ','.join([instance_ip, '127.0.0.1'])
options = {'net.bindIp': bind_interfaces_string}
if port is not None:
guestagent_utils.update_dict({'net.port': port}, options)

self.configuration_manager.apply_system_override(options)
self.status.set_host(instance_ip, port=port)
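
For example (illustrative values only), on a guest whose IPv4 address is 10.0.0.5, calling _configure_network(MONGODB_PORT) applies an override equivalent to:

# Sketch of the flattened override built above; 27017 is MongoDB's default port.
options = {'net.bindIp': '10.0.0.5,127.0.0.1', 'net.port': 27017}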

def clear_storage(self):
mount_point = "/var/lib/mongodb/*"
@@ -229,41 +325,24 @@ class MongoDBApp(object):
except exception.ProcessExecutionError:
LOG.exception(_("Error clearing storage."))

def _has_config_db(self):
value_string = self.configuration_manager.get_value(
'sharding', {}).get('configDB')

return value_string is not None

# FIXME(pmalik): This method should really be called 'set_config_servers'.
# The current name suggests it adds more config servers, but it
# rather replaces the existing ones.
def add_config_servers(self, config_server_hosts):
"""Set config servers on a query router (mongos) instance.
"""
This method is used by query router (mongos) instances.
"""
config_contents = self._read_config()
configdb_contents = ','.join(['%(host)s:%(port)s'
% {'host': host, 'port': CONFIGSVR_PORT}
for host in config_server_hosts])
LOG.debug("Config server list %s." % configdb_contents)
# remove db path from config and update configdb
contents = self._delete_config_parameters(config_contents,
["dbpath", "nojournal",
"smallfiles", "journal",
"noprealloc", "configdb"])
contents = self._add_config_parameter(contents,
"configdb", configdb_contents)
LOG.info(_("Rewriting configuration."))
self.start_db_with_conf_changes(contents)

def write_mongos_upstart(self):
upstart_contents = (system.MONGOS_UPSTART_CONTENTS.
format(config_file_placeholder=CONFIG_FILE))

LOG.info(_("Writing %s.") % system.TMP_MONGOS_UPSTART)

with open(system.TMP_MONGOS_UPSTART, 'w') as t:
t.write(upstart_contents)

LOG.info(_("Moving %(a)s to %(b)s.")
% {'a': system.TMP_MONGOS_UPSTART,
'b': system.MONGOS_UPSTART})
operating_system.move(system.TMP_MONGOS_UPSTART, system.MONGOS_UPSTART,
as_root=True)
operating_system.remove('/etc/init/mongodb.conf', force=True,
as_root=True)
config_servers_string = ','.join(['%s:27019' % host
for host in config_server_hosts])
LOG.info(_("Setting config servers: %s") % config_servers_string)
self.configuration_manager.apply_system_override(
{'sharding.configDB': config_servers_string}, CNF_CLUSTER)
self.start_db(True)

def add_shard(self, replica_set_name, replica_set_member):
"""
@@ -288,7 +367,7 @@ class MongoDBApp(object):
if((status["ok"] == 1) and
(status["members"][0]["stateStr"] == "PRIMARY") and
(status["myState"] == 1)):
return True
return True
else:
return False

@@ -330,6 +409,15 @@ class MongoDBApp(object):
# TODO(ramashri) see if hardcoded values can be removed
utils.poll_until(check_rs_status, sleep_time=60, time_out=100)

def _set_localhost_auth_bypass(self, enabled):
"""When active, the localhost exception allows connections from the
localhost interface to create the first user on the admin database.
The exception applies only when there are no users created in the
MongoDB instance.
"""
self.configuration_manager.apply_system_override(
{'setParameter': {'enableLocalhostAuthBypass': enabled}})

def list_all_dbs(self):
return MongoDBAdmin().list_database_names()

@@ -349,11 +437,7 @@ class MongoDBApp(object):
def store_key(self, key):
"""Store the cluster key."""
LOG.debug('Storing key for MongoDB cluster.')
with tempfile.NamedTemporaryFile() as f:
f.write(key)
f.flush()
operating_system.copy(f.name, system.MONGO_KEY_FILE,
force=True, as_root=True)
operating_system.write_file(system.MONGO_KEY_FILE, key, as_root=True)
operating_system.chmod(system.MONGO_KEY_FILE,
operating_system.FileMode.SET_USR_RO,
as_root=True)
@@ -375,69 +459,59 @@ class MongoDBApp(object):
user = models.MongoDBUser(name='admin.%s' % creds.username,
password=creds.password)
user.roles = system.MONGO_ADMIN_ROLES
with MongoDBClient(user, auth=False) as client:
with MongoDBClient(None) as client:
MongoDBAdmin().create_user(user, client=client)
LOG.debug('Created admin user.')

def secure(self, cluster_config=None):
# Secure the server by storing the cluster key if this is a cluster
# or creating the admin user if this is a single instance.
LOG.debug('Securing MongoDB instance.')
if cluster_config:
self.store_key(cluster_config['key'])
else:
LOG.debug('Generating admin password.')
def secure(self):
"""Create the Trove admin user.

The service should not be running at this point.
"""
if self.status.is_running:
raise RuntimeError(_("Cannot secure the instance. "
"The service is still running."))

try:
self._set_localhost_auth_bypass(True)
self.start_db(update_db=False)
password = utils.generate_random_password()
self.start_db()
self.create_admin_user(password)
LOG.debug("MongoDB secure complete.")
finally:
self._set_localhost_auth_bypass(False)
self.stop_db()
LOG.debug('MongoDB secure complete.')

def get_configuration_property(self, name, default=None):
"""Return the value of a MongoDB configuration property.
"""
return self.configuration_manager.get_value(name, default)

class MongoDBAppStatus(service.BaseDbStatus):

is_config_server = None
is_query_router = None
def __init__(self, host='localhost', port=None):
super(MongoDBAppStatus, self).__init__()
self.set_host(host, port=port)

def _is_config_server(self):
if self.is_config_server is None:
try:
cmd = ("grep '^configsvr[ \t]*=[ \t]*true$' %s"
% CONFIG_FILE)
utils.execute_with_timeout(cmd, shell=True)
self.is_config_server = True
except exception.ProcessExecutionError:
self.is_config_server = False
return self.is_config_server

def _is_query_router(self):
if self.is_query_router is None:
try:
cmd = ("grep '^configdb[ \t]*=.*$' %s"
% CONFIG_FILE)
utils.execute_with_timeout(cmd, shell=True)
self.is_query_router = True
except exception.ProcessExecutionError:
self.is_query_router = False
return self.is_query_router
def set_host(self, host, port=None):
# This forces refresh of the 'pymongo' engine cached in the
# MongoDBClient class.
# Authentication is not required to check the server status.
MongoDBClient(None, host=host, port=port)

def _get_actual_db_status(self):
try:
port = CONFIGSVR_PORT if self._is_config_server() else MONGODB_PORT
out, err = utils.execute_with_timeout(
'mongostat', '--host', str(netutils.get_my_ipv4()),
'--port', str(port), '-n', str(1), check_exit_code=[0, 1]
)
if not err:
return ds_instance.ServiceStatuses.RUNNING
else:
return ds_instance.ServiceStatuses.SHUTDOWN
except exception.ProcessExecutionError as e:
LOG.exception(_("Process execution %s.") % e)
return ds_instance.ServiceStatuses.SHUTDOWN
except OSError as e:
LOG.exception(_("OS Error %s.") % e)
with MongoDBClient(None) as client:
client.server_info()
return ds_instance.ServiceStatuses.RUNNING
except (pymongo.errors.ServerSelectionTimeoutError,
pymongo.errors.AutoReconnect):
return ds_instance.ServiceStatuses.SHUTDOWN
except Exception:
LOG.exception(_("Error getting MongoDB status."))

return ds_instance.ServiceStatuses.SHUTDOWN

class MongoDBAdmin(object):
@@ -667,13 +741,11 @@ class MongoDBClient(object):
# engine information is cached by making it a class attribute
engine = {}

def __init__(self, user, host=None, port=None,
auth=True):
def __init__(self, user, host=None, port=None):
"""Get the client. Specifying host and/or port updates cached values.
:param user: (required) MongoDBUser instance
:param user: MongoDBUser instance used to authenticate
:param host: server address, defaults to localhost
:param port: server port, defaults to 27017
:param auth: set to False to disable authentication, default True
:return:
"""
new_client = False
@@ -688,7 +760,7 @@ class MongoDBClient(object):
if host:
type(self).engine['host'] = host
if port:
type(self).engine['host'] = port
type(self).engine['port'] = port
new_client = True
if new_client:
host = type(self).engine['host']
@@ -699,7 +771,7 @@ class MongoDBClient(object):
port=port,
connect=False)
self.session = type(self).engine['client']
if auth:
if user:
db_name = user.database.name
LOG.debug("Authentication MongoDB client on %s." % db_name)
self._db = self.session[db_name]
@@ -724,25 +796,13 @@ class MongoDBCredentials(object):
self.password = password

def read(self, filename):
with open(filename) as f:
credentials = json.load(f)
self.username = credentials['username']
self.password = credentials['password']
credentials = operating_system.read_file(filename, codec=JsonCodec())
self.username = credentials['username']
self.password = credentials['password']

def write(self, filename):
self.clear_file(filename)
with open(filename, 'w') as f:
credentials = {'username': self.username,
'password': self.password}
json.dump(credentials, f)
credentials = {'username': self.username,
'password': self.password}

@staticmethod
def clear_file(filename):
LOG.debug("Creating clean file %s" % filename)
if operating_system.file_discovery([filename]):
operating_system.remove(filename)
# force file creation by just opening it
open(filename, 'wb')
operating_system.chmod(filename,
operating_system.FileMode.SET_USR_RW,
as_root=True)
operating_system.write_file(filename, credentials, codec=JsonCodec())
operating_system.chmod(filename, operating_system.FileMode.SET_USR_RW)
@@ -21,11 +21,11 @@ from trove.guestagent import pkg
OS_NAME = operating_system.get_os()

MONGODB_MOUNT_POINT = "/var/lib/mongodb"
MONGO_PID_FILE = '/var/run/mongodb.pid'
MONGO_LOG_FILE = '/var/log/mongodb/mongod.log'

TMP_CONFIG = "/tmp/mongodb.conf.tmp"
CONFIG_CANDIDATES = ["/etc/mongodb.conf", "/etc/mongod.conf"]
MONGOS_UPSTART = "/etc/init/mongos.conf"
TMP_MONGOS_UPSTART = "/tmp/mongos.conf.tmp"
MONGO_ADMIN_NAME = 'os_admin'
MONGO_ADMIN_ROLES = [{'db': 'admin', 'role': 'userAdminAnyDatabase'},
{'db': 'admin', 'role': 'dbAdminAnyDatabase'},

@@ -43,9 +43,7 @@ class MongoDump(base.BackupRunner):
backup_cmd = 'mongodump --out ' + MONGO_DUMP_DIR

def __init__(self, *args, **kwargs):
self.status = mongo_service.MongoDBAppStatus()
self.app = mongo_service.MongoDBApp(self.status)
self.admin = mongo_service.MongoDBApp(self.status)
self.app = mongo_service.MongoDBApp()
super(MongoDump, self).__init__(*args, **kwargs)

def _run_pre_backup(self):

@@ -39,8 +39,7 @@ class MongoDump(base.RestoreRunner):

def __init__(self, *args, **kwargs):
super(MongoDump, self).__init__(*args, **kwargs)
self.status = mongo_service.MongoDBAppStatus()
self.app = mongo_service.MongoDBApp(self.status)
self.app = mongo_service.MongoDBApp()

def post_restore(self):
"""

@@ -1,94 +1,4 @@
# mongodb.conf

smallfiles = false

# Where to store the data.
dbpath=/var/lib/mongodb

#where to log
logpath=/var/log/mongodb/mongodb.log

logappend=true

#port = 27017

# Enable journaling, http://www.mongodb.org/display/DOCS/Journaling
journal=true

# Enables periodic logging of CPU utilization and I/O wait
#cpu = true

# Turn on/off security. Off is currently the default
#noauth = true
#auth = true

# Verbose logging output.
#verbose = true

# Inspect all client data for validity on receipt (useful for
# developing drivers)
#objcheck = true

# Enable db quota management
#quota = true

# Set oplogging level where n is
# 0=off (default)
# 1=W
# 2=R
# 3=both
# 7=W+some reads
#oplog = 0

# Diagnostic/debugging option
#nocursors = true

# Ignore query hints
#nohints = true

# Disable the HTTP interface (Defaults to localhost:27018).
#nohttpinterface = true

# Turns off server-side scripting. This will result in greatly limited
# functionality
#noscripting = true

# Turns off table scans. Any query that would do a table scan fails.
#notablescan = true

# Disable data file preallocation.
#noprealloc = true

# Specify .ns file size for new databases.
# nssize = <size>

# Accout token for Mongo monitoring server.
#mms-token = <token>

# Server name for Mongo monitoring server.
#mms-name = <server-name>

# Ping interval for Mongo monitoring server.
#mms-interval = <seconds>

# Replication Options

# in replicated mongo databases, specify here whether this is a slave or master
#slave = true
#source = master.example.com
# Slave only: specify a single database to replicate
#only = master.example.com
# or
#master = true
#source = slave.example.com

# Address of a server to pair with.
#pairwith = <server:port>
# Address of arbiter server.
#arbiter = <server:port>
# Automatically resync if slave data is stale
#autoresync
# Custom size for replication operation log.
#oplogSize = <MB>
# Size limit for in-memory storage of op ids.
#opIdMem = <bytes>
storage.mmapv1.smallFiles: false
storage.journal.enabled: true

@@ -0,0 +1,3 @@
{% for key, value in overrides.iteritems() -%}
{{key}}: {{value}}
{% endfor %}
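
Rendered with an overrides dict, the template above emits one "key: value" line per entry. A sketch (illustrative only, using Jinja2 directly and items() in place of the Python 2 iteritems() in the template):

from jinja2 import Template

# Same structure as the template file above, inlined for the example.
template = Template("{% for key, value in overrides.items() -%}\n{{key}}: {{value}}\n{% endfor %}")
print(template.render(overrides={'systemLog.verbosity': 0, 'net.maxIncomingConnections': 1000}))
# systemLog.verbosity: 0
# net.maxIncomingConnections: 1000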

trove/templates/mongodb/validation-rules.json (new file, 333 lines)
@@ -0,0 +1,333 @@
{
"configuration-parameters": [
{
"name": "systemLog.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.accessControl.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.command.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.control.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.geo.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.index.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.network.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.query.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.replication.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.sharding.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.storage.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.storage.journal.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.component.write.verbosity",
"restart_required": true,
"min": 0,
"max": 5,
"type": "integer"
},
{
"name": "systemLog.quiet",
"restart_required": true,
"type": "boolean"
},
{
"name": "systemLog.traceAllExceptions",
"restart_required": true,
"type": "boolean"
},
{
"name": "systemLog.logAppend",
"restart_required": true,
"type": "boolean"
},
{
"name": "systemLog.logRotate",
"restart_required": true,
"type": "string"
},
{
"name": "systemLog.timeStampFormat",
"restart_required": true,
"type": "string"
},
{
"name": "net.maxIncomingConnections",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "net.wireObjectCheck",
"restart_required": true,
"type": "boolean"
},
{
"name": "net.ipv6",
"restart_required": true,
"type": "boolean"
},
{
"name": "net.http.enabled",
"restart_required": true,
"type": "boolean"
},
{
"name": "net.http.JSONPEnabled",
"restart_required": true,
"type": "boolean"
},
{
"name": "net.http.RESTInterfaceEnabled",
"restart_required": true,
"type": "boolean"
},
{
"name": "security.authorization",
"restart_required": true,
"type": "boolean"
},
{
"name": "security.sasl.hostName",
"restart_required": true,
"type": "string"
},
{
"name": "security.sasl.serviceName",
"restart_required": true,
"type": "string"
},
{
"name": "security.sasl.saslauthdSocketPath",
"restart_required": true,
"type": "string"
},
{
"name": "security.javascriptEnabled",
"restart_required": true,
"type": "boolean"
},
{
"name": "operationProfiling.slowOpThresholdMs",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "operationProfiling.mode",
"restart_required": true,
"type": "string"
},
{
"name": "storage.indexBuildRetry",
"restart_required": true,
"type": "boolean"
},
{
"name": "storage.journal.enabled",
"restart_required": true,
"type": "boolean"
},
{
"name": "storage.directoryPerDB",
"restart_required": true,
"type": "boolean"
},
{
"name": "storage.syncPeriodSecs",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "storage.engine",
"restart_required": true,
"type": "string"
},
{
"name": "storage.mmapv1.nsSize",
"restart_required": true,
"min": 0,
"max": 2047,
"type": "integer"
},
{
"name": "storage.mmapv1.quota.enforced",
"restart_required": true,
"type": "boolean"
},
{
"name": "storage.mmapv1.quota.maxFilesPerDB",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "storage.mmapv1.smallFiles",
"restart_required": true,
"type": "boolean"
},
{
"name": "storage.mmapv1.journal.debugFlags",
"restart_required": true,
"type": "integer"
},
{
"name": "storage.mmapv1.journal.commitIntervalMs",
"restart_required": true,
"min": 2,
"max": 300,
"type": "integer"
},
{
"name": "storage.wiredTiger.engineConfig.cacheSizeGB",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "storage.wiredTiger.engineConfig.statisticsLogDelaySecs",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "storage.wiredTiger.engineConfig.journalCompressor",
"restart_required": true,
"type": "string"
},
{
"name": "storage.wiredTiger.collectionConfig.blockCompressor",
"restart_required": true,
"type": "string"
},
{
"name": "storage.wiredTiger.indexConfig.prefixCompression",
"restart_required": true,
"type": "boolean"
},
{
"name": "replication.oplogSizeMB",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "replication.secondaryIndexPrefetch",
"restart_required": true,
"type": "string"
},
{
"name": "sharding.clusterRole",
"restart_required": true,
"type": "string"
},
{
"name": "auditLog.format",
"restart_required": true,
"type": "string"
},
{
"name": "auditLog.filter",
"restart_required": true,
"type": "string"
},
{
"name": "snmp.subagent",
"restart_required": true,
"type": "boolean"
},
{
"name": "snmp.master",
"restart_required": true,
"type": "boolean"
},
{
"name": "replication.localPingThresholdMs",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "sharding.autoSplit",
"restart_required": true,
"type": "boolean"
},
{
"name": "sharding.chunkSize",
"restart_required": true,
"min": 0,
"type": "integer"
},
{
"name": "setParameter",
"restart_required": true,
"type": "string"
}
]
}
@ -17,6 +17,7 @@ from testtools.testcase import ExpectedException
|
||||
from trove.common import exception
|
||||
from trove.common import utils
|
||||
from trove.guestagent.common.operating_system import FileMode
|
||||
from trove.guestagent.datastore.experimental.mongodb.service import MongoDBApp
|
||||
from trove.guestagent.strategies.backup import base as backupBase
|
||||
from trove.guestagent.strategies.backup import mysql_impl
|
||||
from trove.guestagent.strategies.restore import base as restoreBase
|
||||
@ -327,7 +328,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
|
||||
# (see bug/1423759).
|
||||
remove.assert_called_once_with(ANY, force=True, as_root=True)
|
||||
|
||||
def test_backup_encrypted_mongodump_command(self):
|
||||
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
|
||||
def test_backup_encrypted_mongodump_command(self, _):
|
||||
backupBase.BackupRunner.is_encrypted = True
|
||||
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
|
||||
RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS)
|
||||
@ -338,7 +340,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
|
||||
MONGODUMP_CMD + PIPE + ZIP + PIPE + ENCRYPT, bkp.command)
|
||||
self.assertIn("gz.enc", bkp.manifest)
|
||||
|
||||
def test_backup_not_encrypted_mongodump_command(self):
|
||||
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
|
||||
def test_backup_not_encrypted_mongodump_command(self, _):
|
||||
backupBase.BackupRunner.is_encrypted = False
|
||||
backupBase.BackupRunner.encrypt_key = CRYPTO_KEY
|
||||
RunnerClass = utils.import_class(BACKUP_MONGODUMP_CLS)
|
||||
@ -348,7 +351,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
|
||||
self.assertEqual(MONGODUMP_CMD + PIPE + ZIP, bkp.command)
|
||||
self.assertIn("gz", bkp.manifest)
|
||||
|
||||
def test_restore_decrypted_mongodump_command(self):
|
||||
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
|
||||
def test_restore_decrypted_mongodump_command(self, _):
|
||||
restoreBase.RestoreRunner.is_zipped = True
|
||||
restoreBase.RestoreRunner.is_encrypted = False
|
||||
RunnerClass = utils.import_class(RESTORE_MONGODUMP_CLS)
|
||||
@ -356,7 +360,8 @@ class GuestAgentBackupTest(trove_testtools.TestCase):
|
||||
location="filename", checksum="md5")
|
||||
self.assertEqual(restr.restore_cmd, UNZIP + PIPE + MONGODUMP_RESTORE)
|
||||
|
||||
def test_restore_encrypted_mongodump_command(self):
|
||||
@mock.patch.object(MongoDBApp, '_init_overrides_dir')
|
||||
def test_restore_encrypted_mongodump_command(self, _):
|
||||
restoreBase.RestoreRunner.is_zipped = True
|
||||
restoreBase.RestoreRunner.is_encrypted = True
|
||||
restoreBase.RestoreRunner.decrypt_key = CRYPTO_KEY
|
||||
@ -488,7 +493,8 @@ class MongodbBackupTests(trove_testtools.TestCase):
|
||||
|
||||
class MongodbRestoreTests(trove_testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
@patch.object(MongoDBApp, '_init_overrides_dir')
|
||||
def setUp(self, _):
|
||||
super(MongodbRestoreTests, self).setUp()
|
||||
|
||||
self.restore_runner = utils.import_class(
|
||||
|
@ -2530,7 +2530,8 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
'cmd_disable': 'disable'
|
||||
}
|
||||
|
||||
def setUp(self):
|
||||
@patch.object(mongo_service.MongoDBApp, '_init_overrides_dir')
|
||||
def setUp(self, _):
|
||||
super(MongoDBAppTest, self).setUp()
|
||||
self.orig_utils_execute_with_timeout = (mongo_service.
|
||||
utils.execute_with_timeout)
|
||||
@ -2545,9 +2546,10 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
self.FAKE_ID = str(uuid4())
|
||||
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
|
||||
status=rd_instance.ServiceStatuses.NEW)
|
||||
self.appStatus = FakeAppStatus(self.FAKE_ID,
|
||||
rd_instance.ServiceStatuses.NEW)
|
||||
self.mongoDbApp = mongo_service.MongoDBApp(self.appStatus)
|
||||
|
||||
self.mongoDbApp = mongo_service.MongoDBApp()
|
||||
self.mongoDbApp.status = FakeAppStatus(self.FAKE_ID,
|
||||
rd_instance.ServiceStatuses.NEW)
|
||||
time.sleep = Mock()
|
||||
os.unlink = Mock()
|
||||
|
||||
@ -2568,7 +2570,7 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
|
||||
def test_stopdb(self):
|
||||
mongo_service.utils.execute_with_timeout = Mock()
|
||||
self.appStatus.set_next_status(
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.SHUTDOWN)
|
||||
|
||||
self.mongoDbApp.stop_db()
|
||||
@ -2577,7 +2579,7 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
def test_stop_db_with_db_update(self):
|
||||
|
||||
mongo_service.utils.execute_with_timeout = Mock()
|
||||
self.appStatus.set_next_status(
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.SHUTDOWN)
|
||||
|
||||
self.mongoDbApp.stop_db(True)
|
||||
@ -2587,13 +2589,15 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
def test_stop_db_error(self):
|
||||
|
||||
mongo_service.utils.execute_with_timeout = Mock()
|
||||
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.state_change_wait_time = 1
|
||||
self.assertRaises(RuntimeError, self.mongoDbApp.stop_db)
|
||||
|
||||
def test_restart(self):
|
||||
|
||||
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.stop_db = Mock()
|
||||
self.mongoDbApp.start_db = Mock()
|
||||
|
||||
@ -2611,7 +2615,8 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
def test_start_db(self):
|
||||
|
||||
mongo_service.utils.execute_with_timeout = Mock()
|
||||
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.RUNNING)
|
||||
|
||||
self.mongoDbApp.start_db()
|
||||
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
|
||||
@ -2619,7 +2624,8 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
def test_start_db_with_update(self):
|
||||
|
||||
mongo_service.utils.execute_with_timeout = Mock()
|
||||
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.RUNNING)
|
||||
|
||||
self.mongoDbApp.start_db(True)
|
||||
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
|
||||
@ -2631,7 +2637,8 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
return_value=["ubuntu 17036 0.0 0.1 618960 "
|
||||
"29232 pts/8 Sl+ Jan29 0:07 mongod", ""])
|
||||
self.mongoDbApp.state_change_wait_time = 1
|
||||
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
|
||||
self.mongoDbApp.status.set_next_status(
|
||||
rd_instance.ServiceStatuses.SHUTDOWN)
|
||||
|
||||
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
|
||||
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
|
||||
@ -2645,23 +2652,11 @@ class MongoDBAppTest(testtools.TestCase):
|
||||
|
||||
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
|
||||
|
||||
def test_mongodb_error_in_write_config_verify_unlink(self):
|
||||
configuration = {'config_contents': 'some junk'}
|
||||
|
||||
with patch.object(os.path, 'isfile', return_value=True):
|
||||
with patch.object(operating_system, 'move',
|
||||
side_effect=ProcessExecutionError):
|
||||
self.assertRaises(ProcessExecutionError,
|
||||
self.mongoDbApp.reset_configuration,
|
||||
configuration=configuration)
|
||||
self.assertEqual(1, operating_system.move.call_count)
|
||||
self.assertEqual(1, os.unlink.call_count)
|
||||
|
||||
def test_start_db_with_conf_changes_db_is_running(self):
|
||||
|
||||
self.mongoDbApp.start_db = Mock()
|
||||
|
||||
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
|
||||
self.mongoDbApp.status.status = rd_instance.ServiceStatuses.RUNNING
|
||||
self.assertRaises(RuntimeError,
|
||||
self.mongoDbApp.start_db_with_conf_changes,
|
||||
Mock())
|
||||
|
@ -92,3 +92,16 @@ class TestGuestagentUtils(trove_testtools.TestCase):
|
||||
'base_dir/base_name.ext1.ext2',
|
||||
guestagent_utils.build_file_path(
|
||||
'base_dir', 'base_name', 'ext1', 'ext2'))
|
||||
|
||||
def test_flatten_expand_dict(self):
|
||||
self._assert_flatten_expand_dict({}, {})
|
||||
self._assert_flatten_expand_dict({'ns1': 1}, {'ns1': 1})
|
||||
self._assert_flatten_expand_dict(
|
||||
{'ns1': {'ns2a': {'ns3a': True, 'ns3b': False}, 'ns2b': 10}},
|
||||
{'ns1.ns2a.ns3a': True, 'ns1.ns2a.ns3b': False, 'ns1.ns2b': 10})
|
||||
|
||||
def _assert_flatten_expand_dict(self, nested_dict, flattened_dict):
|
||||
self.assertEqual(
|
||||
flattened_dict, guestagent_utils.flatten_dict(nested_dict))
|
||||
self.assertEqual(
|
||||
nested_dict, guestagent_utils.expand_dict(flattened_dict))
|
||||
|
@ -20,18 +20,24 @@ import pymongo
import trove.common.context as context
import trove.common.instance as ds_instance
import trove.common.utils as utils
from trove.guestagent.common import operating_system
import trove.guestagent.datastore.experimental.mongodb.manager as manager
import trove.guestagent.datastore.experimental.mongodb.service as service
import trove.guestagent.datastore.experimental.mongodb.system as system
import trove.guestagent.volume as volume
import trove.tests.unittests.trove_testtools as trove_testtools


class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):

def setUp(self):
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir')
def setUp(self, _):
super(GuestAgentMongoDBClusterManagerTest, self).setUp()
self.context = context.TroveContext()
self.manager = manager.Manager()
self.manager.app.configuration_manager = mock.MagicMock()
self.manager.app.status = mock.MagicMock()
self.conf_mgr = self.manager.app.configuration_manager

self.pymongo_patch = mock.patch.object(
pymongo, 'MongoClient'
@ -42,14 +48,14 @@ class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
def tearDown(self):
super(GuestAgentMongoDBClusterManagerTest, self).tearDown()

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(service.MongoDBApp, 'add_members',
side_effect=RuntimeError("Boom!"))
def test_add_members_failure(self, mock_add_members, mock_set_status):
def test_add_members_failure(self, mock_add_members):
members = ["test1", "test2"]
self.assertRaises(RuntimeError, self.manager.add_members,
self.context, members)
mock_set_status.assert_called_with(ds_instance.ServiceStatuses.FAILED)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.FAILED)

@mock.patch.object(utils, 'poll_until')
@mock.patch.object(utils, 'generate_random_password', return_value='pwd')
@ -64,127 +70,118 @@ class GuestAgentMongoDBClusterManagerTest(trove_testtools.TestCase):
mock_initiate.assert_any_call()
mock_add.assert_any_call(["test1", "test2"])

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(service.MongoDBApp, 'add_shard',
side_effect=RuntimeError("Boom!"))
def test_add_shard_failure(self, mock_add_shard, mock_set_status):
def test_add_shard_failure(self, mock_add_shard):
self.assertRaises(RuntimeError, self.manager.add_shard,
self.context, "rs", "rs_member")
mock_set_status.assert_called_with(ds_instance.ServiceStatuses.FAILED)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.FAILED)

@mock.patch.object(service.MongoDBAdmin, 'add_shard')
def test_add_shard(self, mock_add_shard):
self.manager.add_shard(self.context, "rs", "rs_member")
mock_add_shard.assert_called_with("rs/rs_member:27017")

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(service.MongoDBApp, 'add_config_servers',
side_effect=RuntimeError("Boom!"))
def test_add_config_server_failure(self, mock_add_config,
mock_set_status):
def test_add_config_server_failure(self, mock_add_config):
self.assertRaises(RuntimeError, self.manager.add_config_servers,
self.context,
["cfg_server1", "cfg_server2"])
mock_set_status.assert_called_with(ds_instance.ServiceStatuses.FAILED)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.FAILED)

@mock.patch.object(service.MongoDBApp, 'start_db_with_conf_changes')
@mock.patch.object(service.MongoDBApp, '_add_config_parameter',
return_value="")
@mock.patch.object(service.MongoDBApp, '_delete_config_parameters',
return_value="")
@mock.patch.object(service.MongoDBApp, '_read_config', return_value="")
def test_add_config_servers(self, mock_read, mock_delete,
mock_add, mock_start):
@mock.patch.object(service.MongoDBApp, 'start_db')
def test_add_config_servers(self, mock_start_db):
self.manager.add_config_servers(self.context,
["cfg_server1",
"cfg_server2"])
mock_read.assert_called_with()
mock_delete.assert_called_with("", ["dbpath", "nojournal",
"smallfiles", "journal",
"noprealloc", "configdb"])
mock_add.assert_called_with("", "configdb",
"cfg_server1:27019,cfg_server2:27019")
mock_start.assert_called_with("")

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(service.MongoDBApp, 'write_mongos_upstart')
@mock.patch.object(service.MongoDBApp, 'reset_configuration')
@mock.patch.object(service.MongoDBApp, 'update_config_contents')
@mock.patch.object(service.MongoDBApp, 'secure')
@mock.patch.object(service.MongoDBApp, 'get_key_file',
return_value="/test/key/file")
@mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.2")
def test_prepare_mongos(self, mock_ip_address, mock_key_file,
mock_secure, mock_update, mock_reset,
mock_upstart, mock_set_status):
self.conf_mgr.apply_system_override.assert_called_once_with(
{'sharding.configDB': "cfg_server1:27019,cfg_server2:27019"},
'clustering')
mock_start_db.assert_called_with(True)

@mock.patch.object(service.MongoDBApp, '_configure_as_query_router')
@mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
def test_prepare_mongos(self, mock_secure, mock_config):
self._prepare_method("test-id-1", "query_router", None)
mock_update.assert_called_with(None, {'bind_ip': '10.0.0.2,127.0.0.1',
# 'keyFile': '/test/key/file'})
})
self.assertTrue(self.manager.app.status.is_query_router)
mock_set_status.assert_called_with(
mock_config.assert_called_once_with()
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(utils, 'poll_until')
@mock.patch.object(service.MongoDBApp, 'start_db_with_conf_changes')
@mock.patch.object(service.MongoDBApp, 'update_config_contents')
@mock.patch.object(service.MongoDBApp, 'secure')
@mock.patch.object(service.MongoDBApp, 'get_key_file',
return_value="/test/key/file")
@mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.3")
def test_prepare_config_server(self, mock_ip_address, mock_key_file,
mock_secure, mock_update, mock_start,
mock_poll, mock_set_status):
@mock.patch.object(service.MongoDBApp, '_configure_as_config_server')
@mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
def test_prepare_config_server(self, mock_secure, mock_config):
self._prepare_method("test-id-2", "config_server", None)
mock_update.assert_called_with(None, {'configsvr': 'true',
'bind_ip': '10.0.0.3,127.0.0.1',
# 'keyFile': '/test/key/file',
'dbpath': '/var/lib/mongodb'})
self.assertTrue(self.manager.app.status.is_config_server)
mock_set_status.assert_called_with(
mock_config.assert_called_once_with()
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(utils, 'poll_until')
@mock.patch.object(service.MongoDBApp, 'start_db_with_conf_changes')
@mock.patch.object(service.MongoDBApp, 'update_config_contents')
@mock.patch.object(service.MongoDBApp, 'secure')
@mock.patch.object(service.MongoDBApp, 'get_key_file',
return_value="/test/key/file")
@mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.4")
def test_prepare_member(self, mock_ip_address, mock_key_file,
mock_secure, mock_update, mock_start,
mock_poll, mock_set_status):
@mock.patch.object(service.MongoDBApp, '_configure_as_cluster_member')
@mock.patch.object(service.MongoDBApp, '_configure_cluster_security')
def test_prepare_member(self, mock_secure, mock_config):
self._prepare_method("test-id-3", "member", None)
mock_update.assert_called_with(None,
{'bind_ip': '10.0.0.4,127.0.0.1',
# 'keyFile': '/test/key/file',
'dbpath': '/var/lib/mongodb',
'replSet': 'rs1'})
mock_set_status.assert_called_with(
mock_config.assert_called_once_with('rs1')
mock_secure.assert_called_once_with(None)
self.manager.app.status.set_status.assert_called_with(
ds_instance.ServiceStatuses.BUILD_PENDING)

@mock.patch.object(service.MongoDBAppStatus, 'set_status')
@mock.patch.object(utils, 'poll_until')
@mock.patch.object(service.MongoDBApp, 'start_db_with_conf_changes')
@mock.patch.object(service.MongoDBApp, 'update_config_contents')
@mock.patch.object(service.MongoDBApp, 'secure')
@mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.4")
def test_prepare_secure(self, mock_ip_address, mock_secure,
mock_update, mock_start, mock_poll,
mock_set_status):
key = "test_key"
self._prepare_method("test-id-4", "member", key)
mock_secure.assert_called_with(
{"id": "test-id-4",
"shard_id": "test_shard_id",
"instance_type": 'member',
"replica_set_name": "rs1",
"key": key}
)
@mock.patch.object(operating_system, 'write_file')
@mock.patch.object(service.MongoDBApp, '_configure_network')
def test_configure_as_query_router(self, net_conf, os_write_file):
self.conf_mgr.parse_configuration = mock.Mock(
return_value={'storage.mmapv1.smallFiles': False,
'storage.journal.enabled': True})
self.manager.app._configure_as_query_router()
os_write_file.assert_called_once_with(system.MONGOS_UPSTART, mock.ANY,
as_root=True)
self.conf_mgr.save_configuration.assert_called_once_with({})
net_conf.assert_called_once_with(service.MONGODB_PORT)
self.conf_mgr.apply_system_override.assert_called_once_with(
{'sharding.configDB': ''}, 'clustering')
self.assertTrue(self.manager.app.is_query_router)

@mock.patch.object(service.MongoDBApp, '_configure_network')
def test_configure_as_config_server(self, net_conf):
self.manager.app._configure_as_config_server()
net_conf.assert_called_once_with(service.CONFIGSVR_PORT)
self.conf_mgr.apply_system_override.assert_called_once_with(
{'sharding.clusterRole': 'configsvr'}, 'clustering')

@mock.patch.object(service.MongoDBApp, '_configure_network')
def test_configure_as_cluster_member(self, net_conf):
self.manager.app._configure_as_cluster_member('rs1')
net_conf.assert_called_once_with(service.MONGODB_PORT)
self.conf_mgr.apply_system_override.assert_called_once_with(
{'replication.replSetName': 'rs1'}, 'clustering')

@mock.patch.object(service.MongoDBApp, 'store_key')
@mock.patch.object(service.MongoDBApp, 'get_key_file',
return_value='/var/keypath')
def test_configure_cluster_security(self, get_key_mock, store_key_mock):
self.manager.app._configure_cluster_security('key')
store_key_mock.assert_called_once_with('key')
self.conf_mgr.apply_system_override.assert_called_once_with(
{'security.clusterAuthMode': 'keyFile',
'security.keyFile': '/var/keypath'}, 'clustering')

@mock.patch.object(netutils, 'get_my_ipv4', return_value="10.0.0.2")
def test_configure_network(self, ip_mock):
self.manager.app._configure_network()
self.conf_mgr.apply_system_override.assert_called_once_with(
{'net.bindIp': '10.0.0.2,127.0.0.1'})
self.manager.app.status.set_host.assert_called_once_with(
'10.0.0.2', port=None)

self.manager.app._configure_network(10000)
self.conf_mgr.apply_system_override.assert_called_with(
{'net.bindIp': '10.0.0.2,127.0.0.1',
'net.port': 10000})
self.manager.app.status.set_host.assert_called_with(
'10.0.0.2', port=10000)

@mock.patch.object(volume.VolumeDevice, 'mount_points', return_value=[])
@mock.patch.object(volume.VolumeDevice, 'mount', return_value=None)

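The overrides asserted in the tests above use MongoDB's dotted option names ('net.bindIp', 'security.keyFile', and so on). A rough sketch of how such a flat override could be turned into the nested YAML that mongod reads, assuming the expand_dict helper plus PyYAML for rendering; the final write step is illustrative and not asserted by these tests:

    import yaml

    from trove.guestagent.common import guestagent_utils

    override = {'net.bindIp': '10.0.0.2,127.0.0.1', 'net.port': 10000}

    # Expand dotted keys into the nested mapping mongod expects.
    nested = guestagent_utils.expand_dict(override)
    # nested == {'net': {'bindIp': '10.0.0.2,127.0.0.1', 'port': 10000}}

    # Render as YAML; an override strategy would write this to a config file.
    print(yaml.safe_dump(nested, default_flow_style=False))
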
@ -27,7 +27,8 @@ import trove.tests.unittests.trove_testtools as trove_testtools

class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):

def setUp(self):
@mock.patch.object(service.MongoDBApp, '_init_overrides_dir')
def setUp(self, _):
super(GuestAgentMongoDBManagerTest, self).setUp()
self.context = context.TroveContext()
self.manager = manager.Manager()
@ -50,9 +51,9 @@ class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):
super(GuestAgentMongoDBManagerTest, self).tearDown()

def test_update_status(self):
with mock.patch.object(self.manager, 'status') as status:
self.manager.update_status(self.context)
status.update.assert_any_call()
self.manager.app.status = mock.MagicMock()
self.manager.update_status(self.context)
self.manager.app.status.update.assert_any_call()

def _prepare_method(self, packages=['packages'], databases=None,
memory_mb='2048', users=None, device_path=None,
@ -61,8 +62,7 @@ class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):
overrides=None, cluster_config=None,):
"""self.manager.app must be correctly mocked before calling."""

self.manager.status = mock.Mock()
self.manager.get_config_changes = mock.Mock()
self.manager.app.status = mock.Mock()

self.manager.prepare(self.context, packages,
databases, memory_mb, users,
@ -74,12 +74,13 @@ class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):
overrides=overrides,
cluster_config=cluster_config)

self.manager.status.begin_install.assert_any_call()
self.manager.app.status.begin_install.assert_any_call()
self.manager.app.install_if_needed.assert_called_with(packages)
self.manager.app.stop_db.assert_any_call()
self.manager.app.clear_storage.assert_any_call()
self.manager.get_config_changes.assert_called_with(cluster_config,
self.mount_point)

(self.manager.app.apply_initial_guestagent_configuration.
assert_called_once_with(cluster_config, self.mount_point))

@mock.patch.object(volume, 'VolumeDevice')
@mock.patch('os.path.exists')
@ -103,7 +104,7 @@ class GuestAgentMongoDBManagerTest(trove_testtools.TestCase):

self._prepare_method()

mock_secure.assert_called_with(None)
mock_secure.assert_called_with()

@mock.patch.object(backup, 'restore')
@mock.patch.object(service.MongoDBAdmin, 'is_root_enabled')

@ -25,7 +25,7 @@ from testtools import ExpectedException

from trove.common import exception
from trove.common.stream_codecs import (
IdentityCodec, IniCodec, PropertiesCodec, YamlCodec)
IdentityCodec, IniCodec, JsonCodec, PropertiesCodec, YamlCodec)
from trove.common import utils
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
@ -95,6 +95,16 @@ class TestOperatingSystem(trove_testtools.TestCase):
self._test_file_codec(data, PropertiesCodec(
string_mappings={'yes': True, 'no': False, "''": None}))

def test_json_file_codec(self):
data = {"Section1": 's1v1',
"Section2": {"s2k1": '1',
"s2k2": 'True'},
"Section3": {"Section4": {"s4k1": '3.1415926535',
"s4k2": None}}
}

self._test_file_codec(data, JsonCodec())

def _test_file_codec(self, data, read_codec, write_codec=None,
expected_data=None,
expected_exception=None):

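A minimal round-trip sketch for the JsonCodec exercised by test_json_file_codec above; the sample data is abbreviated from the test and the snippet is illustrative only:

    from trove.common.stream_codecs import JsonCodec

    codec = JsonCodec()
    data = {"Section1": 's1v1', "Section2": {"s2k1": '1', "s2k2": 'True'}}

    # serialize() returns a JSON string; deserialize() parses it back.
    encoded = codec.serialize(data)
    assert codec.deserialize(encoded) == data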