Merge "Configuration Groups for Redis"
This commit is contained in:
commit
eed54dbf0c
@ -25,3 +25,4 @@ testtools>=1.4.0
|
||||
discover
|
||||
testrepository>=0.0.18
|
||||
pymongo>=3.0.2
|
||||
redis>=2.10.0
|
||||
|
@ -16,6 +16,17 @@
|
||||
from trove.common import stream_codecs
|
||||
|
||||
|
||||
class RedisConfParser(object):
|
||||
|
||||
CODEC = stream_codecs.PropertiesCodec()
|
||||
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
|
||||
def parse(self):
|
||||
return self.CODEC.deserialize(self.config).items()
|
||||
|
||||
|
||||
class MySQLConfParser(object):
|
||||
|
||||
CODEC = stream_codecs.IniCodec(
|
||||
|
@ -33,6 +33,7 @@ ENV = utils.ENV
|
||||
SERVICE_PARSERS = {
|
||||
'mysql': configurations.MySQLConfParser,
|
||||
'percona': configurations.MySQLConfParser,
|
||||
'redis': configurations.RedisConfParser,
|
||||
}
|
||||
|
||||
|
||||
|
@ -230,6 +230,46 @@ def file_discovery(file_candidates):
|
||||
return file
|
||||
|
||||
|
||||
def start_service(service_candidates):
|
||||
_execute_service_command(service_candidates, 'cmd_start')
|
||||
|
||||
|
||||
def stop_service(service_candidates):
|
||||
_execute_service_command(service_candidates, 'cmd_stop')
|
||||
|
||||
|
||||
def enable_service_on_boot(service_candidates):
|
||||
_execute_service_command(service_candidates, 'cmd_enable')
|
||||
|
||||
|
||||
def disable_service_on_boot(service_candidates):
|
||||
_execute_service_command(service_candidates, 'cmd_disable')
|
||||
|
||||
|
||||
def _execute_service_command(service_candidates, command_key):
|
||||
"""
|
||||
:param service_candidates List of possible system service names.
|
||||
:type service_candidates list
|
||||
|
||||
:param command_key One of the actions returned by
|
||||
'service_discovery'.
|
||||
:type command_key string
|
||||
|
||||
:raises: :class:`UnprocessableEntity` if no candidate names given.
|
||||
:raises: :class:`RuntimeError` if command not found.
|
||||
"""
|
||||
if service_candidates:
|
||||
service = service_discovery(service_candidates)
|
||||
if command_key in service:
|
||||
utils.execute_with_timeout(service[command_key], shell=True)
|
||||
else:
|
||||
raise RuntimeError(_("Service control command not available: %s")
|
||||
% command_key)
|
||||
else:
|
||||
raise exception.UnprocessableEntity(_("Candidate service names not "
|
||||
"specified."))
|
||||
|
||||
|
||||
def service_discovery(service_candidates):
|
||||
"""
|
||||
This function discovers how to start, stop, enable and disable services
|
||||
|
@ -22,8 +22,6 @@ from trove.common import instance as rd_instance
|
||||
from trove.guestagent.common import operating_system
|
||||
from trove.guestagent.datastore.experimental.redis.service import (
|
||||
RedisApp)
|
||||
from trove.guestagent.datastore.experimental.redis.service import (
|
||||
RedisAppStatus)
|
||||
from trove.guestagent import dbaas
|
||||
from trove.guestagent import volume
|
||||
from trove.openstack.common import log as logging
|
||||
@ -42,6 +40,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
|
||||
def __init__(self):
|
||||
super(Manager, self).__init__(CONF)
|
||||
self._app = RedisApp()
|
||||
|
||||
@periodic_task.periodic_task
|
||||
def update_status(self, context):
|
||||
@ -50,7 +49,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
perodic task so it is automatically called every 3 ticks.
|
||||
"""
|
||||
LOG.debug("Update status called.")
|
||||
RedisAppStatus.get().update()
|
||||
self._app.status.update()
|
||||
|
||||
def rpc_ping(self, context):
|
||||
LOG.debug("Responding to RPC ping.")
|
||||
@ -71,8 +70,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
currently this does nothing.
|
||||
"""
|
||||
LOG.debug("Reset configuration called.")
|
||||
app = RedisApp(RedisAppStatus.get())
|
||||
app.reset_configuration(configuration)
|
||||
self._app.reset_configuration(configuration)
|
||||
|
||||
def _perform_restore(self, backup_info, context, restore_location, app):
|
||||
"""
|
||||
@ -93,8 +91,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
prepare handles all the base configuration of the redis instance.
|
||||
"""
|
||||
try:
|
||||
app = RedisApp(RedisAppStatus.get())
|
||||
RedisAppStatus.get().begin_install()
|
||||
self._app.status.begin_install()
|
||||
if device_path:
|
||||
device = volume.VolumeDevice(device_path)
|
||||
# unmount if device is already mounted
|
||||
@ -104,14 +101,15 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
operating_system.chown(mount_point, 'redis', 'redis',
|
||||
as_root=True)
|
||||
LOG.debug('Mounted the volume.')
|
||||
app.install_if_needed(packages)
|
||||
self._app.install_if_needed(packages)
|
||||
LOG.info(_('Writing redis configuration.'))
|
||||
app.write_config(config_contents)
|
||||
app.restart()
|
||||
self._app.configuration_manager.save_configuration(config_contents)
|
||||
self._app.apply_initial_guestagent_configuration()
|
||||
self._app.restart()
|
||||
LOG.info(_('Redis instance has been setup and configured.'))
|
||||
except Exception:
|
||||
LOG.exception(_("Error setting up Redis instance."))
|
||||
app.status.set_status(rd_instance.ServiceStatuses.FAILED)
|
||||
self._app.status.set_status(rd_instance.ServiceStatuses.FAILED)
|
||||
raise RuntimeError("prepare call has failed.")
|
||||
|
||||
def restart(self, context):
|
||||
@ -121,16 +119,14 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
gets a restart message from the taskmanager.
|
||||
"""
|
||||
LOG.debug("Restart called.")
|
||||
app = RedisApp(RedisAppStatus.get())
|
||||
app.restart()
|
||||
self._app.restart()
|
||||
|
||||
def start_db_with_conf_changes(self, context, config_contents):
|
||||
"""
|
||||
Start this redis instance with new conf changes.
|
||||
"""
|
||||
LOG.debug("Start DB with conf changes called.")
|
||||
app = RedisApp(RedisAppStatus.get())
|
||||
app.start_db_with_conf_changes(config_contents)
|
||||
self._app.start_db_with_conf_changes(config_contents)
|
||||
|
||||
def stop_db(self, context, do_not_start_on_reboot=False):
|
||||
"""
|
||||
@ -139,8 +135,7 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
gets a stop message from the taskmanager.
|
||||
"""
|
||||
LOG.debug("Stop DB called.")
|
||||
app = RedisApp(RedisAppStatus.get())
|
||||
app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
|
||||
self._app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
|
||||
|
||||
def get_filesystem_stats(self, context, fs_path):
|
||||
"""Gets the filesystem stats for the path given."""
|
||||
@ -177,13 +172,14 @@ class Manager(periodic_task.PeriodicTasks):
|
||||
|
||||
def update_overrides(self, context, overrides, remove=False):
|
||||
LOG.debug("Updating overrides.")
|
||||
raise exception.DatastoreOperationNotSupported(
|
||||
operation='update_overrides', datastore=MANAGER)
|
||||
if remove:
|
||||
self._app.remove_overrides()
|
||||
else:
|
||||
self._app.update_overrides(context, overrides, remove)
|
||||
|
||||
def apply_overrides(self, context, overrides):
|
||||
LOG.debug("Applying overrides.")
|
||||
raise exception.DatastoreOperationNotSupported(
|
||||
operation='apply_overrides', datastore=MANAGER)
|
||||
self._app.apply_overrides(self._app.admin, overrides)
|
||||
|
||||
def update_attributes(self, context, username, hostname, user_attrs):
|
||||
LOG.debug("Updating attributes.")
|
||||
|
@ -14,12 +14,17 @@
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import redis
|
||||
from redis.exceptions import BusyLoadingError, ConnectionError
|
||||
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common.i18n import _
|
||||
from trove.common import instance as rd_instance
|
||||
from trove.common.stream_codecs import PropertiesCodec, StringConverter
|
||||
from trove.common import utils as utils
|
||||
from trove.guestagent.common.configuration import ConfigurationManager
|
||||
from trove.guestagent.common.configuration import RollingOverrideStrategy
|
||||
from trove.guestagent.common import operating_system
|
||||
from trove.guestagent.datastore.experimental.redis import system
|
||||
from trove.guestagent.datastore import service
|
||||
@ -27,99 +32,35 @@ from trove.guestagent import pkg
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
TMP_REDIS_CONF = '/tmp/redis.conf.tmp'
|
||||
TIME_OUT = 1200
|
||||
TIME_OUT = 1200 # FIXME(pmalik): should probably use config timeout
|
||||
CONF = cfg.CONF
|
||||
packager = pkg.Package()
|
||||
|
||||
|
||||
def _load_redis_options():
|
||||
"""
|
||||
Reads the redis config file for all redis options.
|
||||
Right now this does not do any smart parsing and returns only key
|
||||
value pairs as a str, str.
|
||||
So: 'foo bar baz' becomes {'foo' : 'bar baz'}
|
||||
"""
|
||||
options = {}
|
||||
LOG.debug("Loading Redis options.")
|
||||
with open(system.REDIS_CONFIG, 'r') as fd:
|
||||
for opt in fd.readlines():
|
||||
opt = opt.rstrip().split(' ')
|
||||
options.update({opt[0]: ' '.join(opt[1:])})
|
||||
return options
|
||||
|
||||
|
||||
class RedisAppStatus(service.BaseDbStatus):
|
||||
"""
|
||||
Handles all of the status updating for the redis guest agent.
|
||||
"""
|
||||
@classmethod
|
||||
def get(cls):
|
||||
"""
|
||||
Gets an instance of the RedisAppStatus class.
|
||||
"""
|
||||
if not cls._instance:
|
||||
cls._instance = RedisAppStatus()
|
||||
return cls._instance
|
||||
|
||||
def __init__(self, client):
|
||||
super(RedisAppStatus, self).__init__()
|
||||
self.__client = client
|
||||
|
||||
def set_client(self, client):
|
||||
self.__client = client
|
||||
|
||||
def _get_actual_db_status(self):
|
||||
"""
|
||||
Gets the actual status of the Redis instance
|
||||
First it attempts to make a connection to the redis instance
|
||||
by making a PING request.
|
||||
If PING does not return PONG we do a ps
|
||||
to see if the process is blocked or hung.
|
||||
This implementation stinks but redis-cli only returns 0
|
||||
at this time.
|
||||
http://redis.googlecode.com/svn/trunk/redis-cli.c
|
||||
If we raise another exception.ProcessExecutionError while
|
||||
running ps.
|
||||
We attempt to locate the PID file and see if the process
|
||||
is crashed or shutdown.
|
||||
Remember by default execute_with_timeout raises this exception
|
||||
if a non 0 status code is returned from the cmd called.
|
||||
"""
|
||||
options = _load_redis_options()
|
||||
out = ""
|
||||
err = ""
|
||||
try:
|
||||
if 'requirepass' in options:
|
||||
LOG.debug('Password is set, running ping with password.')
|
||||
out, err = utils.execute_with_timeout(
|
||||
system.REDIS_CLI,
|
||||
'-a',
|
||||
options['requirepass'],
|
||||
'PING',
|
||||
run_as_root=True,
|
||||
root_helper='sudo')
|
||||
else:
|
||||
LOG.debug('Password not set, running ping without password.')
|
||||
out, err = utils.execute_with_timeout(
|
||||
system.REDIS_CLI,
|
||||
'PING',
|
||||
run_as_root=True,
|
||||
root_helper='sudo')
|
||||
LOG.info(_('Redis Service Status is RUNNING.'))
|
||||
return rd_instance.ServiceStatuses.RUNNING
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.exception(_('Process execution error on redis-cli.'))
|
||||
if 'PONG' not in out:
|
||||
try:
|
||||
out, err = utils.execute_with_timeout('/bin/ps', '-C',
|
||||
'redis-server', 'h')
|
||||
pid = out.split()[0]
|
||||
LOG.debug('Redis pid: %s.' % (pid))
|
||||
LOG.info(_('Redis Service Status is BLOCKED.'))
|
||||
return rd_instance.ServiceStatuses.BLOCKED
|
||||
except exception.ProcessExecutionError:
|
||||
pid_file = options.get('pidfile',
|
||||
'/var/run/redis/redis-server.pid')
|
||||
if os.path.exists(pid_file):
|
||||
LOG.info(_('Redis Service Status is CRASHED.'))
|
||||
return rd_instance.ServiceStatuses.CRASHED
|
||||
else:
|
||||
LOG.info(_('Redis Service Status is SHUTDOWN.'))
|
||||
return rd_instance.ServiceStatuses.SHUTDOWN
|
||||
if self.__client.ping():
|
||||
return rd_instance.ServiceStatuses.RUNNING
|
||||
except ConnectionError:
|
||||
return rd_instance.ServiceStatuses.SHUTDOWN
|
||||
except BusyLoadingError:
|
||||
return rd_instance.ServiceStatuses.BLOCKED
|
||||
except Exception:
|
||||
LOG.exception(_("Error getting Redis status."))
|
||||
|
||||
return rd_instance.ServiceStatuses.CRASHED
|
||||
|
||||
|
||||
class RedisApp(object):
|
||||
@ -128,7 +69,7 @@ class RedisApp(object):
|
||||
on a trove instance.
|
||||
"""
|
||||
|
||||
def __init__(self, status, state_change_wait_time=None):
|
||||
def __init__(self, state_change_wait_time=None):
|
||||
"""
|
||||
Sets default status and state_change_wait_time
|
||||
"""
|
||||
@ -136,7 +77,30 @@ class RedisApp(object):
|
||||
self.state_change_wait_time = state_change_wait_time
|
||||
else:
|
||||
self.state_change_wait_time = CONF.state_change_wait_time
|
||||
self.status = status
|
||||
|
||||
config_value_mappings = {'yes': True, 'no': False, "''": None}
|
||||
self._value_converter = StringConverter(config_value_mappings)
|
||||
self.configuration_manager = ConfigurationManager(
|
||||
system.REDIS_CONFIG,
|
||||
system.REDIS_OWNER, system.REDIS_OWNER,
|
||||
PropertiesCodec(
|
||||
unpack_singletons=False,
|
||||
string_mappings=config_value_mappings
|
||||
), requires_root=True)
|
||||
|
||||
import_dir = os.path.dirname(system.REDIS_CONFIG)
|
||||
override_strategy = RollingOverrideStrategy(import_dir)
|
||||
|
||||
self.configuration_manager.set_override_strategy(override_strategy)
|
||||
|
||||
self.admin = self._build_admin_client()
|
||||
self.status = RedisAppStatus(self.admin)
|
||||
|
||||
def _build_admin_client(self):
|
||||
password = self.get_configuration_property('requirepass')
|
||||
socket = self.get_configuration_property('unixsocket')
|
||||
|
||||
return RedisAdmin(password=password, unix_socket_path=socket)
|
||||
|
||||
def install_if_needed(self, packages):
|
||||
"""
|
||||
@ -173,28 +137,14 @@ class RedisApp(object):
|
||||
Enables redis on boot.
|
||||
"""
|
||||
LOG.info(_('Enabling Redis on boot.'))
|
||||
try:
|
||||
redis_service = operating_system.service_discovery(
|
||||
system.SERVICE_CANDIDATES)
|
||||
utils.execute_with_timeout(
|
||||
redis_service['cmd_enable'], shell=True)
|
||||
except KeyError:
|
||||
raise RuntimeError(_(
|
||||
"Command to enable Redis on boot not found."))
|
||||
operating_system.enable_service_on_boot(system.SERVICE_CANDIDATES)
|
||||
|
||||
def _disable_redis_on_boot(self):
|
||||
"""
|
||||
Disables redis on boot.
|
||||
"""
|
||||
LOG.info(_("Disabling Redis on boot."))
|
||||
try:
|
||||
redis_service = operating_system.service_discovery(
|
||||
system.SERVICE_CANDIDATES)
|
||||
utils.execute_with_timeout(
|
||||
redis_service['cmd_disable'], shell=True)
|
||||
except KeyError:
|
||||
raise RuntimeError(
|
||||
"Command to disable Redis on boot not found.")
|
||||
operating_system.disable_service_on_boot(system.SERVICE_CANDIDATES)
|
||||
|
||||
def stop_db(self, update_db=False, do_not_start_on_reboot=False):
|
||||
"""
|
||||
@ -203,9 +153,8 @@ class RedisApp(object):
|
||||
LOG.info(_('Stopping redis.'))
|
||||
if do_not_start_on_reboot:
|
||||
self._disable_redis_on_boot()
|
||||
cmd = 'sudo %s' % (system.REDIS_CMD_STOP)
|
||||
utils.execute_with_timeout(cmd,
|
||||
shell=True)
|
||||
|
||||
operating_system.stop_service(system.SERVICE_CANDIDATES)
|
||||
if not self.status.wait_for_real_status_to_change_to(
|
||||
rd_instance.ServiceStatuses.SHUTDOWN,
|
||||
self.state_change_wait_time, update_db):
|
||||
@ -224,15 +173,46 @@ class RedisApp(object):
|
||||
finally:
|
||||
self.status.end_install_or_restart()
|
||||
|
||||
def write_config(self, config_contents):
|
||||
def update_overrides(self, context, overrides, remove=False):
|
||||
self.configuration_manager.update_override(overrides)
|
||||
|
||||
def apply_overrides(self, client, overrides):
|
||||
"""Use the 'CONFIG SET' command to apply configuration at runtime.
|
||||
|
||||
Commands that appear multiple times have values separated by a
|
||||
white space. For instance, the following two 'save' directives from the
|
||||
configuration file...
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
|
||||
... would be applied in a single command as:
|
||||
|
||||
CONFIG SET save "900 1 300 10"
|
||||
|
||||
Note that the 'CONFIG' command has been renamed to prevent
|
||||
users from using it to bypass configuration groups.
|
||||
"""
|
||||
Write the redis config.
|
||||
for prop_name, prop_args in overrides.items():
|
||||
args_string = self._join_lists(
|
||||
self._value_converter.to_strings(prop_args), ' ')
|
||||
client.config_set(prop_name, args_string)
|
||||
|
||||
def _join_lists(self, items, sep):
|
||||
"""Join list items (including items from sub-lists) into a string.
|
||||
Non-list inputs are returned unchanged.
|
||||
|
||||
_join_lists('1234', ' ') = "1234"
|
||||
_join_lists(['1','2','3','4'], ' ') = "1 2 3 4"
|
||||
_join_lists([['1','2'], ['3','4']], ' ') = "1 2 3 4"
|
||||
"""
|
||||
LOG.debug("Writing Redis config.")
|
||||
with open(TMP_REDIS_CONF, 'w') as fd:
|
||||
fd.write(config_contents)
|
||||
operating_system.move(TMP_REDIS_CONF, system.REDIS_CONFIG,
|
||||
as_root=True)
|
||||
if isinstance(items, list):
|
||||
return sep.join([sep.join(e) if isinstance(e, list) else e
|
||||
for e in items])
|
||||
return items
|
||||
|
||||
def remove_overrides(self):
|
||||
self.configuration_manager.remove_override()
|
||||
|
||||
def start_db_with_conf_changes(self, config_contents):
|
||||
LOG.info(_('Starting redis with conf changes.'))
|
||||
@ -241,13 +221,16 @@ class RedisApp(object):
|
||||
LOG.debug(format, self.status)
|
||||
raise RuntimeError(format % self.status)
|
||||
LOG.info(_("Initiating config."))
|
||||
self.write_config(config_contents)
|
||||
self.configuration_manager.save_configuration(config_contents)
|
||||
# The configuration template has to be updated with
|
||||
# guestagent-controlled settings.
|
||||
self.apply_initial_guestagent_configuration()
|
||||
self.start_redis(True)
|
||||
|
||||
def reset_configuration(self, configuration):
|
||||
config_contents = configuration['config_contents']
|
||||
LOG.info(_("Resetting configuration."))
|
||||
self.write_config(config_contents)
|
||||
config_contents = configuration['config_contents']
|
||||
self.configuration_manager.save_configuration(config_contents)
|
||||
|
||||
def start_redis(self, update_db=False):
|
||||
"""
|
||||
@ -255,12 +238,7 @@ class RedisApp(object):
|
||||
"""
|
||||
LOG.info(_("Starting redis."))
|
||||
self._enable_redis_on_boot()
|
||||
try:
|
||||
cmd = 'sudo %s' % (system.REDIS_CMD_START)
|
||||
utils.execute_with_timeout(cmd,
|
||||
shell=True)
|
||||
except exception.ProcessExecutionError:
|
||||
pass
|
||||
operating_system.start_service(system.SERVICE_CANDIDATES)
|
||||
if not self.status.wait_for_real_status_to_change_to(
|
||||
rd_instance.ServiceStatuses.RUNNING,
|
||||
self.state_change_wait_time, update_db):
|
||||
@ -273,3 +251,167 @@ class RedisApp(object):
|
||||
except exception.ProcessExecutionError:
|
||||
LOG.exception(_('Error killing stalled redis start command.'))
|
||||
self.status.end_install_or_restart()
|
||||
|
||||
def apply_initial_guestagent_configuration(self):
|
||||
"""Update guestagent-controlled configuration properties.
|
||||
"""
|
||||
|
||||
# Hide the 'CONFIG' command from end users by mangling its name.
|
||||
self.admin.set_config_command_name(self._mangle_config_command_name())
|
||||
|
||||
self.configuration_manager.update_configuration(
|
||||
{'daemonize': 'yes',
|
||||
'pidfile': system.REDIS_PID_FILE,
|
||||
'logfile': system.REDIS_LOG_FILE,
|
||||
'dir': system.REDIS_DATA_DIR})
|
||||
|
||||
def get_config_command_name(self):
|
||||
"""Get current name of the 'CONFIG' command.
|
||||
"""
|
||||
renamed_cmds = self.configuration_manager.get_value('rename-command')
|
||||
for name_pair in renamed_cmds:
|
||||
if name_pair[0] == 'CONFIG':
|
||||
return name_pair[1]
|
||||
|
||||
return None
|
||||
|
||||
def _mangle_config_command_name(self):
|
||||
"""Hide the 'CONFIG' command from the clients by renaming it to a
|
||||
random string known only to the guestagent.
|
||||
Return the mangled name.
|
||||
"""
|
||||
mangled = utils.generate_random_password()
|
||||
self._rename_command('CONFIG', mangled)
|
||||
return mangled
|
||||
|
||||
def _rename_command(self, old_name, new_name):
|
||||
"""It is possible to completely disable a command by renaming it
|
||||
to an empty string.
|
||||
"""
|
||||
self.configuration_manager.update_configuration(
|
||||
{'rename-command': [old_name, new_name]})
|
||||
|
||||
def get_logfile(self):
|
||||
"""Specify the log file name. Also the empty string can be used to
|
||||
force Redis to log on the standard output.
|
||||
Note that if you use standard output for logging but daemonize,
|
||||
logs will be sent to /dev/null
|
||||
"""
|
||||
return self.get_configuration_property('logfile')
|
||||
|
||||
def get_db_filename(self):
|
||||
"""The filename where to dump the DB.
|
||||
"""
|
||||
return self.get_configuration_property('dbfilename')
|
||||
|
||||
def get_working_dir(self):
|
||||
"""The DB will be written inside this directory,
|
||||
with the filename specified the 'dbfilename' configuration directive.
|
||||
The Append Only File will also be created inside this directory.
|
||||
"""
|
||||
return self.get_configuration_property('dir')
|
||||
|
||||
def get_auth_password(self):
|
||||
"""Client authentication password for this instance or None if not set.
|
||||
"""
|
||||
return self.get_configuration_property('requirepass')
|
||||
|
||||
def is_appendonly_enabled(self):
|
||||
"""True if the Append Only File (AOF) persistence mode is enabled.
|
||||
"""
|
||||
return self.get_configuration_property('appendonly', False)
|
||||
|
||||
def get_append_file_name(self):
|
||||
"""The name of the append only file (AOF).
|
||||
"""
|
||||
return self.get_configuration_property('appendfilename')
|
||||
|
||||
def is_cluster_enabled(self):
|
||||
"""Only nodes that are started as cluster nodes can be part of a
|
||||
Redis Cluster.
|
||||
"""
|
||||
return self.get_configuration_property('cluster-enabled', False)
|
||||
|
||||
def enable_cluster(self):
|
||||
"""In order to start a Redis instance as a cluster node enable the
|
||||
cluster support
|
||||
"""
|
||||
self.configuration_manager.update_configuration(
|
||||
{'cluster-enabled': 'yes'})
|
||||
|
||||
def get_cluster_config_filename(self):
|
||||
"""Cluster node configuration file.
|
||||
"""
|
||||
return self.get_configuration_property('cluster-config-file')
|
||||
|
||||
def set_cluster_config_filename(self, name):
|
||||
"""Make sure that instances running in the same system do not have
|
||||
overlapping cluster configuration file names.
|
||||
"""
|
||||
self.configuration_manager.update_configuration(
|
||||
{'cluster-config-file': name})
|
||||
|
||||
def get_cluster_node_timeout(self):
|
||||
"""Cluster node timeout is the amount of milliseconds a node must be
|
||||
unreachable for it to be considered in failure state.
|
||||
"""
|
||||
return self.get_configuration_property('cluster-node-timeout')
|
||||
|
||||
def get_configuration_property(self, name, default=None):
|
||||
"""Return the value of a Redis configuration property.
|
||||
Returns a single value for single-argument properties or
|
||||
a list otherwise.
|
||||
"""
|
||||
return utils.unpack_singleton(
|
||||
self.configuration_manager.get_value(name, default))
|
||||
|
||||
|
||||
class RedisAdmin(object):
|
||||
"""Handles administrative tasks on the Redis database.
|
||||
"""
|
||||
|
||||
DEFAULT_CONFIG_CMD = 'CONFIG'
|
||||
|
||||
def __init__(self, password=None, unix_socket_path=None):
|
||||
self.__client = redis.StrictRedis(
|
||||
password=password, unix_socket_path=unix_socket_path)
|
||||
self.__config_cmd_name = self.DEFAULT_CONFIG_CMD
|
||||
|
||||
def set_config_command_name(self, name):
|
||||
"""Set name of the 'CONFIG' command or None for default.
|
||||
"""
|
||||
self.__config_cmd_name = name or self.DEFAULT_CONFIG_CMD
|
||||
|
||||
def ping(self):
|
||||
"""Ping the Redis server and return True if a response is received.
|
||||
"""
|
||||
return self.__client.ping()
|
||||
|
||||
def config_set(self, name, value):
|
||||
response = self.execute(
|
||||
'%s %s' % (self.__config_cmd_name, 'SET'), name, value)
|
||||
if not self._is_ok_response(response):
|
||||
raise exception.UnprocessableEntity(
|
||||
_("Could not set configuration property '%(name)s' to "
|
||||
"'%(value)s'.") % {'name': name, 'value': value})
|
||||
|
||||
def _is_ok_response(self, response):
|
||||
"""Return True if a given Redis response is 'OK'.
|
||||
"""
|
||||
return response and redis.client.bool_ok(response)
|
||||
|
||||
def execute(self, cmd_name, *cmd_args):
|
||||
"""Execute a command and return a parsed response.
|
||||
"""
|
||||
try:
|
||||
return self._execute_command(cmd_name, *cmd_args)
|
||||
except Exception as e:
|
||||
LOG.exception(e)
|
||||
raise exception.TroveError(
|
||||
_("Redis command '%(cmd_name)s %(cmd_args)s' failed.")
|
||||
% {'cmd_name': cmd_name, 'cmd_args': ' '.join(cmd_args)})
|
||||
|
||||
def _execute_command(self, *args, **options):
|
||||
"""Execute a command and return a parsed response.
|
||||
"""
|
||||
return self.__client.execute_command(*args, **options)
|
||||
|
@ -18,24 +18,20 @@ Determines operating system version and OS dependent commands.
|
||||
"""
|
||||
from trove.guestagent.common.operating_system import get_os
|
||||
|
||||
REDIS_OWNER = 'redis'
|
||||
|
||||
OS = get_os()
|
||||
REDIS_CONFIG = '/etc/redis/redis.conf'
|
||||
REDIS_PID_FILE = '/var/run/redis/redis-server.pid'
|
||||
REDIS_LOG_FILE = '/var/log/redis/server.log'
|
||||
REDIS_CONF_DIR = '/etc/redis'
|
||||
REDIS_DATA_DIR = '/var/lib/redis'
|
||||
REDIS_INIT = '/etc/init/redis-server.conf'
|
||||
REDIS_CLI = '/usr/bin/redis-cli'
|
||||
REDIS_BIN = '/usr/bin/redis-server'
|
||||
REDIS_CMD_ENABLE = 'update-rc.d redis-server enable'
|
||||
REDIS_CMD_DISABLE = 'update-rc.d redis-server disable'
|
||||
REDIS_CMD_START = 'service redis-server start || /bin/true'
|
||||
REDIS_CMD_STOP = 'service redis-server stop || /bin/true'
|
||||
REDIS_PACKAGE = 'redis-server'
|
||||
SERVICE_CANDIDATES = ['redis-server']
|
||||
|
||||
if OS is 'redhat':
|
||||
REDIS_CONFIG = '/etc/redis.conf'
|
||||
REDIS_CMD_ENABLE = 'systemctl enable redis'
|
||||
REDIS_CMD_DISABLE = 'systemctl disable redis'
|
||||
REDIS_CMD_START = 'systemctl start redis'
|
||||
REDIS_CMD_STOP = 'systemctl stop redis'
|
||||
REDIS_PACKAGE = 'redis'
|
||||
SERVICE_CANDIDATES = ['redis']
|
||||
|
@ -1,41 +1,954 @@
|
||||
# Redis configuration file example
|
||||
|
||||
# Note on units: when memory size is needed, it is possible to specify
|
||||
# it in the usual form of 1k 5GB 4M and so forth:
|
||||
#
|
||||
# 1k => 1000 bytes
|
||||
# 1kb => 1024 bytes
|
||||
# 1m => 1000000 bytes
|
||||
# 1mb => 1024*1024 bytes
|
||||
# 1g => 1000000000 bytes
|
||||
# 1gb => 1024*1024*1024 bytes
|
||||
#
|
||||
# units are case insensitive so 1GB 1Gb 1gB are all the same.
|
||||
|
||||
################################## INCLUDES ###################################
|
||||
|
||||
# Include one or more other config files here. This is useful if you
|
||||
# have a standard template that goes to all Redis servers but also need
|
||||
# to customize a few per-server settings. Include files can include
|
||||
# other files, so use this wisely.
|
||||
#
|
||||
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
|
||||
# from admin or Redis Sentinel. Since Redis always uses the last processed
|
||||
# line as value of a configuration directive, you'd better put includes
|
||||
# at the beginning of this file to avoid overwriting config change at runtime.
|
||||
#
|
||||
# If instead you are interested in using includes to override configuration
|
||||
# options, it is better to use include as the last line.
|
||||
#
|
||||
# include /path/to/local.conf
|
||||
# include /path/to/other.conf
|
||||
|
||||
################################ GENERAL #####################################
|
||||
|
||||
# By default Redis does not run as a daemon. Use 'yes' if you need it.
|
||||
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
|
||||
#
|
||||
# Trove currently requires the database to run as a service.
|
||||
daemonize yes
|
||||
|
||||
# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
|
||||
# default. You can specify a custom pid file location here.
|
||||
#
|
||||
# This has to be a writable path.
|
||||
# Trove will override this property based on the underlying OS.
|
||||
pidfile /var/run/redis/redis-server.pid
|
||||
|
||||
# Accept connections on the specified port, default is 6379.
|
||||
# If port 0 is specified Redis will not listen on a TCP socket.
|
||||
port 6379
|
||||
|
||||
# TCP listen() backlog.
|
||||
#
|
||||
# In high requests-per-second environments you need an high backlog in order
|
||||
# to avoid slow clients connections issues. Note that the Linux kernel
|
||||
# will silently truncate it to the value of /proc/sys/net/core/somaxconn so
|
||||
# make sure to raise both the value of somaxconn and tcp_max_syn_backlog
|
||||
# in order to get the desired effect.
|
||||
tcp-backlog 511
|
||||
|
||||
# By default Redis listens for connections from all the network interfaces
|
||||
# available on the server. It is possible to listen to just one or multiple
|
||||
# interfaces using the "bind" configuration directive, followed by one or
|
||||
# more IP addresses.
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# bind 192.168.1.100 10.0.0.1
|
||||
# bind 127.0.0.1
|
||||
|
||||
# Specify the path for the Unix socket that will be used to listen for
|
||||
# incoming connections. There is no default, so Redis will not listen
|
||||
# on a unix socket when not specified.
|
||||
#
|
||||
# Trove uses Unix sockets internally to connect to the database.
|
||||
# Trove will override this property based on the underlying OS.
|
||||
# unixsocket /tmp/redis.sock
|
||||
# unixsocketperm 700
|
||||
|
||||
# Close the connection after a client is idle for N seconds (0 to disable)
|
||||
timeout 0
|
||||
|
||||
# TCP keepalive.
|
||||
#
|
||||
# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
|
||||
# of communication. This is useful for two reasons:
|
||||
#
|
||||
# 1) Detect dead peers.
|
||||
# 2) Take the connection alive from the point of view of network
|
||||
# equipment in the middle.
|
||||
#
|
||||
# On Linux, the specified value (in seconds) is the period used to send ACKs.
|
||||
# Note that to close the connection the double of the time is needed.
|
||||
# On other kernels the period depends on the kernel configuration.
|
||||
#
|
||||
# A reasonable value for this option is 60 seconds.
|
||||
tcp-keepalive 0
|
||||
|
||||
# Specify the server verbosity level.
|
||||
# This can be one of:
|
||||
# debug (a lot of information, useful for development/testing)
|
||||
# verbose (many rarely useful info, but not a mess like the debug level)
|
||||
# notice (moderately verbose, what you want in production probably)
|
||||
# warning (only very important / critical messages are logged)
|
||||
loglevel notice
|
||||
|
||||
# Specify the log file name. Also the empty string can be used to force
|
||||
# Redis to log on the standard output. Note that if you use standard
|
||||
# output for logging but daemonize, logs will be sent to /dev/null
|
||||
#
|
||||
# Trove will override this property based on the underlying OS.
|
||||
logfile ""
|
||||
|
||||
# To enable logging to the system logger, just set 'syslog-enabled' to yes,
|
||||
# and optionally update the other syslog parameters to suit your needs.
|
||||
# syslog-enabled no
|
||||
|
||||
# Specify the syslog identity.
|
||||
# syslog-ident redis
|
||||
|
||||
# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
|
||||
# syslog-facility local0
|
||||
|
||||
# Set the number of databases. The default database is DB 0, you can select
|
||||
# a different one on a per-connection basis using SELECT <dbid> where
|
||||
# dbid is a number between 0 and 'databases'-1
|
||||
databases 16
|
||||
|
||||
################################ SNAPSHOTTING ################################
|
||||
#
|
||||
# Save the DB on disk:
|
||||
#
|
||||
# save <seconds> <changes>
|
||||
#
|
||||
# Will save the DB if both the given number of seconds and the given
|
||||
# number of write operations against the DB occurred.
|
||||
#
|
||||
# In the example below the behaviour will be to save:
|
||||
# after 900 sec (15 min) if at least 1 key changed
|
||||
# after 300 sec (5 min) if at least 10 keys changed
|
||||
# after 60 sec if at least 10000 keys changed
|
||||
#
|
||||
# Note: you can disable saving completely by commenting out all "save" lines.
|
||||
#
|
||||
# It is also possible to remove all the previously configured save
|
||||
# points by adding a save directive with a single empty string argument
|
||||
# like in the following example:
|
||||
#
|
||||
# save ""
|
||||
|
||||
save 900 1
|
||||
save 300 10
|
||||
save 60 10000
|
||||
|
||||
# By default Redis will stop accepting writes if RDB snapshots are enabled
|
||||
# (at least one save point) and the latest background save failed.
|
||||
# This will make the user aware (in a hard way) that data is not persisting
|
||||
# on disk properly, otherwise chances are that no one will notice and some
|
||||
# disaster will happen.
|
||||
#
|
||||
# If the background saving process will start working again Redis will
|
||||
# automatically allow writes again.
|
||||
#
|
||||
# However if you have setup your proper monitoring of the Redis server
|
||||
# and persistence, you may want to disable this feature so that Redis will
|
||||
# continue to work as usual even if there are problems with disk,
|
||||
# permissions, and so forth.
|
||||
stop-writes-on-bgsave-error yes
|
||||
|
||||
# Compress string objects using LZF when dump .rdb databases?
|
||||
# By default that's set to 'yes' as it's almost always a win.
|
||||
# If you want to save some CPU in the saving child set it to 'no' but
|
||||
# the dataset will likely be bigger if you have compressible values or keys.
|
||||
rdbcompression yes
|
||||
|
||||
# Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
|
||||
# This makes the format more resistant to corruption but there is a performance
|
||||
# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
|
||||
# for maximum performance.
|
||||
#
|
||||
# RDB files created with checksum disabled have a checksum of zero that will
|
||||
# tell the loading code to skip the check.
|
||||
rdbchecksum yes
|
||||
|
||||
# The filename where to dump the DB
|
||||
dbfilename dump.rdb
|
||||
dir /var/lib/redis
|
||||
|
||||
# The working directory.
|
||||
#
|
||||
# The DB will be written inside this directory, with the filename specified
|
||||
# above using the 'dbfilename' configuration directive.
|
||||
#
|
||||
# The Append Only File will also be created inside this directory.
|
||||
#
|
||||
# Note that you must specify a directory here, not a file name.
|
||||
#
|
||||
# This has to be an existing path to a writable directory.
|
||||
# Trove will override this property based on the underlying OS.
|
||||
dir /tmp
|
||||
|
||||
################################# REPLICATION #################################
|
||||
|
||||
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
|
||||
# another Redis server. A few things to understand ASAP about Redis replication.
|
||||
#
|
||||
# 1) Redis replication is asynchronous, but you can configure a master to
|
||||
# stop accepting writes if it appears to be not connected with at least
|
||||
# a given number of slaves.
|
||||
# 2) Redis slaves are able to perform a partial resynchronization with the
|
||||
# master if the replication link is lost for a relatively small amount of
|
||||
# time. You may want to configure the replication backlog size (see the next
|
||||
# sections of this file) with a sensible value depending on your needs.
|
||||
# 3) Replication is automatic and does not need user intervention. After a
|
||||
# network partition slaves automatically try to reconnect to masters
|
||||
# and resynchronize with them.
|
||||
#
|
||||
# slaveof <masterip> <masterport>
|
||||
|
||||
# If the master is password protected (using the "requirepass" configuration
|
||||
# directive below) it is possible to tell the slave to authenticate before
|
||||
# starting the replication synchronization process, otherwise the master will
|
||||
# refuse the slave request.
|
||||
#
|
||||
# masterauth <master-password>
|
||||
|
||||
# When a slave loses its connection with the master, or when the replication
|
||||
# is still in progress, the slave can act in two different ways:
|
||||
#
|
||||
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
|
||||
# still reply to client requests, possibly with out of date data, or the
|
||||
# data set may just be empty if this is the first synchronization.
|
||||
#
|
||||
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
|
||||
# an error "SYNC with master in progress" to all the kind of commands
|
||||
# but to INFO and SLAVEOF.
|
||||
#
|
||||
slave-serve-stale-data yes
|
||||
|
||||
# You can configure a slave instance to accept writes or not. Writing against
|
||||
# a slave instance may be useful to store some ephemeral data (because data
|
||||
# written on a slave will be easily deleted after resync with the master) but
|
||||
# may also cause problems if clients are writing to it because of a
|
||||
# misconfiguration.
|
||||
#
|
||||
# Since Redis 2.6 by default slaves are read-only.
|
||||
#
|
||||
# Note: read only slaves are not designed to be exposed to untrusted clients
|
||||
# on the internet. It's just a protection layer against misuse of the instance.
|
||||
# Still a read only slave exports by default all the administrative commands
|
||||
# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
|
||||
# security of read only slaves using 'rename-command' to shadow all the
|
||||
# administrative / dangerous commands.
|
||||
slave-read-only yes
|
||||
|
||||
# Replication SYNC strategy: disk or socket.
|
||||
#
|
||||
# -------------------------------------------------------
|
||||
# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
|
||||
# -------------------------------------------------------
|
||||
#
|
||||
# New slaves and reconnecting slaves that are not able to continue the replication
|
||||
# process just receiving differences, need to do what is called a "full
|
||||
# synchronization". An RDB file is transmitted from the master to the slaves.
|
||||
# The transmission can happen in two different ways:
|
||||
#
|
||||
# 1) Disk-backed: The Redis master creates a new process that writes the RDB
|
||||
# file on disk. Later the file is transferred by the parent
|
||||
# process to the slaves incrementally.
|
||||
# 2) Diskless: The Redis master creates a new process that directly writes the
|
||||
# RDB file to slave sockets, without touching the disk at all.
|
||||
#
|
||||
# With disk-backed replication, while the RDB file is generated, more slaves
|
||||
# can be queued and served with the RDB file as soon as the current child producing
|
||||
# the RDB file finishes its work. With diskless replication instead once
|
||||
# the transfer starts, new slaves arriving will be queued and a new transfer
|
||||
# will start when the current one terminates.
|
||||
#
|
||||
# When diskless replication is used, the master waits a configurable amount of
|
||||
# time (in seconds) before starting the transfer in the hope that multiple slaves
|
||||
# will arrive and the transfer can be parallelized.
|
||||
#
|
||||
# With slow disks and fast (large bandwidth) networks, diskless replication
|
||||
# works better.
|
||||
repl-diskless-sync no
|
||||
|
||||
# When diskless replication is enabled, it is possible to configure the delay
|
||||
# the server waits in order to spawn the child that transfers the RDB via socket
|
||||
# to the slaves.
|
||||
#
|
||||
# This is important since once the transfer starts, it is not possible to serve
|
||||
# new slaves arriving, that will be queued for the next RDB transfer, so the server
|
||||
# waits a delay in order to let more slaves arrive.
|
||||
#
|
||||
# The delay is specified in seconds, and by default is 5 seconds. To disable
|
||||
# it entirely just set it to 0 seconds and the transfer will start ASAP.
|
||||
repl-diskless-sync-delay 5
|
||||
|
||||
# Slaves send PINGs to server in a predefined interval. It's possible to change
|
||||
# this interval with the repl_ping_slave_period option. The default value is 10
|
||||
# seconds.
|
||||
#
|
||||
# repl-ping-slave-period 10
|
||||
|
||||
# The following option sets the replication timeout for:
|
||||
#
|
||||
# 1) Bulk transfer I/O during SYNC, from the point of view of slave.
|
||||
# 2) Master timeout from the point of view of slaves (data, pings).
|
||||
# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
|
||||
#
|
||||
# It is important to make sure that this value is greater than the value
|
||||
# specified for repl-ping-slave-period otherwise a timeout will be detected
|
||||
# every time there is low traffic between the master and the slave.
|
||||
#
|
||||
# repl-timeout 60
|
||||
|
||||
# Disable TCP_NODELAY on the slave socket after SYNC?
|
||||
#
|
||||
# If you select "yes" Redis will use a smaller number of TCP packets and
|
||||
# less bandwidth to send data to slaves. But this can add a delay for
|
||||
# the data to appear on the slave side, up to 40 milliseconds with
|
||||
# Linux kernels using a default configuration.
|
||||
#
|
||||
# If you select "no" the delay for data to appear on the slave side will
|
||||
# be reduced but more bandwidth will be used for replication.
|
||||
#
|
||||
# By default we optimize for low latency, but in very high traffic conditions
|
||||
# or when the master and slaves are many hops away, turning this to "yes" may
|
||||
# be a good idea.
|
||||
repl-disable-tcp-nodelay no
|
||||
|
||||
# Set the replication backlog size. The backlog is a buffer that accumulates
|
||||
# slave data when slaves are disconnected for some time, so that when a slave
|
||||
# wants to reconnect again, often a full resync is not needed, but a partial
|
||||
# resync is enough, just passing the portion of data the slave missed while
|
||||
# disconnected.
|
||||
#
|
||||
# The bigger the replication backlog, the longer the time the slave can be
|
||||
# disconnected and later be able to perform a partial resynchronization.
|
||||
#
|
||||
# The backlog is only allocated once there is at least a slave connected.
|
||||
#
|
||||
# repl-backlog-size 1mb
|
||||
|
||||
# After a master has no longer connected slaves for some time, the backlog
|
||||
# will be freed. The following option configures the amount of seconds that
|
||||
# need to elapse, starting from the time the last slave disconnected, for
|
||||
# the backlog buffer to be freed.
|
||||
#
|
||||
# A value of 0 means to never release the backlog.
|
||||
#
|
||||
# repl-backlog-ttl 3600
|
||||
|
||||
# The slave priority is an integer number published by Redis in the INFO output.
|
||||
# It is used by Redis Sentinel in order to select a slave to promote into a
|
||||
# master if the master is no longer working correctly.
|
||||
#
|
||||
# A slave with a low priority number is considered better for promotion, so
|
||||
# for instance if there are three slaves with priority 10, 100, 25 Sentinel will
|
||||
# pick the one with priority 10, that is the lowest.
|
||||
#
|
||||
# However a special priority of 0 marks the slave as not able to perform the
|
||||
# role of master, so a slave with priority of 0 will never be selected by
|
||||
# Redis Sentinel for promotion.
|
||||
#
|
||||
# By default the priority is 100.
|
||||
slave-priority 100
|
||||
rename-command CONFIG ""
|
||||
maxclients 10000
|
||||
maxmemory 1073741824
|
||||
maxmemory-policy volatile-lru
|
||||
maxmemory-samples 3
|
||||
appendonly yes
|
||||
appendfilename appendonly.aof
|
||||
|
||||
# It is possible for a master to stop accepting writes if there are less than
|
||||
# N slaves connected, having a lag less or equal than M seconds.
|
||||
#
|
||||
# The N slaves need to be in "online" state.
|
||||
#
|
||||
# The lag in seconds, that must be <= the specified value, is calculated from
|
||||
# the last ping received from the slave, that is usually sent every second.
|
||||
#
|
||||
# This option does not GUARANTEE that N replicas will accept the write, but
|
||||
# will limit the window of exposure for lost writes in case not enough slaves
|
||||
# are available, to the specified number of seconds.
|
||||
#
|
||||
# For example to require at least 3 slaves with a lag <= 10 seconds use:
|
||||
#
|
||||
# min-slaves-to-write 3
|
||||
# min-slaves-max-lag 10
|
||||
#
|
||||
# Setting one or the other to 0 disables the feature.
|
||||
#
|
||||
# By default min-slaves-to-write is set to 0 (feature disabled) and
|
||||
# min-slaves-max-lag is set to 10.
|
||||
|
||||
################################## SECURITY ###################################
|
||||
|
||||
# Require clients to issue AUTH <PASSWORD> before processing any other
|
||||
# commands. This might be useful in environments in which you do not trust
|
||||
# others with access to the host running redis-server.
|
||||
#
|
||||
# This should stay commented out for backward compatibility and because most
|
||||
# people do not need auth (e.g. they run their own servers).
|
||||
#
|
||||
# Warning: since Redis is pretty fast an outside user can try up to
|
||||
# 150k passwords per second against a good box. This means that you should
|
||||
# use a very strong password otherwise it will be very easy to break.
|
||||
#
|
||||
# requirepass foobared
|
||||
|
||||
# Command renaming.
|
||||
#
|
||||
# It is possible to change the name of dangerous commands in a shared
|
||||
# environment. For instance the CONFIG command may be renamed into something
|
||||
# hard to guess so that it will still be available for internal-use tools
|
||||
# but not available for general clients.
|
||||
#
|
||||
# Example:
|
||||
#
|
||||
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
|
||||
#
|
||||
# It is also possible to completely kill a command by renaming it into
|
||||
# an empty string:
|
||||
#
|
||||
# rename-command CONFIG ""
|
||||
#
|
||||
# Please note that changing the name of commands that are logged into the
|
||||
# AOF file or transmitted to slaves may cause problems.
|
||||
#
|
||||
# Trove uses 'rename-command' internally to hide certain commands.
|
||||
|
||||
################################### LIMITS ####################################
|
||||
|
||||
# Set the max number of connected clients at the same time. By default
|
||||
# this limit is set to 10000 clients, however if the Redis server is not
|
||||
# able to configure the process file limit to allow for the specified limit
|
||||
# the max number of allowed clients is set to the current file limit
|
||||
# minus 32 (as Redis reserves a few file descriptors for internal uses).
|
||||
#
|
||||
# Once the limit is reached Redis will close all the new connections sending
|
||||
# an error 'max number of clients reached'.
|
||||
#
|
||||
# maxclients 10000
|
||||
|
||||
# Don't use more memory than the specified amount of bytes.
|
||||
# When the memory limit is reached Redis will try to remove keys
|
||||
# according to the eviction policy selected (see maxmemory-policy).
|
||||
#
|
||||
# If Redis can't remove keys according to the policy, or if the policy is
|
||||
# set to 'noeviction', Redis will start to reply with errors to commands
|
||||
# that would use more memory, like SET, LPUSH, and so on, and will continue
|
||||
# to reply to read-only commands like GET.
|
||||
#
|
||||
# This option is usually useful when using Redis as an LRU cache, or to set
|
||||
# a hard memory limit for an instance (using the 'noeviction' policy).
|
||||
#
|
||||
# WARNING: If you have slaves attached to an instance with maxmemory on,
|
||||
# the size of the output buffers needed to feed the slaves are subtracted
|
||||
# from the used memory count, so that network problems / resyncs will
|
||||
# not trigger a loop where keys are evicted, and in turn the output
|
||||
# buffer of slaves is full with DELs of keys evicted triggering the deletion
|
||||
# of more keys, and so forth until the database is completely emptied.
|
||||
#
|
||||
# In short... if you have slaves attached it is suggested that you set a lower
|
||||
# limit for maxmemory so that there is some free RAM on the system for slave
|
||||
# output buffers (but this is not needed if the policy is 'noeviction').
|
||||
#
|
||||
# maxmemory <bytes>
|
||||
|
||||
# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
|
||||
# is reached. You can select among five behaviors:
|
||||
#
|
||||
# volatile-lru -> remove the key with an expire set using an LRU algorithm
|
||||
# allkeys-lru -> remove any key according to the LRU algorithm
|
||||
# volatile-random -> remove a random key with an expire set
|
||||
# allkeys-random -> remove a random key, any key
|
||||
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
|
||||
# noeviction -> don't expire at all, just return an error on write operations
|
||||
#
|
||||
# Note: with any of the above policies, Redis will return an error on write
|
||||
# operations, when there are no suitable keys for eviction.
|
||||
#
|
||||
# At the date of writing these commands are: set setnx setex append
|
||||
# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
|
||||
# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
|
||||
# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
|
||||
# getset mset msetnx exec sort
|
||||
#
|
||||
# The default is:
|
||||
#
|
||||
# maxmemory-policy noeviction
|
||||
|
||||
# LRU and minimal TTL algorithms are not precise algorithms but approximated
|
||||
# algorithms (in order to save memory), so you can tune it for speed or
|
||||
# accuracy. By default Redis will check five keys and pick the one that was
|
||||
# used less recently, you can change the sample size using the following
|
||||
# configuration directive.
|
||||
#
|
||||
# The default of 5 produces good enough results. 10 approximates very closely
|
||||
# true LRU but costs a bit more CPU. 3 is very fast but not very accurate.
|
||||
#
|
||||
# maxmemory-samples 5
|
||||
|
||||
############################## APPEND ONLY MODE ###############################
|
||||
|
||||
# By default Redis asynchronously dumps the dataset on disk. This mode is
|
||||
# good enough in many applications, but an issue with the Redis process or
|
||||
# a power outage may result into a few minutes of writes lost (depending on
|
||||
# the configured save points).
|
||||
#
|
||||
# The Append Only File is an alternative persistence mode that provides
|
||||
# much better durability. For instance using the default data fsync policy
|
||||
# (see later in the config file) Redis can lose just one second of writes in a
|
||||
# dramatic event like a server power outage, or a single write if something
|
||||
# wrong with the Redis process itself happens, but the operating system is
|
||||
# still running correctly.
|
||||
#
|
||||
# AOF and RDB persistence can be enabled at the same time without problems.
|
||||
# If the AOF is enabled on startup Redis will load the AOF, that is the file
|
||||
# with the better durability guarantees.
|
||||
#
|
||||
# Please check http://redis.io/topics/persistence for more information.
|
||||
|
||||
appendonly no
|
||||
|
||||
# The name of the append only file (default: "appendonly.aof")
|
||||
|
||||
appendfilename "appendonly.aof"
|
||||
|
||||
# The fsync() call tells the Operating System to actually write data on disk
|
||||
# instead of waiting for more data in the output buffer. Some OS will really flush
|
||||
# data on disk, some other OS will just try to do it ASAP.
|
||||
#
|
||||
# Redis supports three different modes:
|
||||
#
|
||||
# no: don't fsync, just let the OS flush the data when it wants. Faster.
|
||||
# always: fsync after every write to the append only log. Slow, Safest.
|
||||
# everysec: fsync only one time every second. Compromise.
|
||||
#
|
||||
# The default is "everysec", as that's usually the right compromise between
|
||||
# speed and data safety. It's up to you to understand if you can relax this to
|
||||
# "no" that will let the operating system flush the output buffer when
|
||||
# it wants, for better performances (but if you can live with the idea of
|
||||
# some data loss consider the default persistence mode that's snapshotting),
|
||||
# or on the contrary, use "always" that's very slow but a bit safer than
|
||||
# everysec.
|
||||
#
|
||||
# More details please check the following article:
|
||||
# http://antirez.com/post/redis-persistence-demystified.html
|
||||
#
|
||||
# If unsure, use "everysec".
|
||||
|
||||
# appendfsync always
|
||||
appendfsync everysec
|
||||
# appendfsync no
|
||||
|
||||
# When the AOF fsync policy is set to always or everysec, and a background
|
||||
# saving process (a background save or AOF log background rewriting) is
|
||||
# performing a lot of I/O against the disk, in some Linux configurations
|
||||
# Redis may block too long on the fsync() call. Note that there is no fix for
|
||||
# this currently, as even performing fsync in a different thread will block
|
||||
# our synchronous write(2) call.
|
||||
#
|
||||
# In order to mitigate this problem it's possible to use the following option
|
||||
# that will prevent fsync() from being called in the main process while a
|
||||
# BGSAVE or BGREWRITEAOF is in progress.
|
||||
#
|
||||
# This means that while another child is saving, the durability of Redis is
|
||||
# the same as "appendfsync none". In practical terms, this means that it is
|
||||
# possible to lose up to 30 seconds of log in the worst scenario (with the
|
||||
# default Linux settings).
|
||||
#
|
||||
# If you have latency problems turn this to "yes". Otherwise leave it as
|
||||
# "no" that is the safest pick from the point of view of durability.
|
||||
|
||||
no-appendfsync-on-rewrite no
|
||||
|
||||
# Automatic rewrite of the append only file.
|
||||
# Redis is able to automatically rewrite the log file implicitly calling
|
||||
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
|
||||
#
|
||||
# This is how it works: Redis remembers the size of the AOF file after the
|
||||
# latest rewrite (if no rewrite has happened since the restart, the size of
|
||||
# the AOF at startup is used).
|
||||
#
|
||||
# This base size is compared to the current size. If the current size is
|
||||
# bigger than the specified percentage, the rewrite is triggered. Also
|
||||
# you need to specify a minimal size for the AOF file to be rewritten, this
|
||||
# is useful to avoid rewriting the AOF file even if the percentage increase
|
||||
# is reached but it is still pretty small.
|
||||
#
|
||||
# Specify a percentage of zero in order to disable the automatic AOF
|
||||
# rewrite feature.
|
||||
|
||||
auto-aof-rewrite-percentage 100
|
||||
auto-aof-rewrite-min-size 64mb
|
||||
|
||||
# An AOF file may be found to be truncated at the end during the Redis
|
||||
# startup process, when the AOF data gets loaded back into memory.
|
||||
# This may happen when the system where Redis is running
|
||||
# crashes, especially when an ext4 filesystem is mounted without the
|
||||
# data=ordered option (however this can't happen when Redis itself
|
||||
# crashes or aborts but the operating system still works correctly).
|
||||
#
|
||||
# Redis can either exit with an error when this happens, or load as much
|
||||
# data as possible (the default now) and start if the AOF file is found
|
||||
# to be truncated at the end. The following option controls this behavior.
|
||||
#
|
||||
# If aof-load-truncated is set to yes, a truncated AOF file is loaded and
|
||||
# the Redis server starts emitting a log to inform the user of the event.
|
||||
# Otherwise if the option is set to no, the server aborts with an error
|
||||
# and refuses to start. When the option is set to no, the user requires
|
||||
# to fix the AOF file using the "redis-check-aof" utility before to restart
|
||||
# the server.
|
||||
#
|
||||
# Note that if the AOF file will be found to be corrupted in the middle
|
||||
# the server will still exit with an error. This option only applies when
|
||||
# Redis will try to read more data from the AOF file but not enough bytes
|
||||
# will be found.
|
||||
aof-load-truncated yes
|
||||
|
||||
################################ LUA SCRIPTING ###############################
|
||||
|
||||
# Max execution time of a Lua script in milliseconds.
|
||||
#
|
||||
# If the maximum execution time is reached Redis will log that a script is
|
||||
# still in execution after the maximum allowed time and will start to
|
||||
# reply to queries with an error.
|
||||
#
|
||||
# When a long running script exceeds the maximum execution time only the
|
||||
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
|
||||
# used to stop a script that did not yet called write commands. The second
|
||||
# is the only way to shut down the server in the case a write command was
|
||||
# already issued by the script but the user doesn't want to wait for the natural
|
||||
# termination of the script.
|
||||
#
|
||||
# Set it to 0 or a negative value for unlimited execution without warnings.
|
||||
lua-time-limit 5000
|
||||
|
||||
################################ REDIS CLUSTER ###############################
|
||||
#
|
||||
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
|
||||
# in order to mark it as "mature" we need to wait for a non trivial percentage
|
||||
# of users to deploy it in production.
|
||||
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
#
|
||||
# Normal Redis instances can't be part of a Redis Cluster; only nodes that are
|
||||
# started as cluster nodes can. In order to start a Redis instance as a
|
||||
# cluster node enable the cluster support uncommenting the following:
|
||||
#
|
||||
# cluster-enabled yes
|
||||
|
||||
# Every cluster node has a cluster configuration file. This file is not
|
||||
# intended to be edited by hand. It is created and updated by Redis nodes.
|
||||
# Every Redis Cluster node requires a different cluster configuration file.
|
||||
# Make sure that instances running in the same system do not have
|
||||
# overlapping cluster configuration file names.
|
||||
#
|
||||
# cluster-config-file nodes-6379.conf
|
||||
|
||||
# Cluster node timeout is the amount of milliseconds a node must be unreachable
|
||||
# for it to be considered in failure state.
|
||||
# Most other internal time limits are multiple of the node timeout.
|
||||
#
|
||||
# cluster-node-timeout 15000
|
||||
|
||||
# A slave of a failing master will avoid to start a failover if its data
|
||||
# looks too old.
|
||||
#
|
||||
# There is no simple way for a slave to actually have a exact measure of
|
||||
# its "data age", so the following two checks are performed:
|
||||
#
|
||||
# 1) If there are multiple slaves able to failover, they exchange messages
|
||||
# in order to try to give an advantage to the slave with the best
|
||||
# replication offset (more data from the master processed).
|
||||
# Slaves will try to get their rank by offset, and apply to the start
|
||||
# of the failover a delay proportional to their rank.
|
||||
#
|
||||
# 2) Every single slave computes the time of the last interaction with
|
||||
# its master. This can be the last ping or command received (if the master
|
||||
# is still in the "connected" state), or the time that elapsed since the
|
||||
# disconnection with the master (if the replication link is currently down).
|
||||
# If the last interaction is too old, the slave will not try to failover
|
||||
# at all.
|
||||
#
|
||||
# The point "2" can be tuned by user. Specifically a slave will not perform
|
||||
# the failover if, since the last interaction with the master, the time
|
||||
# elapsed is greater than:
|
||||
#
|
||||
# (node-timeout * slave-validity-factor) + repl-ping-slave-period
|
||||
#
|
||||
# So for example if node-timeout is 30 seconds, and the slave-validity-factor
|
||||
# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
|
||||
# slave will not try to failover if it was not able to talk with the master
|
||||
# for longer than 310 seconds.
|
||||
#
|
||||
# A large slave-validity-factor may allow slaves with too old data to failover
|
||||
# a master, while a too small value may prevent the cluster from being able to
|
||||
# elect a slave at all.
|
||||
#
|
||||
# For maximum availability, it is possible to set the slave-validity-factor
|
||||
# to a value of 0, which means, that slaves will always try to failover the
|
||||
# master regardless of the last time they interacted with the master.
|
||||
# (However they'll always try to apply a delay proportional to their
|
||||
# offset rank).
|
||||
#
|
||||
# Zero is the only value able to guarantee that when all the partitions heal
|
||||
# the cluster will always be able to continue.
|
||||
#
|
||||
# cluster-slave-validity-factor 10
|
||||
|
||||
# Cluster slaves are able to migrate to orphaned masters, that are masters
|
||||
# that are left without working slaves. This improves the cluster ability
|
||||
# to resist to failures as otherwise an orphaned master can't be failed over
|
||||
# in case of failure if it has no working slaves.
|
||||
#
|
||||
# Slaves migrate to orphaned masters only if there are still at least a
|
||||
# given number of other working slaves for their old master. This number
|
||||
# is the "migration barrier". A migration barrier of 1 means that a slave
|
||||
# will migrate only if there is at least 1 other working slave for its master
|
||||
# and so forth. It usually reflects the number of slaves you want for every
|
||||
# master in your cluster.
|
||||
#
|
||||
# Default is 1 (slaves migrate only if their masters remain with at least
|
||||
# one slave). To disable migration just set it to a very large value.
|
||||
# A value of 0 can be set but is useful only for debugging and dangerous
|
||||
# in production.
|
||||
#
|
||||
# cluster-migration-barrier 1
|
||||
|
||||
# By default Redis Cluster nodes stop accepting queries if they detect there
|
||||
# is at least an hash slot uncovered (no available node is serving it).
|
||||
# This way if the cluster is partially down (for example a range of hash slots
|
||||
# are no longer covered) all the cluster becomes, eventually, unavailable.
|
||||
# It automatically returns available as soon as all the slots are covered again.
|
||||
#
|
||||
# However sometimes you want the subset of the cluster which is working,
|
||||
# to continue to accept queries for the part of the key space that is still
|
||||
# covered. In order to do so, just set the cluster-require-full-coverage
|
||||
# option to no.
|
||||
#
|
||||
# cluster-require-full-coverage yes
|
||||
|
||||
# In order to setup your cluster make sure to read the documentation
|
||||
# available at http://redis.io web site.
|
||||
|
||||
################################## SLOW LOG ###################################
|
||||
|
||||
# The Redis Slow Log is a system to log queries that exceeded a specified
|
||||
# execution time. The execution time does not include the I/O operations
|
||||
# like talking with the client, sending the reply and so forth,
|
||||
# but just the time needed to actually execute the command (this is the only
|
||||
# stage of command execution where the thread is blocked and can not serve
|
||||
# other requests in the meantime).
|
||||
#
|
||||
# You can configure the slow log with two parameters: one tells Redis
|
||||
# what is the execution time, in microseconds, to exceed in order for the
|
||||
# command to get logged, and the other parameter is the length of the
|
||||
# slow log. When a new command is logged the oldest one is removed from the
|
||||
# queue of logged commands.
|
||||
|
||||
# The following time is expressed in microseconds, so 1000000 is equivalent
|
||||
# to one second. Note that a negative number disables the slow log, while
|
||||
# a value of zero forces the logging of every command.
|
||||
slowlog-log-slower-than 10000
|
||||
|
||||
# There is no limit to this length. Just be aware that it will consume memory.
|
||||
# You can reclaim memory used by the slow log with SLOWLOG RESET.
|
||||
slowlog-max-len 128
|
||||
|
||||
################################ LATENCY MONITOR ##############################
|
||||
|
||||
# The Redis latency monitoring subsystem samples different operations
|
||||
# at runtime in order to collect data related to possible sources of
|
||||
# latency of a Redis instance.
|
||||
#
|
||||
# Via the LATENCY command this information is available to the user that can
|
||||
# print graphs and obtain reports.
|
||||
#
|
||||
# The system only logs operations that were performed in a time equal or
|
||||
# greater than the amount of milliseconds specified via the
|
||||
# latency-monitor-threshold configuration directive. When its value is set
|
||||
# to zero, the latency monitor is turned off.
|
||||
#
|
||||
# By default latency monitoring is disabled since it is mostly not needed
|
||||
# if you don't have latency issues, and collecting data has a performance
|
||||
# impact, that while very small, can be measured under big load. Latency
|
||||
# monitoring can easily be enabled at runtime using the command
|
||||
# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
|
||||
latency-monitor-threshold 0
|
||||
|
||||
############################# EVENT NOTIFICATION ##############################
|
||||
|
||||
# Redis can notify Pub/Sub clients about events happening in the key space.
|
||||
# This feature is documented at http://redis.io/topics/notifications
|
||||
#
|
||||
# For instance if keyspace events notification is enabled, and a client
|
||||
# performs a DEL operation on key "foo" stored in the Database 0, two
|
||||
# messages will be published via Pub/Sub:
|
||||
#
|
||||
# PUBLISH __keyspace@0__:foo del
|
||||
# PUBLISH __keyevent@0__:del foo
|
||||
#
|
||||
# It is possible to select the events that Redis will notify among a set
|
||||
# of classes. Every class is identified by a single character:
|
||||
#
|
||||
# K Keyspace events, published with __keyspace@<db>__ prefix.
|
||||
# E Keyevent events, published with __keyevent@<db>__ prefix.
|
||||
# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
|
||||
# $ String commands
|
||||
# l List commands
|
||||
# s Set commands
|
||||
# h Hash commands
|
||||
# z Sorted set commands
|
||||
# x Expired events (events generated every time a key expires)
|
||||
# e Evicted events (events generated when a key is evicted for maxmemory)
|
||||
# A Alias for g$lshzxe, so that the "AKE" string means all the events.
|
||||
#
|
||||
# The "notify-keyspace-events" takes as argument a string that is composed
|
||||
# of zero or multiple characters. The empty string means that notifications
|
||||
# are disabled.
|
||||
#
|
||||
# Example: to enable list and generic events, from the point of view of the
|
||||
# event name, use:
|
||||
#
|
||||
# notify-keyspace-events Elg
|
||||
#
|
||||
# Example 2: to get the stream of the expired keys subscribing to channel
|
||||
# name __keyevent@0__:expired use:
|
||||
#
|
||||
# notify-keyspace-events Ex
|
||||
#
|
||||
# By default all notifications are disabled because most users don't need
|
||||
# this feature and the feature has some overhead. Note that if you don't
|
||||
# specify at least one of K or E, no events will be delivered.
|
||||
notify-keyspace-events ""
|
||||
|
||||
############################### ADVANCED CONFIG ###############################
|
||||
|
||||
# Hashes are encoded using a memory efficient data structure when they have a
|
||||
# small number of entries, and the biggest entry does not exceed a given
|
||||
# threshold. These thresholds can be configured using the following directives.
|
||||
hash-max-ziplist-entries 512
|
||||
hash-max-ziplist-value 64
|
||||
|
||||
# Similarly to hashes, small lists are also encoded in a special way in order
|
||||
# to save a lot of space. The special representation is only used when
|
||||
# you are under the following limits:
|
||||
list-max-ziplist-entries 512
|
||||
list-max-ziplist-value 64
|
||||
|
||||
# Sets have a special encoding in just one case: when a set is composed
|
||||
# of just strings that happen to be integers in radix 10 in the range
|
||||
# of 64 bit signed integers.
|
||||
# The following configuration setting sets the limit in the size of the
|
||||
# set in order to use this special memory saving encoding.
|
||||
set-max-intset-entries 512
|
||||
|
||||
# Similarly to hashes and lists, sorted sets are also specially encoded in
|
||||
# order to save a lot of space. This encoding is only used when the length and
|
||||
# elements of a sorted set are below the following limits:
|
||||
zset-max-ziplist-entries 128
|
||||
zset-max-ziplist-value 64
|
||||
|
||||
# HyperLogLog sparse representation bytes limit. The limit includes the
|
||||
# 16 bytes header. When an HyperLogLog using the sparse representation crosses
|
||||
# this limit, it is converted into the dense representation.
|
||||
#
|
||||
# A value greater than 16000 is totally useless, since at that point the
|
||||
# dense representation is more memory efficient.
|
||||
#
|
||||
# The suggested value is ~ 3000 in order to have the benefits of
|
||||
# the space efficient encoding without slowing down too much PFADD,
|
||||
# which is O(N) with the sparse encoding. The value can be raised to
|
||||
# ~ 10000 when CPU is not a concern, but space is, and the data set is
|
||||
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
|
||||
hll-sparse-max-bytes 3000
|
||||
|
||||
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
|
||||
# order to help rehashing the main Redis hash table (the one mapping top-level
|
||||
# keys to values). The hash table implementation Redis uses (see dict.c)
|
||||
# performs a lazy rehashing: the more operation you run into a hash table
|
||||
# that is rehashing, the more rehashing "steps" are performed, so if the
|
||||
# server is idle the rehashing is never complete and some more memory is used
|
||||
# by the hash table.
|
||||
#
|
||||
# The default is to use this millisecond 10 times every second in order to
|
||||
# actively rehash the main dictionaries, freeing memory when possible.
|
||||
#
|
||||
# If unsure:
|
||||
# use "activerehashing no" if you have hard latency requirements and it is
|
||||
# not a good thing in your environment that Redis can reply from time to time
|
||||
# to queries with 2 milliseconds delay.
|
||||
#
|
||||
# use "activerehashing yes" if you don't have such hard requirements but
|
||||
# want to free memory asap when possible.
|
||||
activerehashing yes
|
||||
|
||||
# The client output buffer limits can be used to force disconnection of clients
|
||||
# that are not reading data from the server fast enough for some reason (a
|
||||
# common reason is that a Pub/Sub client can't consume messages as fast as the
|
||||
# publisher can produce them).
|
||||
#
|
||||
# The limit can be set differently for the three different classes of clients:
|
||||
#
|
||||
# normal -> normal clients including MONITOR clients
|
||||
# slave -> slave clients
|
||||
# pubsub -> clients subscribed to at least one pubsub channel or pattern
|
||||
#
|
||||
# The syntax of every client-output-buffer-limit directive is the following:
|
||||
#
|
||||
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
|
||||
#
|
||||
# A client is immediately disconnected once the hard limit is reached, or if
|
||||
# the soft limit is reached and remains reached for the specified number of
|
||||
# seconds (continuously).
|
||||
# So for instance if the hard limit is 32 megabytes and the soft limit is
|
||||
# 16 megabytes / 10 seconds, the client will get disconnected immediately
|
||||
# if the size of the output buffers reach 32 megabytes, but will also get
|
||||
# disconnected if the client reaches 16 megabytes and continuously overcomes
|
||||
# the limit for 10 seconds.
|
||||
#
|
||||
# By default normal clients are not limited because they don't receive data
|
||||
# without asking (in a push way), but just after a request, so only
|
||||
# asynchronous clients may create a scenario where data is requested faster
|
||||
# than it can read.
|
||||
#
|
||||
# Instead there is a default limit for pubsub and slave clients, since
|
||||
# subscribers and slaves receive data in a push fashion.
|
||||
#
|
||||
# Both the hard or the soft limit can be disabled by setting them to zero.
|
||||
client-output-buffer-limit normal 0 0 0
|
||||
client-output-buffer-limit slave 256mb 64mb 60
|
||||
client-output-buffer-limit pubsub 32mb 8mb 60
|
||||
unixsocket /var/run/redis/redis.sock
|
||||
unixsocketperm 777
|
||||
|
||||
# Redis calls an internal function to perform many background tasks, like
|
||||
# closing connections of clients in timeout, purging expired keys that are
|
||||
# never requested, and so forth.
|
||||
#
|
||||
# Not all tasks are performed with the same frequency, but Redis checks for
|
||||
# tasks to perform according to the specified "hz" value.
|
||||
#
|
||||
# By default "hz" is set to 10. Raising the value will use more CPU when
|
||||
# Redis is idle, but at the same time will make Redis more responsive when
|
||||
# there are many keys expiring at the same time, and timeouts may be
|
||||
# handled with more precision.
|
||||
#
|
||||
# The range is between 1 and 500, however a value over 100 is usually not
|
||||
# a good idea. Most users should use the default of 10 and raise this up to
|
||||
# 100 only in environments where very low latency is required.
|
||||
hz 10
|
||||
|
||||
# When a child rewrites the AOF file, if the following option is enabled
|
||||
# the file will be fsync-ed every 32 MB of data generated. This is useful
|
||||
# in order to commit the file to the disk more incrementally and avoid
|
||||
# big latency spikes.
|
||||
aof-rewrite-incremental-fsync yes
|
@ -0,0 +1,3 @@
|
||||
{% for key, value in overrides.iteritems() -%}
|
||||
{{key}} {{value}}
|
||||
{% endfor %}
|
304
trove/templates/redis/validation-rules.json
Normal file
304
trove/templates/redis/validation-rules.json
Normal file
@ -0,0 +1,304 @@
|
||||
{
|
||||
"configuration-parameters": [
|
||||
{
|
||||
"name": "tcp-backlog",
|
||||
"restart_required": true,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "timeout",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "tcp-keepalive",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "loglevel",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "databases",
|
||||
"restart_required": true,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "save",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "stop-writes-on-bgsave-error",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "rdbcompression",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "rdbchecksum",
|
||||
"restart_required": true,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "slave-serve-stale-data",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "slave-read-only",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "repl-diskless-sync",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "repl-diskless-sync-delay",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "repl-ping-slave-period",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "repl-timeout",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "repl-disable-tcp-nodelay",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "repl-backlog-size",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "repl-backlog-ttl",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "slave-priority",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "min-slaves-to-write",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "min-slaves-max-lag",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "requirepass",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "maxclients",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "maxmemory",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "maxmemory-policy",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "maxmemory-samples",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "appendonly",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "appendfsync",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "no-appendfsync-on-rewrite",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "auto-aof-rewrite-percentage",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "auto-aof-rewrite-min-size",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "aof-load-truncated",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "lua-time-limit",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "cluster-node-timeout",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "cluster-slave-validity-factor",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "cluster-migration-barrier",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "cluster-require-full-coverage",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "slowlog-log-slower-than",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "slowlog-max-len",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "latency-monitor-threshold",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "notify-keyspace-events",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "hash-max-ziplist-entries",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "hash-max-ziplist-value",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "list-max-ziplist-entries",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "list-max-ziplist-value",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "set-max-intset-entries",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "zset-max-ziplist-entries",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "zset-max-ziplist-value",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "hll-sparse-max-bytes",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"max": 16000,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "activerehashing",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
},
|
||||
{
|
||||
"name": "client-output-buffer-limit",
|
||||
"restart_required": false,
|
||||
"type": "string"
|
||||
},
|
||||
{
|
||||
"name": "hz",
|
||||
"restart_required": false,
|
||||
"min": 0,
|
||||
"max": 500,
|
||||
"type": "integer"
|
||||
},
|
||||
{
|
||||
"name": "aof-rewrite-incremental-fsync",
|
||||
"restart_required": false,
|
||||
"type": "boolean"
|
||||
}
|
||||
]
|
||||
}
|
@ -1428,6 +1428,7 @@ class MySqlAppInstallTest(MySqlAppTest):
|
||||
|
||||
|
||||
class TextClauseMatcher(object):
|
||||
|
||||
def __init__(self, text):
|
||||
self.text = text
|
||||
|
||||
@ -1701,6 +1702,7 @@ class ServiceRegistryTest(testtools.TestCase):
|
||||
class KeepAliveConnectionTest(testtools.TestCase):
|
||||
|
||||
class OperationalError(Exception):
|
||||
|
||||
def __init__(self, value):
|
||||
self.args = [value]
|
||||
|
||||
@ -1938,7 +1940,10 @@ class TestRedisApp(testtools.TestCase):
|
||||
self.FAKE_ID = 1000
|
||||
self.appStatus = FakeAppStatus(self.FAKE_ID,
|
||||
rd_instance.ServiceStatuses.NEW)
|
||||
self.app = RedisApp(self.appStatus)
|
||||
|
||||
with patch.object(RedisApp, '_build_admin_client'):
|
||||
self.app = RedisApp(state_change_wait_time=0)
|
||||
|
||||
self.orig_os_path_isfile = os.path.isfile
|
||||
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
|
||||
utils.execute_with_timeout = Mock()
|
||||
@ -2027,18 +2032,17 @@ class TestRedisApp(testtools.TestCase):
|
||||
mock_status = MagicMock()
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=True)
|
||||
app = RedisApp(mock_status, state_change_wait_time=0)
|
||||
self.app.status = mock_status
|
||||
RedisApp._disable_redis_on_boot = MagicMock(
|
||||
return_value=None)
|
||||
|
||||
with patch.object(utils, 'execute_with_timeout', return_value=None):
|
||||
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=True)
|
||||
app.stop_db(do_not_start_on_reboot=True)
|
||||
self.app.stop_db(do_not_start_on_reboot=True)
|
||||
|
||||
utils.execute_with_timeout.assert_any_call(
|
||||
'sudo ' + RedisSystem.REDIS_CMD_STOP,
|
||||
shell=True)
|
||||
stop_srv_mock.assert_called_once_with(
|
||||
RedisSystem.SERVICE_CANDIDATES)
|
||||
self.assertTrue(RedisApp._disable_redis_on_boot.called)
|
||||
self.assertTrue(
|
||||
mock_status.wait_for_real_status_to_change_to.called)
|
||||
@ -2047,18 +2051,17 @@ class TestRedisApp(testtools.TestCase):
|
||||
mock_status = MagicMock()
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=True)
|
||||
app = RedisApp(mock_status, state_change_wait_time=0)
|
||||
self.app.status = mock_status
|
||||
RedisApp._disable_redis_on_boot = MagicMock(
|
||||
return_value=None)
|
||||
|
||||
with patch.object(utils, 'execute_with_timeout', return_value=None):
|
||||
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=False)
|
||||
app.stop_db(do_not_start_on_reboot=True)
|
||||
self.app.stop_db(do_not_start_on_reboot=True)
|
||||
|
||||
utils.execute_with_timeout.assert_any_call(
|
||||
'sudo ' + RedisSystem.REDIS_CMD_STOP,
|
||||
shell=True)
|
||||
stop_srv_mock.assert_called_once_with(
|
||||
RedisSystem.SERVICE_CANDIDATES)
|
||||
self.assertTrue(RedisApp._disable_redis_on_boot.called)
|
||||
self.assertTrue(mock_status.end_install_or_restart.called)
|
||||
self.assertTrue(
|
||||
@ -2066,13 +2069,13 @@ class TestRedisApp(testtools.TestCase):
|
||||
|
||||
def test_restart(self):
|
||||
mock_status = MagicMock()
|
||||
app = RedisApp(mock_status, state_change_wait_time=0)
|
||||
self.app.status = mock_status
|
||||
mock_status.begin_restart = MagicMock(return_value=None)
|
||||
with patch.object(RedisApp, 'stop_db', return_value=None):
|
||||
with patch.object(RedisApp, 'start_redis', return_value=None):
|
||||
mock_status.end_install_or_restart = MagicMock(
|
||||
return_value=None)
|
||||
app.restart()
|
||||
self.app.restart()
|
||||
mock_status.begin_restart.assert_any_call()
|
||||
RedisApp.stop_db.assert_any_call()
|
||||
RedisApp.start_redis.assert_any_call()
|
||||
@ -2080,28 +2083,38 @@ class TestRedisApp(testtools.TestCase):
|
||||
|
||||
def test_start_redis(self):
|
||||
mock_status = MagicMock()
|
||||
app = RedisApp(mock_status, state_change_wait_time=0)
|
||||
with patch.object(RedisApp, '_enable_redis_on_boot',
|
||||
return_value=None):
|
||||
with patch.object(utils, 'execute_with_timeout',
|
||||
return_value=None):
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=None)
|
||||
mock_status.end_install_or_restart = MagicMock(
|
||||
return_value=None)
|
||||
app.start_redis()
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=True)
|
||||
|
||||
utils.execute_with_timeout.assert_any_call(
|
||||
'sudo ' + RedisSystem.REDIS_CMD_START,
|
||||
shell=True)
|
||||
utils.execute_with_timeout.assert_any_call('pkill', '-9',
|
||||
'redis-server',
|
||||
run_as_root=True,
|
||||
root_helper='sudo')
|
||||
self.assertTrue(RedisApp._enable_redis_on_boot.called)
|
||||
self.assertTrue(mock_status.end_install_or_restart.called)
|
||||
self.assertTrue(
|
||||
mock_status.wait_for_real_status_to_change_to.callled)
|
||||
self._assert_start_redis(mock_status)
|
||||
|
||||
@patch.object(utils, 'execute_with_timeout')
|
||||
def test_start_redis_with_failure(self, exec_mock):
|
||||
mock_status = MagicMock()
|
||||
mock_status.wait_for_real_status_to_change_to = MagicMock(
|
||||
return_value=False)
|
||||
mock_status.end_install_or_restart = MagicMock()
|
||||
|
||||
self._assert_start_redis(mock_status)
|
||||
|
||||
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
|
||||
run_as_root=True, root_helper='sudo')
|
||||
|
||||
mock_status.end_install_or_restart.assert_called_once_with()
|
||||
|
||||
@patch.multiple(operating_system, start_service=DEFAULT,
|
||||
enable_service_on_boot=DEFAULT)
|
||||
def _assert_start_redis(self, mock_status, start_service,
|
||||
enable_service_on_boot):
|
||||
self.app.status = mock_status
|
||||
|
||||
self.app.start_redis()
|
||||
|
||||
mock_status.wait_for_real_status_to_change_to.assert_called_once_with(
|
||||
rd_instance.ServiceStatuses.RUNNING, ANY, False)
|
||||
enable_service_on_boot.assert_called_once_with(
|
||||
RedisSystem.SERVICE_CANDIDATES)
|
||||
start_service.assert_called_once_with(RedisSystem.SERVICE_CANDIDATES)
|
||||
|
||||
|
||||
class CassandraDBAppTest(testtools.TestCase):
|
||||
@ -2939,11 +2952,11 @@ class VerticaAppTest(testtools.TestCase):
|
||||
with patch.object(app, '_disable_db_on_boot', return_value=None):
|
||||
with patch.object(app, 'read_config',
|
||||
return_value=self.test_config):
|
||||
app.stop_db()
|
||||
# Since database stop command does not gets executed,
|
||||
# so only 2 shell calls were there.
|
||||
self.assertEqual(
|
||||
2, vertica_system.shell_execute.call_count)
|
||||
app.stop_db()
|
||||
# Since database stop command does not gets executed,
|
||||
# so only 2 shell calls were there.
|
||||
self.assertEqual(
|
||||
2, vertica_system.shell_execute.call_count)
|
||||
|
||||
def test_stop_db_failure(self):
|
||||
mock_status = MagicMock()
|
||||
@ -2979,7 +2992,7 @@ class VerticaAppTest(testtools.TestCase):
|
||||
keys = ['test_key@machine1', 'test_key@machine2']
|
||||
with patch.object(os.path, 'expanduser',
|
||||
return_value=('/home/' + user)):
|
||||
self.app.authorize_public_keys(user=user, public_keys=keys)
|
||||
self.app.authorize_public_keys(user=user, public_keys=keys)
|
||||
self.assertEqual(2, vertica_system.shell_execute.call_count)
|
||||
vertica_system.shell_execute.assert_any_call(
|
||||
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
|
||||
@ -3128,6 +3141,7 @@ class VerticaAppTest(testtools.TestCase):
|
||||
|
||||
|
||||
class DB2AppTest(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DB2AppTest, self).setUp()
|
||||
self.orig_utils_execute_with_timeout = (
|
||||
@ -3183,6 +3197,7 @@ class DB2AppTest(testtools.TestCase):
|
||||
|
||||
|
||||
class DB2AdminTest(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DB2AdminTest, self).setUp()
|
||||
self.db2Admin = db2service.DB2Admin()
|
||||
@ -3262,23 +3277,23 @@ class DB2AdminTest(testtools.TestCase):
|
||||
{"_name": "random2", "_password": "guesswhat", "_databases": []})
|
||||
with patch.object(db2service, 'run_command',
|
||||
MagicMock(return_value=None)):
|
||||
with patch.object(db2service.DB2Admin, 'list_access',
|
||||
MagicMock(return_value=[FAKE_DB])):
|
||||
utils.execute_with_timeout = MagicMock(return_value=None)
|
||||
self.db2Admin.delete_user(FAKE_USER[1])
|
||||
self.assertTrue(db2service.run_command.called)
|
||||
self.assertTrue(db2service.DB2Admin.list_access.called)
|
||||
self.assertTrue(
|
||||
db2service.utils.execute_with_timeout.called)
|
||||
args, _ = db2service.run_command.call_args_list[0]
|
||||
expected = "db2 connect to testDB; " \
|
||||
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
|
||||
"DATAACCESS ON DATABASE FROM USER random2; " \
|
||||
"db2 connect reset"
|
||||
self.assertEqual(
|
||||
expected, args[0],
|
||||
"Revoke database access queries are not the same")
|
||||
self.assertEqual(1, db2service.run_command.call_count)
|
||||
with patch.object(db2service.DB2Admin, 'list_access',
|
||||
MagicMock(return_value=[FAKE_DB])):
|
||||
utils.execute_with_timeout = MagicMock(return_value=None)
|
||||
self.db2Admin.delete_user(FAKE_USER[1])
|
||||
self.assertTrue(db2service.run_command.called)
|
||||
self.assertTrue(db2service.DB2Admin.list_access.called)
|
||||
self.assertTrue(
|
||||
db2service.utils.execute_with_timeout.called)
|
||||
args, _ = db2service.run_command.call_args_list[0]
|
||||
expected = "db2 connect to testDB; " \
|
||||
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
|
||||
"DATAACCESS ON DATABASE FROM USER random2; " \
|
||||
"db2 connect reset"
|
||||
self.assertEqual(
|
||||
expected, args[0],
|
||||
"Revoke database access queries are not the same")
|
||||
self.assertEqual(1, db2service.run_command.call_count)
|
||||
|
||||
def test_list_users(self):
|
||||
databases = []
|
||||
|
@ -153,6 +153,55 @@ class TestOperatingSystem(trove_testtools.TestCase):
|
||||
"Lorem Ipsum", as_root=True)
|
||||
self.assertFalse(os.path.exists(temp_file.name))
|
||||
|
||||
def test_start_service(self):
|
||||
self._assert_service_call(operating_system.start_service,
|
||||
'cmd_start')
|
||||
|
||||
def test_stop_service(self):
|
||||
self._assert_service_call(operating_system.stop_service,
|
||||
'cmd_stop')
|
||||
|
||||
def test_enable_service_on_boot(self):
|
||||
self._assert_service_call(operating_system.enable_service_on_boot,
|
||||
'cmd_enable')
|
||||
|
||||
def test_disable_service_on_boot(self):
|
||||
self._assert_service_call(operating_system.disable_service_on_boot,
|
||||
'cmd_disable')
|
||||
|
||||
@patch.object(operating_system, '_execute_service_command')
|
||||
def _assert_service_call(self, fun, expected_cmd_key,
|
||||
exec_service_cmd_mock):
|
||||
test_candidate_names = ['test_service_1', 'test_service_2']
|
||||
fun(test_candidate_names)
|
||||
exec_service_cmd_mock.assert_called_once_with(test_candidate_names,
|
||||
expected_cmd_key)
|
||||
|
||||
@patch.object(operating_system, 'service_discovery',
|
||||
return_value={'cmd_start': 'start',
|
||||
'cmd_stop': 'stop',
|
||||
'cmd_enable': 'enable',
|
||||
'cmd_disable': 'disable'})
|
||||
def test_execute_service_command(self, discovery_mock):
|
||||
test_service_candidates = ['service_name']
|
||||
self._assert_execute_call([['start']], [{'shell': True}],
|
||||
operating_system._execute_service_command,
|
||||
None, test_service_candidates, 'cmd_start')
|
||||
discovery_mock.assert_called_once_with(test_service_candidates)
|
||||
|
||||
with ExpectedException(exception.UnprocessableEntity,
|
||||
"Candidate service names not specified."):
|
||||
operating_system._execute_service_command([], 'cmd_disable')
|
||||
|
||||
with ExpectedException(exception.UnprocessableEntity,
|
||||
"Candidate service names not specified."):
|
||||
operating_system._execute_service_command(None, 'cmd_start')
|
||||
|
||||
with ExpectedException(RuntimeError, "Service control command not "
|
||||
"available: unknown"):
|
||||
operating_system._execute_service_command(test_service_candidates,
|
||||
'unknown')
|
||||
|
||||
def test_modes(self):
|
||||
self._assert_modes(None, None, None, operating_system.FileMode())
|
||||
self._assert_modes(None, None, None,
|
||||
|
@ -12,11 +12,12 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mock import MagicMock
|
||||
from mock import DEFAULT, MagicMock, patch
|
||||
import testtools
|
||||
|
||||
from trove.common.context import TroveContext
|
||||
from trove.guestagent import backup
|
||||
from trove.guestagent.common.configuration import ConfigurationManager
|
||||
from trove.guestagent.common import operating_system
|
||||
from trove.guestagent.datastore.experimental.redis import (
|
||||
service as redis_service)
|
||||
@ -27,7 +28,8 @@ from trove.guestagent.volume import VolumeDevice
|
||||
|
||||
class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
@patch.object(redis_service.RedisApp, '_build_admin_client')
|
||||
def setUp(self, config_loader):
|
||||
super(RedisGuestAgentManagerTest, self).setUp()
|
||||
self.context = TroveContext()
|
||||
self.manager = RedisManager()
|
||||
@ -36,7 +38,6 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
self.origin_start_redis = redis_service.RedisApp.start_redis
|
||||
self.origin_stop_redis = redis_service.RedisApp.stop_db
|
||||
self.origin_install_redis = redis_service.RedisApp._install_redis
|
||||
self.origin_write_config = redis_service.RedisApp.write_config
|
||||
self.origin_install_if_needed = \
|
||||
redis_service.RedisApp.install_if_needed
|
||||
self.origin_complete_install_or_restart = \
|
||||
@ -52,7 +53,6 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
redis_service.RedisApp.stop_db = self.origin_stop_redis
|
||||
redis_service.RedisApp.start_redis = self.origin_start_redis
|
||||
redis_service.RedisApp._install_redis = self.origin_install_redis
|
||||
redis_service.RedisApp.write_config = self.origin_write_config
|
||||
redis_service.RedisApp.install_if_needed = \
|
||||
self.origin_install_if_needed
|
||||
redis_service.RedisApp.complete_install_or_restart = \
|
||||
@ -64,25 +64,28 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
|
||||
def test_update_status(self):
|
||||
mock_status = MagicMock()
|
||||
self.manager.appStatus = mock_status
|
||||
redis_service.RedisAppStatus.get = MagicMock(return_value=mock_status)
|
||||
self.manager._app.status = mock_status
|
||||
self.manager.update_status(self.context)
|
||||
redis_service.RedisAppStatus.get.assert_any_call()
|
||||
mock_status.update.assert_any_call()
|
||||
|
||||
def test_prepare_redis_not_installed(self):
|
||||
self._prepare_dynamic(is_redis_installed=False)
|
||||
|
||||
def _prepare_dynamic(self, device_path='/dev/vdb', is_redis_installed=True,
|
||||
@patch.multiple(redis_service.RedisApp,
|
||||
apply_initial_guestagent_configuration=DEFAULT)
|
||||
@patch.object(ConfigurationManager, 'save_configuration')
|
||||
def _prepare_dynamic(self, save_configuration_mock,
|
||||
apply_initial_guestagent_configuration,
|
||||
device_path='/dev/vdb', is_redis_installed=True,
|
||||
backup_info=None, is_root_enabled=False,
|
||||
mount_point='var/lib/redis'):
|
||||
|
||||
# covering all outcomes is starting to cause trouble here
|
||||
mock_status = MagicMock()
|
||||
redis_service.RedisAppStatus.get = MagicMock(return_value=mock_status)
|
||||
self.manager._app.status = mock_status
|
||||
self.manager._build_admin_client = MagicMock(return_value=MagicMock())
|
||||
redis_service.RedisApp.start_redis = MagicMock(return_value=None)
|
||||
redis_service.RedisApp.install_if_needed = MagicMock(return_value=None)
|
||||
redis_service.RedisApp.write_config = MagicMock(return_value=None)
|
||||
operating_system.chown = MagicMock(return_value=None)
|
||||
redis_service.RedisApp.restart = MagicMock(return_value=None)
|
||||
mock_status.begin_install = MagicMock(return_value=None)
|
||||
@ -99,11 +102,11 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
overrides=None,
|
||||
cluster_config=None)
|
||||
|
||||
self.assertEqual(2, redis_service.RedisAppStatus.get.call_count)
|
||||
mock_status.begin_install.assert_any_call()
|
||||
VolumeDevice.format.assert_any_call()
|
||||
redis_service.RedisApp.install_if_needed.assert_any_call(self.packages)
|
||||
redis_service.RedisApp.write_config.assert_any_call(None)
|
||||
save_configuration_mock.assert_any_call(None)
|
||||
apply_initial_guestagent_configuration.assert_called_once_with()
|
||||
operating_system.chown.assert_any_call(
|
||||
mount_point, 'redis', 'redis', as_root=True)
|
||||
redis_service.RedisApp.restart.assert_any_call()
|
||||
@ -114,7 +117,6 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
redis_service.RedisAppStatus.get = MagicMock(return_value=mock_status)
|
||||
redis_service.RedisApp.restart = MagicMock(return_value=None)
|
||||
self.manager.restart(self.context)
|
||||
redis_service.RedisAppStatus.get.assert_any_call()
|
||||
redis_service.RedisApp.restart.assert_any_call()
|
||||
|
||||
def test_stop_db(self):
|
||||
@ -123,6 +125,5 @@ class RedisGuestAgentManagerTest(testtools.TestCase):
|
||||
redis_service.RedisAppStatus.get = MagicMock(return_value=mock_status)
|
||||
redis_service.RedisApp.stop_db = MagicMock(return_value=None)
|
||||
self.manager.stop_db(self.context)
|
||||
redis_service.RedisAppStatus.get.assert_any_call()
|
||||
redis_service.RedisApp.stop_db.assert_any_call(
|
||||
do_not_start_on_reboot=False)
|
||||
|
Loading…
x
Reference in New Issue
Block a user