PostgreSQL support
Change-Id: I7c77b210f5a1438739daebffea104eda3bda1a45
parent 8daade000c
commit d0cfb41b25
@@ -98,7 +98,7 @@ pika-pool==0.1.3
 pika==0.10.0
 prettytable==0.7.2
 proboscis==1.2.5.3
-psycopg2==2.6.2
+psycopg2-binary==2.6.2
 ptyprocess==0.5.2
 pycadf==2.7.0
 pycparser==2.18
@@ -48,3 +48,4 @@ cryptography>=2.1.4 # BSD/Apache-2.0
 oslo.policy>=1.30.0 # Apache-2.0
 diskimage-builder!=1.6.0,!=1.7.0,!=1.7.1,>=1.1.2 # Apache-2.0
 docker>=4.2.0 # Apache-2.0
+psycopg2-binary>=2.6.2 # LGPL/ZPL
@@ -15,7 +15,6 @@ python-troveclient>=2.2.0 # Apache-2.0
 testtools>=2.2.0 # MIT
 pymongo!=3.1,>=3.0.2 # Apache-2.0
 redis>=2.10.0 # MIT
-psycopg2>=2.6.2 # LGPL/ZPL
 cassandra-driver!=3.6.0,>=2.1.4 # Apache-2.0
 couchdb>=0.8 # Apache-2.0
 stestr>=1.1.0 # Apache-2.0
@@ -1050,6 +1050,10 @@ postgresql_group = cfg.OptGroup(
     'postgresql', title='PostgreSQL options',
     help="Oslo option group for the PostgreSQL datastore.")
 postgresql_opts = [
+    cfg.StrOpt(
+        'docker_image', default='postgres',
+        help='Database docker image.'
+    ),
     cfg.BoolOpt('icmp', default=False,
                 help='Whether to permit ICMP.',
                 deprecated_for_removal=True),
@@ -1087,7 +1091,7 @@ postgresql_opts = [
     cfg.BoolOpt('volume_support', default=True,
                 help='Whether to provision a Cinder volume for datadir.'),
     cfg.StrOpt('device_path', default='/dev/vdb'),
-    cfg.ListOpt('ignore_users', default=['os_admin', 'postgres', 'root']),
+    cfg.ListOpt('ignore_users', default=['os_admin', 'postgres']),
     cfg.ListOpt('ignore_dbs', default=['os_admin', 'postgres']),
     cfg.StrOpt('root_controller',
                default='trove.extensions.common.service.DefaultRootController',
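For orientation, a minimal sketch of how the options registered above surface to the rest of this change (assuming the 'postgresql' group is registered against the global CONF object, which is how oslo.config exposes option groups):

    from trove.common import cfg

    CONF = cfg.CONF
    # With the group registered above, the new image option resolves like
    # any other per-datastore setting:
    image = CONF.postgresql.docker_image  # 'postgres' unless overridden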
@@ -16,7 +16,7 @@
 import abc
 import ast
 import csv
-import json
+import io
 import re
 import sys
 
@@ -26,6 +26,8 @@ import xmltodict
 import yaml
 
 from oslo_serialization import base64
+from oslo_serialization import jsonutils
 from oslo_utils import strutils
 
+from trove.common import utils as trove_utils
 
@@ -282,7 +284,7 @@ class PropertiesCodec(StreamCodec):
     :type comment_markers:     list
 
     :param unpack_singletons:  Whether to unpack singleton collections
-                               (collections with only a single value).
+                               (collections with only a single item).
     :type unpack_singletons:   boolean
 
     :param string_mappings:    User-defined string representations of
@@ -469,6 +471,8 @@ class KeyValueCodec(StreamCodec):
                 v = v.lstrip(
                     self._value_quote_char).rstrip(
                     self._value_quote_char)
+            elif v.lower() in ['true', 'false']:
+                v = strutils.bool_from_string(v.lower())
             else:
                 # remove trailing comments
                 v = re.sub('%s.*$' % self._comment_marker, '', v)
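The new branch normalizes bare true/false values into Python booleans before they land in the parsed dictionary. A quick, self-contained illustration of the oslo.utils helper it relies on:

    from oslo_utils import strutils

    # bool_from_string maps common truthy/falsy spellings onto real
    # booleans; the codec only routes 'true'/'false' through it.
    assert strutils.bool_from_string('true') is True
    assert strutils.bool_from_string('false') is False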
@@ -509,10 +513,13 @@ class KeyValueCodec(StreamCodec):
 class JsonCodec(StreamCodec):
 
     def serialize(self, dict_data):
-        return json.dumps(dict_data)
+        return jsonutils.dumps(dict_data)
 
     def deserialize(self, stream):
-        return json.load(six.StringIO(stream))
+        if type(stream) == str:
+            return jsonutils.load(io.StringIO(stream))
+        if type(stream) == bytes:
+            return jsonutils.load(io.BytesIO(stream))
 
 
 class Base64Codec(StreamCodec):
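The codec now serializes through oslo's jsonutils and branches on the stream type when deserializing. A self-contained sketch of the underlying helpers (calling jsonutils directly rather than the codec class):

    import io

    from oslo_serialization import jsonutils

    payload = {'max_connections': 100}
    serialized = jsonutils.dumps(payload)           # str
    assert jsonutils.loads(serialized) == payload   # plain-string round trip

    byte_stream = io.BytesIO(serialized.encode('utf-8'))
    assert jsonutils.load(byte_stream) == payload   # byte-stream round trip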
@@ -81,7 +81,7 @@ class ConfigurationManager(object):
         :type override_strategy  ConfigurationOverrideStrategy
         """
         base_config_dir = os.path.dirname(base_config_path)
-        operating_system.create_directory(
+        operating_system.ensure_directory(
             base_config_dir, user=owner, group=group, force=True, as_root=True
         )
 
@@ -348,6 +348,8 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
         self._codec = codec
         self._requires_root = requires_root
 
+        self._initialize_import_directory()
+
     def exists(self, group_name, change_id):
         return self._find_revision_file(group_name, change_id) is not None
 
@@ -381,7 +383,7 @@ class ImportOverrideStrategy(ConfigurationOverrideStrategy):
         """Lazy-initialize the directory for imported revision files.
         """
         if not os.path.exists(self._revision_dir):
-            operating_system.create_directory(
+            operating_system.ensure_directory(
                 self._revision_dir, user=self._owner, group=self._group,
                 force=True, as_root=self._requires_root)
@@ -178,6 +178,6 @@ def get_conf_dir():
     mount_point = CONF.get(CONF.datastore_manager).mount_point
     conf_dir = os.path.join(mount_point, 'conf.d')
     if not operating_system.exists(conf_dir, is_directory=True, as_root=True):
-        operating_system.create_directory(conf_dir, as_root=True)
+        operating_system.ensure_directory(conf_dir, as_root=True)
 
     return conf_dir
@@ -143,6 +143,7 @@ def _read_file_as_root(path, open_flag, convert_func):
 
 def write_file(path, data, codec=IdentityCodec(), as_root=False, encode=True):
     """Write data into file using a given codec.
 
+    Overwrite any existing contents.
     The written file can be read back into its original
     form by 'read_file'.
@@ -514,7 +515,7 @@ def _execute_shell_cmd(cmd, options, *args, **kwargs):
     return stdout
 
 
-def create_directory(dir_path, user=None, group=None, force=True, **kwargs):
+def ensure_directory(dir_path, user=None, group=None, force=True, **kwargs):
     """Create a given directory and update its ownership
     (recursively) to the given user and group if any.
 
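The rename, applied at every call site in this diff, stresses the idempotent contract: the call succeeds whether or not the directory already exists. A sketch of a typical call, with illustrative path and owner values:

    from trove.guestagent.common import operating_system

    # Make sure a service directory exists and is owned by the database
    # service user; force=True tolerates a directory that is already there.
    operating_system.ensure_directory(
        '/var/run/postgresql', user='1001', group='1001',
        force=True, as_root=True)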
@@ -756,6 +757,7 @@ def move(source, destination, force=False, **kwargs):
-def copy(source, destination, force=False, preserve=False, recursive=True, **kwargs):
+def copy(source, destination, force=False, preserve=False, recursive=True,
+         dereference=False, **kwargs):
     """Copy a given file or directory to another location.
 
     Copy does NOT attempt to preserve ownership, permissions and timestamps
     unless the 'preserve' option is enabled.
 
@@ -16,7 +16,6 @@
 
 import abc
 import operator
-import os
 
 import docker
 from oslo_config import cfg as oslo_cfg
@@ -74,7 +73,6 @@ class Manager(periodic_task.PeriodicTasks):
         self.__manager_name = manager_name
         self.__manager = None
         self.__prepare_error = False
-        self.status = None
 
         # Guest log
         self._guest_log_context = None
@@ -85,6 +83,11 @@ class Manager(periodic_task.PeriodicTasks):
         # Module
         self.module_driver_manager = driver_manager.ModuleDriverManager()
 
+        # Drivers should implement
+        self.adm = None
+        self.app = None
+        self.status = None
+
     @property
     def manager_name(self):
         """This returns the passed-in name of the manager."""
@@ -300,6 +303,14 @@ class Manager(periodic_task.PeriodicTasks):
             LOG.info('No post_prepare work has been defined.')
             pass
 
+    def restart(self, context):
+        self.app.restart()
+
+    def rebuild(self, context, ds_version, config_contents=None,
+                config_overrides=None):
+        raise exception.DatastoreOperationNotSupported(
+            operation='rebuild', datastore=self.manager)
+
     def pre_upgrade(self, context):
         """Prepares the guest for upgrade, returning a dict to be passed
         to post_upgrade
@@ -316,29 +327,6 @@ class Manager(periodic_task.PeriodicTasks):
         """
         pass
 
-    def _restore_directory(self, restore_dir, target_dir, owner=None):
-        restore_path = os.path.join(restore_dir, ".")
-        operating_system.copy(restore_path, target_dir,
-                              preserve=True, as_root=True)
-        if owner is not None:
-            operating_system.chown(path=target_dir, user=owner, group=owner,
-                                   recursive=True, as_root=True)
-
-    def _restore_home_directory(self, saved_home_dir):
-        home_dir = os.path.expanduser("~")
-        home_owner = operating_system.get_current_user()
-        self._restore_directory(restore_dir=saved_home_dir,
-                                target_dir=home_dir,
-                                owner=home_owner)
-
-    #################
-    # Service related
-    #################
-    @abc.abstractmethod
-    def restart(self, context):
-        """Restart the database service."""
-        pass
-
     #####################
     # File System related
     #####################
@@ -379,7 +367,7 @@ class Manager(periodic_task.PeriodicTasks):
         method needs to be implemented to allow the rollback of
         flavor-resize on the guestagent side.
         """
-        LOG.debug("Resetting configuration.")
+        LOG.info("Resetting configuration.")
         if self.configuration_manager:
             config_contents = configuration['config_contents']
             self.configuration_manager.save_configuration(config_contents)
@@ -389,12 +377,13 @@ class Manager(periodic_task.PeriodicTasks):
         self.restart(context)
 
     def update_overrides(self, context, overrides, remove=False):
-        LOG.debug("Updating overrides.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='update_overrides', datastore=self.manager)
+        LOG.info(f"Updating config options: {overrides}, remove={remove}")
+        if remove:
+            self.app.remove_overrides()
+        self.app.update_overrides(overrides)
 
     def apply_overrides(self, context, overrides):
-        LOG.debug("Applying overrides.")
+        LOG.info(f"Applying overrides {overrides}.")
         raise exception.DatastoreOperationNotSupported(
             operation='apply_overrides', datastore=self.manager)
 
@@ -402,7 +391,7 @@ class Manager(periodic_task.PeriodicTasks):
     # Cluster related
     #################
     def cluster_complete(self, context):
-        LOG.debug("Cluster creation complete, starting status checks.")
+        LOG.info("Cluster creation complete, starting status checks.")
         self.status.end_install()
 
     #############
@@ -431,6 +420,9 @@ class Manager(periodic_task.PeriodicTasks):
         """
         return {}
 
+    def is_log_enabled(self, logname):
+        return False
+
     def get_guest_log_defs(self):
         """Return all the guest log defs."""
         if not self._guest_log_defs:
@@ -464,20 +456,26 @@ class Manager(periodic_task.PeriodicTasks):
             exposed_logs = exposed_logs.lower().replace(',', ' ').split()
         LOG.debug("Exposing log defs: %s", ",".join(exposed_logs))
         expose_all = 'all' in exposed_logs
 
         for log_name in gl_defs.keys():
             gl_def = gl_defs[log_name]
             exposed = expose_all or log_name in exposed_logs
             LOG.debug("Building guest log '%(name)s' from def: %(def)s"
                       " (exposed: %(exposed)s)",
                       {'name': log_name, 'def': gl_def,
                        'exposed': exposed})
-            self._guest_log_cache[log_name] = guest_log.GuestLog(
+            guestlog = guest_log.GuestLog(
                 self.guest_log_context, log_name,
                 gl_def[self.GUEST_LOG_TYPE_LABEL],
                 gl_def[self.GUEST_LOG_USER_LABEL],
                 gl_def[self.GUEST_LOG_FILE_LABEL],
                 exposed)
 
+            if (gl_def[self.GUEST_LOG_TYPE_LABEL] ==
+                    guest_log.LogType.USER):
+                guestlog.enabled = self.is_log_enabled(log_name)
+                guestlog.status = (guest_log.LogStatus.Enabled
+                                   if guestlog.enabled
+                                   else guest_log.LogStatus.Disabled)
+
+            self._guest_log_cache[log_name] = guestlog
 
         self._guest_log_loaded_context = self.guest_log_context
 
     def guest_log_list(self, context):
@@ -487,7 +485,6 @@ class Manager(periodic_task.PeriodicTasks):
         result = filter(None, [gl_cache[log_name].show()
                                if gl_cache[log_name].exposed else None
                                for log_name in gl_cache.keys()])
-        LOG.info("Returning list of logs: %s", result)
         return result
 
     def guest_log_action(self, context, log_name, enable, disable,
@@ -503,9 +500,15 @@ class Manager(periodic_task.PeriodicTasks):
                   "publish=%(pub)s, discard=%(disc)s).",
                   {'log': log_name, 'en': enable, 'dis': disable,
                    'pub': publish, 'disc': discard})
 
         self.guest_log_context = context
         gl_cache = self.get_guest_log_cache()
 
+        if log_name in gl_cache:
+            LOG.debug(f"Found log {log_name}, type={gl_cache[log_name].type}, "
+                      f"enable={gl_cache[log_name].enabled}")
+
             # system log can only be published
             if ((gl_cache[log_name].type == guest_log.LogType.SYS) and
                     not publish):
                 if enable or disable:
@@ -515,22 +518,26 @@ class Manager(periodic_task.PeriodicTasks):
                         action_text = "disable"
                     raise exception.BadRequest("Cannot %s a SYSTEM log ('%s')."
                                                % (action_text, log_name))
 
             if gl_cache[log_name].type == guest_log.LogType.USER:
                 requires_change = (
                     (gl_cache[log_name].enabled and disable) or
                     (not gl_cache[log_name].enabled and enable))
                 if requires_change:
-                    restart_required = self.guest_log_enable(
-                        context, log_name, disable)
-                    if restart_required:
-                        self.set_guest_log_status(
-                            guest_log.LogStatus.Restart_Required, log_name)
+                    self.guest_log_enable(context, log_name, disable)
+                    gl_cache[log_name].enabled = enable
+                    gl_cache[log_name].status = (
+                        guest_log.LogStatus.Enabled
+                        if enable
+                        else guest_log.LogStatus.Disabled
+                    )
 
             log_details = gl_cache[log_name].show()
             if discard:
                 log_details = gl_cache[log_name].discard_log()
             if publish:
                 log_details = gl_cache[log_name].publish_log()
 
         LOG.info("Details for log '%(log)s': %(det)s",
                  {'log': log_name, 'det': log_details})
         return log_details
@@ -598,23 +605,8 @@ class Manager(periodic_task.PeriodicTasks):
             else:
                 self.apply_overrides(context, cfg_values)
 
-    def set_guest_log_status(self, status, log_name=None):
-        """Sets the status of log_name to 'status' - if log_name is not
-        provided, sets the status on all logs.
-        """
-        gl_cache = self.get_guest_log_cache()
-        names = [log_name]
-        if not log_name or log_name not in gl_cache:
-            names = gl_cache.keys()
-        for name in names:
-            # If we're already in restart mode and we're asked to set the
-            # status to restart, assume enable/disable has been flipped
-            # without a restart and set the status to restart done
-            if (gl_cache[name].status == guest_log.LogStatus.Restart_Required
-                    and status == guest_log.LogStatus.Restart_Required):
-                gl_cache[name].status = guest_log.LogStatus.Restart_Completed
-            else:
-                gl_cache[name].status = status
+    def get_log_status(self, label):
+        self.configuration_manager.get_value(label)
 
     def build_log_file_name(self, log_name, owner, datastore_dir=None):
         """Build a log file name based on the log_name and make sure the
@@ -623,14 +615,14 @@ class Manager(periodic_task.PeriodicTasks):
         if datastore_dir is None:
             base_dir = self.GUEST_LOG_BASE_DIR
             if not operating_system.exists(base_dir, is_directory=True):
-                operating_system.create_directory(
+                operating_system.ensure_directory(
                     base_dir, user=owner, group=owner, force=True,
                     as_root=True)
             datastore_dir = guestagent_utils.build_file_path(
                 base_dir, self.GUEST_LOG_DATASTORE_DIRNAME)
 
         if not operating_system.exists(datastore_dir, is_directory=True):
-            operating_system.create_directory(
+            operating_system.ensure_directory(
                 datastore_dir, user=owner, group=owner, force=True,
                 as_root=True)
         log_file_name = guestagent_utils.build_file_path(
@@ -648,7 +640,7 @@ class Manager(periodic_task.PeriodicTasks):
                                        as_root=True)
         operating_system.chmod(log_file, FileMode.ADD_USR_RW_GRP_RW_OTH_R,
                                as_root=True)
         LOG.debug("Set log file '%s' as readable", log_file)
 
         return log_file
 
     ################
@@ -734,117 +726,94 @@ class Manager(periodic_task.PeriodicTasks):
             driver, module_type, id, name, datastore, ds_version)
         LOG.info("Deleted module: %s", name)
 
-    def change_passwords(self, context, users):
-        LOG.debug("Changing passwords.")
-        with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='change_passwords', datastore=self.manager)
+    ################
+    # Backup and restore
+    ################
+    def create_backup(self, context, backup_info):
+        """Create backup for the database.
+
+        :param context: User context object.
+        :param backup_info: a dictionary containing the db instance id of the
+                            backup task, location, type, and other data.
+        """
+        with EndNotification(context):
+            self.app.create_backup(context, backup_info)
+
+    def perform_restore(self, context, restore_location, backup_info):
+        raise exception.DatastoreOperationNotSupported(
+            operation='_perform_restore', datastore=self.manager)
+
+    ################
+    # Database and user management
+    ################
+    def create_database(self, context, databases):
+        with EndNotification(context):
+            return self.adm.create_databases(databases)
+
+    def list_databases(self, context, limit=None, marker=None,
+                       include_marker=False):
+        return self.adm.list_databases(limit, marker, include_marker)
+
+    def delete_database(self, context, database):
+        with EndNotification(context):
+            return self.adm.delete_database(database)
+
+    def change_passwords(self, context, users):
+        with EndNotification(context):
+            self.adm.change_passwords(users)
 
     def get_root_password(self, context):
         LOG.debug("Getting root password.")
         raise exception.DatastoreOperationNotSupported(
             operation='get_root_password', datastore=self.manager)
 
     def enable_root(self, context):
-        LOG.debug("Enabling root.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='enable_root', datastore=self.manager)
+        LOG.info("Enabling root for the database.")
+        return self.adm.enable_root()
 
     def enable_root_on_prepare(self, context, root_password):
         self.enable_root_with_password(context, root_password)
 
     def enable_root_with_password(self, context, root_password=None):
-        LOG.debug("Enabling root with password.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='enable_root_with_password', datastore=self.manager)
+        return self.adm.enable_root(root_password)
 
     def disable_root(self, context):
-        LOG.debug("Disabling root.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='disable_root', datastore=self.manager)
+        LOG.info("Disabling root for the database.")
+        return self.adm.disable_root()
 
     def is_root_enabled(self, context):
         LOG.debug("Checking if root was ever enabled.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='is_root_enabled', datastore=self.manager)
-
-    def create_backup(self, context, backup_info):
-        LOG.debug("Creating backup.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='create_backup', datastore=self.manager)
-
-    def perform_restore(self, context, restore_location, backup_info):
-        LOG.debug("Performing restore.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='_perform_restore', datastore=self.manager)
-
-    def create_database(self, context, databases):
-        LOG.debug("Creating databases.")
-        with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='create_database', datastore=self.manager)
-
-    def list_databases(self, context, limit=None, marker=None,
-                       include_marker=False):
-        LOG.debug("Listing databases.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='list_databases', datastore=self.manager)
-
-    def delete_database(self, context, database):
-        LOG.debug("Deleting database.")
-        with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='delete_database', datastore=self.manager)
+        return self.adm.is_root_enabled()
 
     def create_user(self, context, users):
         LOG.debug("Creating users.")
         with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='create_user', datastore=self.manager)
+            self.adm.create_users(users)
 
     def list_users(self, context, limit=None, marker=None,
                    include_marker=False):
         LOG.debug("Listing users.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='list_users', datastore=self.manager)
+        return self.adm.list_users(limit, marker, include_marker)
 
     def delete_user(self, context, user):
         LOG.debug("Deleting user.")
         with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='delete_user', datastore=self.manager)
+            self.adm.delete_user(user)
 
     def get_user(self, context, username, hostname):
         LOG.debug("Getting user.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='get_user', datastore=self.manager)
+        return self.adm.get_user(username, hostname)
 
     def update_attributes(self, context, username, hostname, user_attrs):
         LOG.debug("Updating user attributes.")
         with EndNotification(context):
-            raise exception.DatastoreOperationNotSupported(
-                operation='update_attributes', datastore=self.manager)
+            self.adm.update_attributes(username, hostname, user_attrs)
 
     def grant_access(self, context, username, hostname, databases):
         LOG.debug("Granting user access.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='grant_access', datastore=self.manager)
+        return self.adm.grant_access(username, hostname, databases)
 
     def revoke_access(self, context, username, hostname, database):
         LOG.debug("Revoking user access.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='revoke_access', datastore=self.manager)
+        return self.adm.revoke_access(username, hostname, database)
 
     def list_access(self, context, username, hostname):
         LOG.debug("Listing user access.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='list_access', datastore=self.manager)
-
-    def get_config_changes(self, cluster_config, mount_point=None):
-        LOG.debug("Get configuration changes.")
-        raise exception.DatastoreOperationNotSupported(
-            operation='get_configuration_changes', datastore=self.manager)
+        return self.adm.list_access(username, hostname)
 
     ################
     # Replication related
     ################
     def get_replication_snapshot(self, context, snapshot_info,
                                  replica_source_config=None):
         LOG.debug("Getting replication snapshot.")
@@ -895,8 +864,3 @@ class Manager(periodic_task.PeriodicTasks):
         LOG.debug("Waiting for transaction.")
         raise exception.DatastoreOperationNotSupported(
             operation='wait_for_txn', datastore=self.manager)
-
-    def rebuild(self, context, ds_version, config_contents=None,
-                config_overrides=None):
-        raise exception.DatastoreOperationNotSupported(
-            operation='rebuild', datastore=self.manager)
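The net effect of the hunk above: the user, database, root and backup entry points now live once in the base Manager and delegate to driver-supplied adm/app collaborators, so a datastore only has to implement those two objects. A self-contained sketch of the pattern (ExampleAdmin and ExampleManager are hypothetical names, not Trove classes):

    class ExampleAdmin(object):
        """Hypothetical admin driver: only the datastore-specific bits."""
        def create_databases(self, databases):
            print('creating', databases)

    class ExampleManager(object):
        """Sketch of the delegation used by the refactored base Manager:
        RPC entry points simply call into self.adm / self.app."""
        def __init__(self):
            self.adm = ExampleAdmin()

        def create_database(self, context, databases):
            # the real base Manager wraps this in EndNotification(context)
            return self.adm.create_databases(databases)

    ExampleManager().create_database(None, ['db1'])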
@@ -23,7 +23,6 @@ from trove.common import cfg
 from trove.common import configurations
 from trove.common import exception
 from trove.common import utils
-from trove.common.notification import EndNotification
 from trove.guestagent import guest_log
 from trove.guestagent.common import operating_system
 from trove.guestagent.datastore import manager
@@ -74,62 +73,6 @@ class MySqlManager(manager.Manager):
         except Exception:
             return super(MySqlManager, self).get_service_status()
 
-    def create_database(self, context, databases):
-        with EndNotification(context):
-            return self.adm.create_database(databases)
-
-    def create_user(self, context, users):
-        with EndNotification(context):
-            self.adm.create_user(users)
-
-    def delete_database(self, context, database):
-        with EndNotification(context):
-            return self.adm.delete_database(database)
-
-    def delete_user(self, context, user):
-        with EndNotification(context):
-            self.adm.delete_user(user)
-
-    def list_databases(self, context, limit=None, marker=None,
-                       include_marker=False):
-        return self.adm.list_databases(limit, marker, include_marker)
-
-    def list_users(self, context, limit=None, marker=None,
-                   include_marker=False):
-        return self.adm.list_users(limit, marker, include_marker)
-
-    def get_user(self, context, username, hostname):
-        return self.adm.get_user(username, hostname)
-
-    def update_attributes(self, context, username, hostname, user_attrs):
-        with EndNotification(context):
-            self.adm.update_attributes(username, hostname, user_attrs)
-
-    def grant_access(self, context, username, hostname, databases):
-        return self.adm.grant_access(username, hostname, databases)
-
-    def revoke_access(self, context, username, hostname, database):
-        return self.adm.revoke_access(username, hostname, database)
-
-    def list_access(self, context, username, hostname):
-        return self.adm.list_access(username, hostname)
-
-    def enable_root(self, context):
-        return self.adm.enable_root()
-
-    def enable_root_with_password(self, context, root_password=None):
-        return self.adm.enable_root(root_password)
-
-    def is_root_enabled(self, context):
-        return self.adm.is_root_enabled()
-
-    def disable_root(self, context):
-        return self.adm.disable_root()
-
-    def change_passwords(self, context, users):
-        with EndNotification(context):
-            self.adm.change_passwords(users)
-
     def do_prepare(self, context, packages, databases, memory_mb, users,
                    device_path, mount_point, backup_info,
                    config_contents, root_password, overrides,
@@ -137,7 +80,7 @@ class MySqlManager(manager.Manager):
         """This is called from prepare in the base class."""
         data_dir = mount_point + '/data'
         self.app.stop_db()
-        operating_system.create_directory(data_dir,
+        operating_system.ensure_directory(data_dir,
                                           user=CONF.database_service_uid,
                                           group=CONF.database_service_uid,
                                           as_root=True)
@@ -176,27 +119,9 @@ class MySqlManager(manager.Manager):
             # This instance is a replication slave
             self.attach_replica(context, snapshot, snapshot['config'])
 
-    def _validate_slave_for_replication(self, context, replica_info):
-        if replica_info['replication_strategy'] != self.replication_strategy:
-            raise exception.IncompatibleReplicationStrategy(
-                replica_info.update({
-                    'guest_strategy': self.replication_strategy
-                }))
-
-        volume_stats = self.get_filesystem_stats(context, None)
-        if (volume_stats.get('total', 0.0) <
-                replica_info['dataset']['dataset_size']):
-            raise exception.InsufficientSpaceForReplica(
-                replica_info.update({
-                    'slave_volume_size': volume_stats.get('total', 0.0)
-                }))
-
     def stop_db(self, context):
         self.app.stop_db()
 
-    def restart(self, context):
-        self.app.restart()
-
     def start_db_with_conf_changes(self, context, config_contents, ds_version):
         self.app.start_db_with_conf_changes(config_contents, ds_version)
 
@@ -249,29 +174,21 @@ class MySqlManager(manager.Manager):
             },
         }
 
+    def is_log_enabled(self, logname):
+        if logname == self.GUEST_LOG_DEFS_GENERAL_LABEL:
+            value = self.configuration_manager.get_value('general_log', 'off')
+            return value == 'on'
+        elif logname == self.GUEST_LOG_DEFS_SLOW_QUERY_LABEL:
+            value = self.configuration_manager.get_value('slow_query_log',
+                                                         'off')
+            return value == 'on'
+
+        return False
+
     def apply_overrides(self, context, overrides):
         LOG.info("Applying overrides (%s).", overrides)
         self.app.apply_overrides(overrides)
 
-    def update_overrides(self, context, overrides, remove=False):
-        if remove:
-            self.app.remove_overrides()
-        self.app.update_overrides(overrides)
-
-    def create_backup(self, context, backup_info):
-        """
-        Entry point for initiating a backup for this guest agents db instance.
-        The call currently blocks until the backup is complete or errors. If
-        device_path is specified, it will be mounted based to a point specified
-        in configuration.
-
-        :param context: User context object.
-        :param backup_info: a dictionary containing the db instance id of the
-                            backup task, location, type, and other data.
-        """
-        with EndNotification(context):
-            self.app.create_backup(context, backup_info)
-
     def perform_restore(self, context, restore_location, backup_info):
         LOG.info("Starting to restore database from backup %s, "
                  "backup_info: %s", backup_info['id'], backup_info)
@@ -340,6 +257,21 @@ class MySqlManager(manager.Manager):
 
         LOG.info('Finished to reset password for restore')
 
+    def _validate_slave_for_replication(self, context, replica_info):
+        if replica_info['replication_strategy'] != self.replication_strategy:
+            raise exception.IncompatibleReplicationStrategy(
+                replica_info.update({
+                    'guest_strategy': self.replication_strategy
+                }))
+
+        volume_stats = self.get_filesystem_stats(context, None)
+        if (volume_stats.get('total', 0.0) <
+                replica_info['dataset']['dataset_size']):
+            raise exception.InsufficientSpaceForReplica(
+                replica_info.update({
+                    'slave_volume_size': volume_stats.get('total', 0.0)
+                }))
+
     def attach_replica(self, context, replica_info, slave_config):
         LOG.info("Attaching replica, replica_info: %s", replica_info)
         try:
@@ -431,7 +363,7 @@ class MySqlManager(manager.Manager):
 
         mount_point = CONF.get(CONF.datastore_manager).mount_point
         data_dir = mount_point + '/data'
-        operating_system.create_directory(data_dir,
+        operating_system.ensure_directory(data_dir,
                                           user=CONF.database_service_uid,
                                           group=CONF.database_service_uid,
                                           as_root=True)
@@ -31,7 +31,6 @@ from trove.common import utils
 from trove.common.configurations import MySQLConfParser
 from trove.common.db.mysql import models
 from trove.common.i18n import _
 from trove.common.stream_codecs import IniCodec
-from trove.conductor import api as conductor_api
 from trove.guestagent.common import guestagent_utils
 from trove.guestagent.common import operating_system
@@ -39,7 +38,6 @@ from trove.guestagent.common import sql_query
 from trove.guestagent.common.configuration import ConfigurationManager
 from trove.guestagent.common.configuration import ImportOverrideStrategy
 from trove.guestagent.datastore import service
-from trove.guestagent.datastore.mysql_common import service as commmon_service
 from trove.guestagent.utils import docker as docker_util
 from trove.guestagent.utils import mysql as mysql_util
 from trove.instance import service_status
@@ -72,8 +70,7 @@ class BaseMySqlAppStatus(service.BaseDbStatus):
         """Check database service status."""
         status = docker_util.get_container_status(self.docker_client)
         if status == "running":
-            root_pass = commmon_service.BaseMySqlApp.get_auth_password(
-                file="root.cnf")
+            root_pass = service.BaseDbApp.get_auth_password(file="root.cnf")
             cmd = 'mysql -uroot -p%s -e "select 1;"' % root_pass
             try:
                 docker_util.run_command(self.docker_client, cmd)
@@ -87,6 +84,8 @@ class BaseMySqlAppStatus(service.BaseDbStatus):
             return service_status.ServiceStatuses.RUNNING
         elif status == "not running":
             return service_status.ServiceStatuses.SHUTDOWN
+        elif status == "restarting":
+            return service_status.ServiceStatuses.SHUTDOWN
         elif status == "paused":
             return service_status.ServiceStatuses.PAUSED
         elif status == "exited":
@@ -165,7 +164,7 @@ class BaseMySqlAdmin(object):
             t = text(str(uu))
             client.execute(t)
 
-    def create_database(self, databases):
+    def create_databases(self, databases):
         """Create the list of specified databases."""
         with mysql_util.SqlClient(self.mysql_app.get_engine()) as client:
             for item in databases:
@@ -178,7 +177,7 @@ class BaseMySqlAdmin(object):
                 LOG.debug('Creating database, command: %s', str(cd))
                 client.execute(t)
 
-    def create_user(self, users):
+    def create_users(self, users):
         """Create users and grant them privileges for the
         specified databases.
         """
@@ -424,22 +423,13 @@ class BaseMySqlAdmin(object):
         return user.databases
 
 
-@six.add_metaclass(abc.ABCMeta)
-class BaseMySqlApp(object):
-    """Prepares DBaaS on a Guest container."""
-
-    CFG_CODEC = IniCodec()
+class BaseMySqlApp(service.BaseDbApp):
     configuration_manager = ConfigurationManager(
         MYSQL_CONFIG, CONF.database_service_uid, CONF.database_service_uid,
-        CFG_CODEC, requires_root=True,
+        service.BaseDbApp.CFG_CODEC, requires_root=True,
         override_strategy=ImportOverrideStrategy(CNF_INCLUDE_DIR, CNF_EXT)
     )
 
-    def __init__(self, status, docker_client):
-        """By default login with root no password for initial setup."""
-        self.status = status
-        self.docker_client = docker_client
-
     def get_engine(self):
         """Create the default engine with the updated admin user.
 
@@ -470,12 +460,6 @@ class BaseMySqlApp(object):
         with mysql_util.SqlClient(self.get_engine()) as client:
             return client.execute(sql_statement)
 
-    @classmethod
-    def get_auth_password(cls, file="os_admin.cnf"):
-        auth_config = operating_system.read_file(
-            cls.get_client_auth_file(file), codec=cls.CFG_CODEC, as_root=True)
-        return auth_config['client']['password']
-
     @classmethod
     def get_data_dir(cls):
         return cls.configuration_manager.get_value(
@@ -486,13 +470,6 @@ class BaseMySqlApp(object):
         cls.configuration_manager.apply_system_override(
             {MySQLConfParser.SERVER_CONF_SECTION: {'datadir': value}})
 
-    @classmethod
-    def get_client_auth_file(cls, file="os_admin.cnf"):
-        # Save the password inside the mount point directory so we could
-        # restore everything when rebuilding the instance.
-        conf_dir = guestagent_utils.get_conf_dir()
-        return guestagent_utils.build_file_path(conf_dir, file)
-
     def _create_admin_user(self, client, password):
         """
         Create a os_admin user with a random password
@@ -520,16 +497,6 @@ class BaseMySqlApp(object):
             client.execute(t)
             LOG.info("Trove admin user '%s' created.", ADMIN_USER_NAME)
 
-    @staticmethod
-    def save_password(user, password):
-        content = {'client': {'user': user,
-                              'password': password,
-                              'host': "localhost"}}
-
-        conf_dir = guestagent_utils.get_conf_dir()
-        operating_system.write_file(
-            f'{conf_dir}/{user}.cnf', content, codec=IniCodec(), as_root=True)
-
     def secure(self):
         LOG.info("Securing MySQL now.")
 
@@ -572,9 +539,6 @@ class BaseMySqlApp(object):
         self.configuration_manager.apply_user_override(
             {MySQLConfParser.SERVER_CONF_SECTION: overrides})
 
-    def remove_overrides(self):
-        self.configuration_manager.remove_user_override()
-
     def apply_overrides(self, overrides):
         LOG.info("Applying overrides to running MySQL, overrides: %s",
                  overrides)
@@ -608,7 +572,7 @@ class BaseMySqlApp(object):
 
         # Create folders for mysql on localhost
         for folder in ['/etc/mysql', '/var/run/mysqld']:
-            operating_system.create_directory(
+            operating_system.ensure_directory(
                 folder, user=CONF.database_service_uid,
                 group=CONF.database_service_uid, force=True,
                 as_root=True)
@@ -644,37 +608,12 @@ class BaseMySqlApp(object):
             LOG.exception("Failed to start mysql")
             raise exception.TroveError(_("Failed to start mysql"))
 
-        if not self.status.wait_for_real_status_to_change_to(
+        if not self.status.wait_for_status(
             service_status.ServiceStatuses.HEALTHY,
             CONF.state_change_wait_time, update_db
         ):
             raise exception.TroveError(_("Failed to start mysql"))
 
-    def start_db_with_conf_changes(self, config_contents, ds_version):
-        LOG.info(f"Starting database service with new configuration and "
-                 f"datastore version {ds_version}.")
-
-        if self.status.is_running:
-            LOG.info("Stopping MySQL before applying changes.")
-            self.stop_db()
-
-        self._reset_configuration(config_contents)
-        self.start_db(update_db=True, ds_version=ds_version)
-
-    def stop_db(self, update_db=False):
-        LOG.info("Stopping MySQL.")
-
-        try:
-            docker_util.stop_container(self.docker_client)
-        except Exception:
-            LOG.exception("Failed to stop mysql")
-            raise exception.TroveError("Failed to stop mysql")
-
-        if not self.status.wait_for_real_status_to_change_to(
-            service_status.ServiceStatuses.SHUTDOWN,
-            CONF.state_change_wait_time, update_db):
-            raise exception.TroveError("Failed to stop mysql")
-
     def wipe_ib_logfiles(self):
         """Destroys the iblogfiles.
 
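wait_for_real_status_to_change_to is renamed to wait_for_status here and in the other hunks; the contract stays poll-until-status-or-timeout. A generic, self-contained sketch of that contract (not the BaseDbStatus implementation):

    import time

    def wait_for_status(get_status, desired, timeout, interval=3):
        # Poll until the service reports the desired status or the
        # timeout expires; True on success, False on timeout.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_status() == desired:
                return True
            time.sleep(interval)
        return False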
@@ -695,23 +634,17 @@ class BaseMySqlApp(object):
             LOG.exception("Could not delete logfile.")
             raise
 
-    def _reset_configuration(self, configuration, admin_password=None):
-        self.configuration_manager.save_configuration(configuration)
-        if admin_password:
-            self.save_password(ADMIN_USER_NAME, admin_password)
-        self.wipe_ib_logfiles()
-
     def reset_configuration(self, configuration):
-        config_contents = configuration['config_contents']
         LOG.info("Resetting configuration.")
-        self._reset_configuration(config_contents)
+        self.configuration_manager.save_configuration(configuration)
+        self.wipe_ib_logfiles()
 
     def restart(self):
         LOG.info("Restarting mysql")
 
         # Ensure folders permission for database.
         for folder in ['/etc/mysql', '/var/run/mysqld']:
-            operating_system.create_directory(
+            operating_system.ensure_directory(
                 folder, user=CONF.database_service_uid,
                 group=CONF.database_service_uid, force=True,
                 as_root=True)
@@ -722,9 +655,10 @@ class BaseMySqlApp(object):
             LOG.exception("Failed to restart mysql")
             raise exception.TroveError("Failed to restart mysql")
 
-        if not self.status.wait_for_real_status_to_change_to(
+        if not self.status.wait_for_status(
             service_status.ServiceStatuses.HEALTHY,
-            CONF.state_change_wait_time, update_db=False):
+            CONF.state_change_wait_time, update_db=False
+        ):
             raise exception.TroveError("Failed to start mysql")
 
         LOG.info("Finished restarting mysql")
@@ -754,14 +688,10 @@ class BaseMySqlApp(object):
             f'datastore:{backup_info["datastore"]},'
             f'datastore_version:{backup_info["datastore_version"]}'
         )
-        swift_params = f'--swift-extra-metadata={swift_metadata}'
         swift_container = backup_info.get('swift_container',
                                           CONF.backup_swift_container)
-        if backup_info.get('swift_container'):
-            swift_params = (
-                f'{swift_params} '
-                f'--swift-container {swift_container}'
-            )
+        swift_params = (f'--swift-extra-metadata={swift_metadata} '
+                        f'--swift-container {swift_container}')
 
         command = (
             f'/usr/bin/python3 main.py --backup --backup-id={backup_id} '
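The consolidated build always passes both swift flags, falling back to CONF.backup_swift_container when the backup task does not name a container. With illustrative values:

    swift_metadata = 'datastore:mysql,datastore_version:5.7.29'
    swift_container = 'database_backups'
    swift_params = (f'--swift-extra-metadata={swift_metadata} '
                    f'--swift-container {swift_container}')
    # --swift-extra-metadata=datastore:mysql,datastore_version:5.7.29
    # --swift-container database_backups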
new file: trove/guestagent/datastore/postgres/__init__.py (empty)

new file: trove/guestagent/datastore/postgres/manager.py (103 lines)
@@ -0,0 +1,103 @@
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

from oslo_log import log as logging

from trove.common import cfg
from trove.guestagent.common import operating_system
from trove.guestagent.datastore.postgres import service
from trove.guestagent.datastore import manager
from trove.guestagent import guest_log

LOG = logging.getLogger(__name__)
CONF = cfg.CONF


class PostgresManager(manager.Manager):
    def __init__(self):
        super(PostgresManager, self).__init__('postgres')

        self.status = service.PgSqlAppStatus(self.docker_client)
        self.app = service.PgSqlApp(self.status, self.docker_client)
        self.adm = service.PgSqlAdmin(service.SUPER_USER_NAME)

    @property
    def configuration_manager(self):
        return self.app.configuration_manager

    def do_prepare(self, context, packages, databases, memory_mb, users,
                   device_path, mount_point, backup_info,
                   config_contents, root_password, overrides,
                   cluster_config, snapshot, ds_version=None):
        operating_system.ensure_directory(self.app.datadir,
                                          user=CONF.database_service_uid,
                                          group=CONF.database_service_uid,
                                          as_root=True)

        LOG.info('Preparing database config files')
        self.app.configuration_manager.save_configuration(config_contents)
        self.app.set_data_dir(self.app.datadir)
        self.app.update_overrides(overrides)

        # # Restore data from backup and reset root password
        # if backup_info:
        #     self.perform_restore(context, data_dir, backup_info)
        #     self.reset_password_for_restore(ds_version=ds_version,
        #                                     data_dir=data_dir)

        # config_file can only be set on the postgres command line
        command = f"postgres -c config_file={service.CONFIG_FILE}"
        self.app.start_db(ds_version=ds_version, command=command)
        self.app.secure()

        # if snapshot:
        #     # This instance is a replication slave
        #     self.attach_replica(context, snapshot, snapshot['config'])

    def apply_overrides(self, context, overrides):
        pass

    def get_datastore_log_defs(self):
        owner = cfg.get_configuration_property('database_service_uid')
        datastore_dir = self.app.get_data_dir()
        long_query_time = CONF.get(self.manager).get(
            'guest_log_long_query_time')
        general_log_file = self.build_log_file_name(
            self.GUEST_LOG_DEFS_GENERAL_LABEL, owner,
            datastore_dir=datastore_dir)
        general_log_dir, general_log_filename = os.path.split(general_log_file)
        return {
            self.GUEST_LOG_DEFS_GENERAL_LABEL: {
                self.GUEST_LOG_TYPE_LABEL: guest_log.LogType.USER,
                self.GUEST_LOG_USER_LABEL: owner,
                self.GUEST_LOG_FILE_LABEL: general_log_file,
                self.GUEST_LOG_ENABLE_LABEL: {
                    'logging_collector': True,
                    'log_destination': 'stderr',
                    'log_directory': general_log_dir,
                    'log_filename': general_log_filename,
                    'log_statement': 'all',
                    'debug_print_plan': True,
                    'log_min_duration_statement': long_query_time,
                },
                self.GUEST_LOG_DISABLE_LABEL: {
                    'logging_collector': False,
                },
                self.GUEST_LOG_RESTART_LABEL: True,
            },
        }

    def is_log_enabled(self, logname):
        return self.configuration_manager.get_value('logging_collector', False)
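Unlike MySQL, where two server variables are probed, PostgreSQL log exposure hinges on one knob: the enable/disable payloads above toggle logging_collector (plus the collector's destination settings), and is_log_enabled simply reads it back. A condensed restatement of those payloads, with an illustrative stand-in for the guest_log_long_query_time option:

    enable = {
        'logging_collector': True,
        'log_destination': 'stderr',
        'log_statement': 'all',
        'log_min_duration_statement': 1000,  # ms, illustrative value
    }
    disable = {'logging_collector': False}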
new file: trove/guestagent/datastore/postgres/query.py (154 lines)
@@ -0,0 +1,154 @@
# Copyright 2020 Catalyst Cloud
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class DatabaseQuery(object):
    @classmethod
    def list(cls, ignore=()):
        """Query to list all databases."""
        statement = (
            "SELECT datname, pg_encoding_to_char(encoding), "
            "datcollate FROM pg_database "
            "WHERE datistemplate = false"
        )

        for name in ignore:
            statement += " AND datname != '{name}'".format(name=name)

        return statement

    @classmethod
    def create(cls, name, encoding=None, collation=None):
        """Query to create a database."""
        statement = "CREATE DATABASE \"{name}\"".format(name=name)
        if encoding is not None:
            statement += " ENCODING = '{encoding}'".format(
                encoding=encoding,
            )
        if collation is not None:
            statement += " LC_COLLATE = '{collation}'".format(
                collation=collation,
            )

        return statement

    @classmethod
    def drop(cls, name):
        return f'DROP DATABASE IF EXISTS "{name}"'
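These builders return plain SQL strings; for example:

    print(DatabaseQuery.create('mydb', encoding='UTF8',
                               collation='en_US.UTF-8'))
    # CREATE DATABASE "mydb" ENCODING = 'UTF8' LC_COLLATE = 'en_US.UTF-8'
    print(DatabaseQuery.drop('mydb'))
    # DROP DATABASE IF EXISTS "mydb"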


class UserQuery(object):

    @classmethod
    def list(cls, ignore=()):
        """Query to list all users."""
        statement = (
            "SELECT usename, datname, pg_encoding_to_char(encoding), "
            "datcollate FROM pg_catalog.pg_user "
            "LEFT JOIN pg_catalog.pg_database "
            "ON CONCAT(usename, '=CTc/postgres') = ANY(datacl::text[]) "
            "WHERE (datistemplate ISNULL OR datistemplate = false)")
        if ignore:
            for name in ignore:
                statement += f" AND usename != '{name}'"

        return statement

    @classmethod
    def list_root(cls, ignore=()):
        """Query to list all superuser accounts."""
        statement = (
            "SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true"
        )

        for name in ignore:
            statement += f" AND usename != '{name}'"

        return statement

    @classmethod
    def get(cls, name):
        """Query to get a single user."""
        return cls.list() + f" AND usename = '{name}'"

    @classmethod
    def create(cls, name, password, encrypt_password=None, *options):
        """Query to create a user with a password."""
        create_clause = "CREATE USER \"{name}\"".format(name=name)
        with_clause = cls._build_with_clause(
            password, encrypt_password, *options)
        return ' '.join([create_clause, with_clause])

    @classmethod
    def _build_with_clause(cls, password, encrypt_password=None, *options):
        tokens = ['WITH']
        if password:
            # Do not specify the encryption option if 'encrypt_password'
            # is None. PostgreSQL will use the configuration default.
            if encrypt_password is True:
                tokens.append('ENCRYPTED')
            elif encrypt_password is False:
                tokens.append('UNENCRYPTED')
            tokens.append('PASSWORD')
            tokens.append("'{password}'".format(password=password))
        if options:
            tokens.extend(options)

        if len(tokens) > 1:
            return ' '.join(tokens)

        return ''
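The WITH clause assembles only the tokens that apply, so a default creation carries just the password while encrypt_password toggles the legacy (UN)ENCRYPTED keyword:

    print(UserQuery.create('alice', 's3cret'))
    # CREATE USER "alice" WITH PASSWORD 's3cret'
    print(UserQuery.create('alice', 's3cret', True))
    # CREATE USER "alice" WITH ENCRYPTED PASSWORD 's3cret'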

    @classmethod
    def update_password(cls, name, password, encrypt_password=None):
        """Query to update the password for a user."""
        return cls.alter_user(name, password, encrypt_password)

    @classmethod
    def alter_user(cls, name, password, encrypt_password=None, *options):
        """Query to alter a user."""
        alter_clause = f'ALTER USER "{name}"'
        with_clause = cls._build_with_clause(
            password, encrypt_password, *options)
        return ' '.join([alter_clause, with_clause])

    @classmethod
    def update_name(cls, old, new):
        """Query to update the name of a user.

        This statement also results in an automatic permission transfer to the
        new username.
        """
        return f'ALTER USER "{old}" RENAME TO "{new}"'

    @classmethod
    def drop(cls, name):
        """Query to drop a user."""
        return f'DROP USER "{name}"'


class AccessQuery(object):

    @classmethod
    def grant(cls, user, database):
        """Query to grant user access to a database."""
        return f'GRANT ALL ON DATABASE "{database}" TO "{user}"'

    @classmethod
    def revoke(cls, user, database):
        """Query to revoke user access to a database."""
        return f'REVOKE ALL ON DATABASE "{database}" FROM "{user}"'
new file: trove/guestagent/datastore/postgres/service.py (675 lines)
@@ -0,0 +1,675 @@
# Copyright 2020 Catalyst Cloud
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict

from oslo_log import log as logging
import psycopg2

from trove.common import cfg
from trove.common import exception
from trove.common import stream_codecs
from trove.common import utils
from trove.common.db.postgresql import models
from trove.guestagent.common import configuration
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.datastore import service
from trove.guestagent.datastore.postgres import query
from trove.guestagent.utils import docker as docker_util
from trove.instance import service_status

LOG = logging.getLogger(__name__)
CONF = cfg.CONF

ADMIN_USER_NAME = "os_admin"
SUPER_USER_NAME = "postgres"
CONFIG_FILE = "/etc/postgresql/postgresql.conf"
CNF_EXT = 'conf'
# The same with include_dir config option
CNF_INCLUDE_DIR = '/etc/postgresql/conf.d'
HBA_CONFIG_FILE = '/etc/postgresql/pg_hba.conf'


class PgSqlAppStatus(service.BaseDbStatus):
    def __init__(self, docker_client):
        super(PgSqlAppStatus, self).__init__(docker_client)

    def get_actual_db_status(self):
        """Check database service status."""
        status = docker_util.get_container_status(self.docker_client)
        if status == "running":
            cmd = "psql -U postgres -c 'select 1;'"
            try:
                docker_util.run_command(self.docker_client, cmd)
                return service_status.ServiceStatuses.HEALTHY
            except Exception as exc:
                LOG.warning('Failed to run docker command, error: %s',
                            str(exc))
                container_log = docker_util.get_container_logs(
                    self.docker_client, tail='all')
                LOG.debug('container log: \n%s', '\n'.join(container_log))
                return service_status.ServiceStatuses.RUNNING
        elif status == "not running":
            return service_status.ServiceStatuses.SHUTDOWN
        elif status == "paused":
            return service_status.ServiceStatuses.PAUSED
        elif status == "exited":
            return service_status.ServiceStatuses.SHUTDOWN
        elif status == "dead":
            return service_status.ServiceStatuses.CRASHED
        else:
            return service_status.ServiceStatuses.UNKNOWN
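The translation from Docker container state to Trove service status, condensed (a sketch using plain strings in place of the service_status enums; 'running' additionally depends on whether the psql probe succeeds):

    def to_trove_status(container_status, sql_probe_ok=False):
        if container_status == 'running':
            return 'HEALTHY' if sql_probe_ok else 'RUNNING'
        if container_status in ('not running', 'exited'):
            return 'SHUTDOWN'
        if container_status == 'paused':
            return 'PAUSED'
        if container_status == 'dead':
            return 'CRASHED'
        return 'UNKNOWN'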
|
||||
|
||||
|
||||
class PgSqlApp(service.BaseDbApp):
|
||||
configuration_manager = configuration.ConfigurationManager(
|
||||
CONFIG_FILE,
|
||||
CONF.database_service_uid,
|
||||
CONF.database_service_uid,
|
||||
stream_codecs.KeyValueCodec(
|
||||
value_quoting=True,
|
||||
bool_case=stream_codecs.KeyValueCodec.BOOL_LOWER,
|
||||
big_ints=True),
|
||||
requires_root=True,
|
||||
override_strategy=configuration.ImportOverrideStrategy(
|
||||
CNF_INCLUDE_DIR, CNF_EXT)
|
||||
)
|
||||
|
||||
def __init__(self, status, docker_client):
|
||||
super(PgSqlApp, self).__init__(status, docker_client)
|
||||
|
||||
# See
|
||||
# https://github.com/docker-library/docs/blob/master/postgres/README.md#pgdata
|
||||
mount_point = cfg.get_configuration_property('mount_point')
|
||||
self.datadir = f"{mount_point}/data/pgdata"
|
||||
|
||||
@classmethod
|
||||
def get_data_dir(cls):
|
||||
return cls.configuration_manager.get_value('data_directory')
|
||||
|
||||
@classmethod
|
||||
def set_data_dir(cls, value):
|
||||
cls.configuration_manager.apply_system_override(
|
||||
{'data_directory': value})
|
||||
|
||||
def reload(self):
|
||||
cmd = f"pg_ctl reload -D {self.datadir}"
|
||||
docker_util.run_command(self.docker_client, cmd)
|
||||
|
||||
def secure(self):
|
||||
LOG.info("Securing PostgreSQL now.")
|
||||
|
||||
admin_password = utils.generate_random_password()
|
||||
os_admin = models.PostgreSQLUser(ADMIN_USER_NAME, admin_password)
|
||||
|
||||
PgSqlAdmin(SUPER_USER_NAME).create_admin_user(os_admin,
|
||||
encrypt_password=True)
|
||||
self.save_password(ADMIN_USER_NAME, admin_password)
|
||||
|
||||
self.apply_access_rules()
|
||||
self.configuration_manager.apply_system_override(
|
||||
{'hba_file': HBA_CONFIG_FILE})
|
||||
self.restart()
|
||||
|
||||
LOG.info("PostgreSQL secure complete.")
|
||||
|
||||
    def apply_access_rules(self):
        """PostgreSQL client authentication settings.

        The order of entries is important: PostgreSQL uses the first record
        that matches a connection and never falls through to later entries.
        That is why the trusted 'local' connections have to come before the
        catch-all rules. The OrderedDict is necessary to guarantee the
        iteration order.
        """
        LOG.debug("Applying client authentication access rules.")

        local_admins = ','.join([SUPER_USER_NAME, ADMIN_USER_NAME])
        remote_admins = SUPER_USER_NAME
        access_rules = OrderedDict(
            [('local', [['all', local_admins, None, 'trust'],
                        ['replication', local_admins, None, 'trust'],
                        ['all', 'all', None, 'md5']]),
             ('host', [['all', local_admins, '127.0.0.1/32', 'trust'],
                       ['all', local_admins, '::1/128', 'trust'],
                       ['all', local_admins, 'localhost', 'trust'],
                       ['all', remote_admins, '0.0.0.0/0', 'reject'],
                       ['all', remote_admins, '::/0', 'reject'],
                       ['all', 'all', '0.0.0.0/0', 'md5'],
                       ['all', 'all', '::/0', 'md5']])
             ])
        operating_system.write_file(
            HBA_CONFIG_FILE, access_rules,
            stream_codecs.PropertiesCodec(string_mappings={'\t': None}),
            as_root=True)
        operating_system.chown(HBA_CONFIG_FILE,
                               CONF.database_service_uid,
                               CONF.database_service_uid,
                               as_root=True)
        operating_system.chmod(HBA_CONFIG_FILE,
                               operating_system.FileMode.SET_USR_RO,
                               as_root=True)
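        # Illustration (not part of the commit): assuming SUPER_USER_NAME is
        # 'postgres' and ADMIN_USER_NAME is 'os_admin', the rules above render
        # to pg_hba.conf entries roughly like:
        #
        #   local   all          postgres,os_admin                trust
        #   local   replication  postgres,os_admin                trust
        #   local   all          all                              md5
        #   host    all          postgres,os_admin  127.0.0.1/32  trust
        #   host    all          postgres           0.0.0.0/0     reject
        #   host    all          all                0.0.0.0/0     md5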

    def update_overrides(self, overrides):
        """Update config options in the include directory."""
        if overrides:
            self.configuration_manager.apply_user_override(overrides)

    def reset_configuration(self, configuration):
        self.configuration_manager.save_configuration(configuration)

    def start_db(self, update_db=False, ds_version=None, command=None,
                 extra_volumes=None):
        """Start and wait for database service."""
        docker_image = CONF.get(CONF.datastore_manager).docker_image
        image = (f'{docker_image}:latest' if not ds_version else
                 f'{docker_image}:{ds_version}')
        command = command if command else ''

        try:
            root_pass = self.get_auth_password(file="root.cnf")
        except exception.UnprocessableEntity:
            root_pass = utils.generate_random_password()

        # Get uid and gid
        user = "%s:%s" % (CONF.database_service_uid, CONF.database_service_uid)

        # Create folders for postgres on localhost
        for folder in ['/etc/postgresql', '/var/run/postgresql']:
            operating_system.ensure_directory(
                folder, user=CONF.database_service_uid,
                group=CONF.database_service_uid, force=True,
                as_root=True)

        volumes = {
            "/etc/postgresql": {"bind": "/etc/postgresql", "mode": "rw"},
            "/var/run/postgresql": {"bind": "/var/run/postgresql",
                                    "mode": "rw"},
            "/var/lib/postgresql": {"bind": "/var/lib/postgresql",
                                    "mode": "rw"},
            "/var/lib/postgresql/data": {"bind": "/var/lib/postgresql/data",
                                         "mode": "rw"},
        }
        if extra_volumes:
            volumes.update(extra_volumes)

        try:
            LOG.info("Starting docker container, image: %s", image)
            docker_util.start_container(
                self.docker_client,
                image,
                volumes=volumes,
                network_mode="host",
                user=user,
                environment={
                    "POSTGRES_PASSWORD": root_pass,
                    "PGDATA": self.datadir,
                },
                command=command
            )

            # Save root password
            LOG.debug("Saving root credentials to local host.")
            self.save_password('postgres', root_pass)
        except Exception:
            LOG.exception("Failed to start database service")
            raise exception.TroveError("Failed to start database service")

        if not self.status.wait_for_status(
            service_status.ServiceStatuses.HEALTHY,
            CONF.state_change_wait_time, update_db
        ):
            raise exception.TroveError("Failed to start database service")

    def restart(self):
        LOG.info("Restarting database")

        # Ensure folders permission for database.
        for folder in ['/etc/postgresql', '/var/run/postgresql']:
            operating_system.ensure_directory(
                folder, user=CONF.database_service_uid,
                group=CONF.database_service_uid, force=True,
                as_root=True)

        try:
            docker_util.restart_container(self.docker_client)
        except Exception:
            LOG.exception("Failed to restart database")
            raise exception.TroveError("Failed to restart database")

        if not self.status.wait_for_status(
            service_status.ServiceStatuses.HEALTHY,
            CONF.state_change_wait_time, update_db=False
        ):
            raise exception.TroveError("Failed to start database")

        LOG.info("Finished restarting database")


class PgSqlAdmin(object):
    # Default set of options of an administrative account.
    ADMIN_OPTIONS = (
        'SUPERUSER', 'CREATEDB', 'CREATEROLE', 'INHERIT', 'REPLICATION',
        'BYPASSRLS', 'LOGIN'
    )

    def __init__(self, username):
        port = cfg.get_configuration_property('postgresql_port')
        self.connection = PostgresConnection(username, port=port)

    def build_root_user(self, password=None):
        return models.PostgreSQLUser.root(name='root', password=password)

    def enable_root(self, root_password=None):
        """Create a superuser account or reset the superuser password.

        The default PostgreSQL administration account is 'postgres'.
        This account always exists and cannot be removed.
        Its attributes and access can however be altered.

        Clients can connect from the localhost or remotely via TCP/IP:

        Local clients (e.g. psql) can connect from a preset *system* account
        called 'postgres'.
        This system account has no password and is *locked* by default,
        so that it can be used by *local* users only.
        It should *never* be enabled (or its password set)!
        That would just open up a new attack vector on the system account.

        Remote clients should use a built-in *database* account of the same
        name. Its password can be changed using the "ALTER USER" statement.
        """
        root = self.build_root_user(root_password)
        results = self.query(query.UserQuery.list_root(self.ignore_users))
        cur_roots = [row[0] for row in results]
        if 'root' not in cur_roots:
            self.create_user(root)

        self.alter_user(root, None, *PgSqlAdmin.ADMIN_OPTIONS)
        return root.serialize()

    def disable_root(self):
        """Generate a new random password for the public superuser account.

        Do not disable its access rights. Once enabled the account should
        stay that way.
        """
        self.enable_root()

    def list_root(self, ignore=()):
        """Query to list all superuser accounts."""
        statement = (
            "SELECT usename FROM pg_catalog.pg_user WHERE usesuper = true"
        )

        for name in ignore:
            statement += " AND usename != '{name}'".format(name=name)

        return statement

    def grant_access(self, username, hostname, databases):
        """Give a user permission to use a given database.

        The username and hostname parameters are strings.
        The databases parameter is a list of strings representing the names of
        the databases to grant permission on.
        """
        for database in databases:
            LOG.info(f"Granting user {username} access to database {database}")
            self.psql(
                query.AccessQuery.grant(
                    user=username,
                    database=database,
                )
            )

    def revoke_access(self, username, hostname, database):
        """Revoke a user's permission to use a given database.

        The username and hostname parameters are strings.
        The database parameter is a string representing the name of the
        database.
        """
        LOG.info(f"Revoking user ({username}) access to database {database}")
        self.psql(
            query.AccessQuery.revoke(
                user=username,
                database=database,
            )
        )

    def list_access(self, username, hostname):
        """List the databases to which a given user has access.

        Return a list of serialized Postgres databases.
        """
        user = self._find_user(username)
        if user is not None:
            return user.databases

        raise exception.UserNotFound(username)

    def create_databases(self, databases):
        """Create the list of specified databases.

        The databases parameter is a list of serialized Postgres databases.
        """
        for database in databases:
            self.create_database(models.PostgreSQLSchema.deserialize(database))

    def create_database(self, database):
        """Create a database.

        :param database: Database to be created.
        :type database: PostgreSQLSchema
        """
        LOG.info(f"Creating database {database.name}")
        self.psql(
            query.DatabaseQuery.create(
                name=database.name,
                encoding=database.character_set,
                collation=database.collate,
            )
        )

    def delete_database(self, database):
        """Delete the specified database.
        """
        self._drop_database(
            models.PostgreSQLSchema.deserialize(database))

    def _drop_database(self, database):
        """Drop a given Postgres database.

        :param database: Database to be dropped.
        :type database: PostgreSQLSchema
        """
        LOG.info(f"Dropping database {database.name}")
        self.psql(query.DatabaseQuery.drop(name=database.name))

    def list_databases(self, limit=None, marker=None, include_marker=False):
        return guestagent_utils.serialize_list(
            self._get_databases(),
            limit=limit, marker=marker, include_marker=include_marker)

    def _get_databases(self):
        """Return all non-system Postgres databases on the instance."""
        results = self.query(
            query.DatabaseQuery.list(ignore=self.ignore_dbs)
        )
        return [models.PostgreSQLSchema(
            row[0].strip(), character_set=row[1], collate=row[2])
            for row in results]

    def create_users(self, users):
        """Create users and grant privileges for the specified databases.

        The users parameter is a list of serialized Postgres users.
        """
        for user in users:
            self.create_user(models.PostgreSQLUser.deserialize(user), None)

    def create_user(self, user, encrypt_password=None, *options):
        """Create a user and grant privileges for the specified databases.

        :param user: User to be created.
        :type user: PostgreSQLUser

        :param encrypt_password: Store passwords encrypted if True.
                                 Fallback to configured default
                                 behavior if None.
        :type encrypt_password: boolean

        :param options: Other user options.
        :type options: list
        """
        with_clause = query.UserQuery._build_with_clause(
            '<SANITIZED>',
            encrypt_password,
            *options
        )
        LOG.info(f"Creating user {user.name} {with_clause}")

        self.psql(
            query.UserQuery.create(
                user.name,
                user.password,
                encrypt_password,
                *options
            )
        )
        self._grant_access(
            user.name,
            [models.PostgreSQLSchema.deserialize(db) for db in user.databases])

    def create_admin_user(self, user, encrypt_password=None):
        self.create_user(user, encrypt_password, *self.ADMIN_OPTIONS)

    def _grant_access(self, username, databases):
        self.grant_access(
            username,
            None,
            [db.name for db in databases],
        )

    def list_users(self, limit=None, marker=None, include_marker=False):
        """List all users on the instance along with their access permissions.

        Return a paginated list of serialized Postgres users.
        """
        return guestagent_utils.serialize_list(
            self._get_users(),
            limit=limit, marker=marker, include_marker=include_marker)

    def _get_users(self):
        """Return all non-system Postgres users on the instance."""
        results = self.query(
            query.UserQuery.list(ignore=self.ignore_users)
        )

        names = set([row[0].strip() for row in results])
        return [self._build_user(name, results) for name in names]

    def _build_user(self, username, acl=None):
        """Build a model representation of a Postgres user.

        Include all databases it has access to.
        """
        user = models.PostgreSQLUser(username)
        if acl:
            dbs = [models.PostgreSQLSchema(row[1].strip(),
                                           character_set=row[2],
                                           collate=row[3])
                   for row in acl if row[0] == username and row[1] is not None]
            for d in dbs:
                user.databases.append(d.serialize())

        return user

    def delete_user(self, user):
        """Delete the specified user.
        """
        self._drop_user(models.PostgreSQLUser.deserialize(user))

    def _drop_user(self, user):
        """Drop a given Postgres user.

        :param user: User to be dropped.
        :type user: PostgreSQLUser
        """
        # Postgresql requires that you revoke grants before dropping the user
        databases = list(self.list_access(user.name, None))
        for db in databases:
            db_schema = models.PostgreSQLSchema.deserialize(db)
            self.revoke_access(user.name, None, db_schema.name)

        LOG.info(f"Dropping user {user.name}")
        self.psql(query.UserQuery.drop(name=user.name))

    def get_user(self, username, hostname):
        """Return a serialized representation of a user with a given name.
        """
        user = self._find_user(username)
        return user.serialize() if user is not None else None

    def _find_user(self, username):
        """Lookup a user with a given username.

        Return a new Postgres user instance or None if no match is found.
        """
        results = self.query(query.UserQuery.get(name=username))

        if results:
            return self._build_user(username, results)

        return None

    def user_exists(self, username):
        """Return whether a given user exists on the instance."""
        results = self.query(query.UserQuery.get(name=username))

        return bool(results)

    def change_passwords(self, users):
        """Change the passwords of one or more existing users.

        The users parameter is a list of serialized Postgres users.
        """
        for user in users:
            self.alter_user(
                models.PostgreSQLUser.deserialize(user))

    def alter_user(self, user, encrypt_password=None, *options):
        """Change the password and options of an existing user.

        :param user: User to be altered.
        :type user: PostgreSQLUser

        :param encrypt_password: Store passwords encrypted if True.
                                 Fallback to configured default
                                 behavior if None.
        :type encrypt_password: boolean

        :param options: Other user options.
        :type options: list
        """
        with_clause = query.UserQuery._build_with_clause(
            '<SANITIZED>',
            encrypt_password,
            *options
        )
        LOG.info(f"Altering user {user.name} {with_clause}")

        self.psql(
            query.UserQuery.alter_user(
                user.name,
                user.password,
                encrypt_password,
                *options)
        )

    def update_attributes(self, username, hostname, user_attrs):
        """Change the attributes of one existing user.

        The username and hostname parameters are strings.
        The user_attrs parameter is a dictionary in the following form:

            {"password": "", "name": ""}

        Each key/value pair in user_attrs is optional.
        """
        user = self._build_user(username)
        new_username = user_attrs.get('name')
        new_password = user_attrs.get('password')

        if new_username is not None:
            self._rename_user(user, new_username)
            # Make sure we can retrieve the renamed user.
            user = self._find_user(new_username)
            if user is None:
                raise exception.TroveError(
                    "Renamed user %s could not be found on the instance."
                    % new_username)

        if new_password is not None:
            user.password = new_password
            self.alter_user(user)

    def _rename_user(self, user, new_username):
        """Rename a Postgres user and transfer all access to the new name.

        :param user: User to be renamed.
        :type user: PostgreSQLUser
        """
        LOG.info(f"Changing username for {user.name} to {new_username}")
        # PostgreSQL handles the permission transfer itself.
        self.psql(
            query.UserQuery.update_name(
                old=user.name,
                new=new_username,
            )
        )

    def psql(self, statement):
        """Execute a non-returning statement (usually DDL);
        Turn autocommit ON (this is necessary for statements that cannot run
        within an implicit transaction, like CREATE DATABASE).
        """
        return self.connection.execute(statement)
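        # Illustration (not part of the commit): psycopg2 starts an implicit
        # transaction on the first execute(), and PostgreSQL refuses to run
        # CREATE DATABASE inside a transaction block. A minimal sketch of the
        # failure mode this method avoids, assuming a reachable server:
        #
        #   conn = psycopg2.connect("user='os_admin' host='localhost'")
        #   cur = conn.cursor()
        #   cur.execute("CREATE DATABASE demo")  # raises ActiveSqlTransaction
        #   conn.rollback()
        #   conn.autocommit = True               # what execute() does for us
        #   cur.execute("CREATE DATABASE demo")  # succeeds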

    def query(self, query):
        """Execute a query and return the result set.
        """
        return self.connection.query(query)

    @property
    def ignore_users(self):
        return cfg.get_ignored_users()

    @property
    def ignore_dbs(self):
        return cfg.get_ignored_dbs()


class PostgresConnection(object):
    def __init__(self, user, password=None, host='localhost', port=5432):
        self.user = user
        self.password = password
        self.host = host
        self.port = port

        self.connect_str = (f"user='{self.user}' password='{self.password}' "
                            f"host='{self.host}' port='{self.port}'")

    def execute(self, statement, identifiers=None, data_values=None):
        """Execute a non-returning statement.
        """
        self._execute_stmt(statement, identifiers, data_values, False,
                           autocommit=True)

    def query(self, query, identifiers=None, data_values=None):
        """Execute a query and return the result set.
        """
        return self._execute_stmt(query, identifiers, data_values, True)

    def _execute_stmt(self, statement, identifiers, data_values, fetch,
                      autocommit=False):
        cmd = self._bind(statement, identifiers)
        with psycopg2.connect(self.connect_str) as connection:
            connection.autocommit = autocommit
            with connection.cursor() as cursor:
                cursor.execute(cmd, data_values)
                if fetch:
                    return cursor.fetchall()

    def _bind(self, statement, identifiers):
        if identifiers:
            return statement.format(*identifiers)
        return statement
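For reference, a guest-side session with the classes above would look roughly
like the following sketch (illustrative only, not part of the commit; it
assumes a running PostgreSQL container and that ADMIN_USER_NAME and the
os_admin credentials were already set up by secure()):

    admin = PgSqlAdmin(ADMIN_USER_NAME)
    admin.create_database(models.PostgreSQLSchema('orders'))
    admin.create_user(models.PostgreSQLUser('alice', 'secret'), None)
    admin.grant_access('alice', None, ['orders'])
    print(admin.list_databases())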

@ -20,10 +20,13 @@ from oslo_utils import timeutils

from trove.common import cfg
from trove.common import context as trove_context
from trove.common import exception
from trove.common.i18n import _
from trove.common import stream_codecs
from trove.conductor import api as conductor_api
from trove.guestagent.common import guestagent_utils
from trove.guestagent.common import operating_system
from trove.guestagent.utils import docker as docker_util
from trove.instance import service_status

LOG = logging.getLogger(__name__)
@ -266,10 +269,10 @@ class BaseDbStatus(object):

        return True

    def wait_for_real_status_to_change_to(self, status, max_time,
                                          update_db=False):
    def wait_for_status(self, status, max_time, update_db=False):
        """Waits the given time for the real status to change to the one
        specified.

        The internal status is always updated. The public instance
        state stored in the Trove database is updated only if "update_db" is
        True.
@ -330,3 +333,71 @@ class BaseDbStatus(object):
        LOG.debug("Casting report_root message to conductor.")
        conductor_api.API(context).report_root(CONF.guest_id)
        LOG.debug("Successfully cast report_root.")


class BaseDbApp(object):
    CFG_CODEC = stream_codecs.IniCodec()

    def __init__(self, status, docker_client):
        self.status = status
        self.docker_client = docker_client

    @classmethod
    def get_client_auth_file(cls, file="os_admin.cnf"):
        # Save the password inside the mount point directory so we can
        # restore everything when rebuilding the instance.
        conf_dir = guestagent_utils.get_conf_dir()
        return guestagent_utils.build_file_path(conf_dir, file)

    @classmethod
    def get_auth_password(cls, file="os_admin.cnf"):
        auth_config = operating_system.read_file(
            cls.get_client_auth_file(file), codec=cls.CFG_CODEC, as_root=True)
        return auth_config['client']['password']

    @classmethod
    def save_password(cls, user, password):
        content = {
            'client': {
                'user': user,
                'password': password,
                'host': "localhost"
            }
        }

        conf_dir = guestagent_utils.get_conf_dir()
        operating_system.write_file(
            f'{conf_dir}/{user}.cnf', content, codec=cls.CFG_CODEC,
            as_root=True)
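        # Illustration (not part of the commit): save_password('os_admin',
        # 's3cret') writes <conf_dir>/os_admin.cnf (the directory comes from
        # guestagent_utils.get_conf_dir()) containing roughly:
        #
        #   [client]
        #   user = os_admin
        #   password = s3cret
        #   host = localhost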

    def remove_overrides(self):
        self.configuration_manager.remove_user_override()

    def reset_configuration(self, configuration):
        pass

    def stop_db(self, update_db=False):
        LOG.info("Stopping database.")

        try:
            docker_util.stop_container(self.docker_client)
        except Exception:
            LOG.exception("Failed to stop database")
            raise exception.TroveError("Failed to stop database")

        if not self.status.wait_for_status(
            service_status.ServiceStatuses.SHUTDOWN,
            CONF.state_change_wait_time, update_db
        ):
            raise exception.TroveError("Failed to stop database")

    def start_db_with_conf_changes(self, config_contents, ds_version):
        LOG.info(f"Starting database service with new configuration and "
                 f"datastore version {ds_version}.")

        if self.status.is_running:
            LOG.info("Stopping database before applying changes.")
            self.stop_db()

        self.reset_configuration(config_contents)
        self.start_db(update_db=True, ds_version=ds_version)

@ -38,6 +38,8 @@ defaults = {
        'trove.guestagent.datastore.mysql.manager.Manager',
    'mariadb':
        'trove.guestagent.datastore.mariadb.manager.Manager',
    'postgresql':
        'trove.guestagent.datastore.postgres.manager.PostgresManager',
    'percona':
        'trove.guestagent.datastore.experimental.percona.manager.Manager',
    'pxc':
@ -50,8 +52,6 @@ defaults = {
        'trove.guestagent.datastore.experimental.couchbase.manager.Manager',
    'mongodb':
        'trove.guestagent.datastore.experimental.mongodb.manager.Manager',
    'postgresql':
        'trove.guestagent.datastore.experimental.postgresql.manager.Manager',
    'couchdb':
        'trove.guestagent.datastore.experimental.couchdb.manager.Manager',
    'vertica':

@ -215,7 +215,6 @@ class GuestLog(object):
            raise exception.LogAccessForbidden(action='show', log=self._name)

    def _refresh_details(self):

        if self._published_size is None:
            # Initializing, so get all the values
            try:
@ -259,30 +258,24 @@ class GuestLog(object):
            self._size = logstat.st_size
            self._update_log_header_digest(self._file)

            if self._log_rotated():
                self.status = LogStatus.Rotated
            # See if we have stuff to publish
            elif logstat.st_size > self._published_size:
                self._set_status(self._published_size,
                                 LogStatus.Partial, LogStatus.Ready)
            # We've published everything so far
            elif logstat.st_size == self._published_size:
                self._set_status(self._published_size,
                                 LogStatus.Published, LogStatus.Enabled)
            # We've already handled this case (log rotated) so what gives?
            else:
                raise Exception(_("Bug in _log_rotated ?"))
            if self.status != LogStatus.Disabled:
                if self._log_rotated():
                    self.status = LogStatus.Rotated
                # See if we have stuff to publish
                elif logstat.st_size > self._published_size:
                    self._set_status(self._published_size,
                                     LogStatus.Partial, LogStatus.Ready)
                # We've published everything so far
                elif logstat.st_size == self._published_size:
                    self._set_status(self._published_size,
                                     LogStatus.Published, LogStatus.Enabled)
                # We've already handled this case (log rotated) so what gives?
                else:
                    raise Exception(_("Bug in _log_rotated ?"))
        else:
            self._published_size = 0
            self._size = 0

        if not self._size or not self.enabled:
            user_status = LogStatus.Disabled
            if self.enabled:
                user_status = LogStatus.Enabled
            self._set_status(self._type == LogType.USER,
                             user_status, LogStatus.Unavailable)

    def _log_rotated(self):
        """If the file is smaller than the last reported size
        or the first line hash is different, we can probably assume
@ -306,7 +299,7 @@ class GuestLog(object):
            LOG.debug("Log file rotation detected for '%s' - "
                      "discarding old log", self._name)
            self._delete_log_components()
        if os.path.isfile(self._file):
        if operating_system.exists(self._file, as_root=True):
            self._publish_to_container(self._file)
        else:
            raise RuntimeError(_(
@ -334,8 +327,6 @@ class GuestLog(object):
                swift_files.append(self._metafile_name())
            for swift_file in swift_files:
                self.swift_client.delete_object(container_name, swift_file)
        self._set_status(self._type == LogType.USER,
                         LogStatus.Disabled, LogStatus.Enabled)
        self._published_size = 0

    def _publish_to_container(self, log_filename):

@ -94,7 +94,7 @@ class ModuleManager(object):
        module_dir = guestagent_utils.build_file_path(
            cls.MODULE_BASE_DIR, sub_dir)
        if not operating_system.exists(module_dir, is_directory=True):
            operating_system.create_directory(module_dir, force=True)
            operating_system.ensure_directory(module_dir, force=True)
        return module_dir

    @classmethod

@ -62,7 +62,7 @@ class MysqlReplicationBase(base.Replication):

                LOG.debug("Trying to create replication user %s",
                          mysql_user.name)
                adm.create_user([mysql_user.serialize()])
                adm.create_users([mysql_user.serialize()])

                replication_user = {
                    'name': mysql_user.name,

@ -342,7 +342,7 @@ class VolumeMountPoint(object):
    def mount(self):
        if not operating_system.exists(self.mount_point, is_directory=True,
                                       as_root=True):
            operating_system.create_directory(self.mount_point, as_root=True)
            operating_system.ensure_directory(self.mount_point, as_root=True)
        LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
                  "volume_type:{2}, mount options:{3}".format(
                      self.device_path, self.mount_point, self.volume_fstype,

@ -1614,7 +1614,7 @@ class Instance(BuiltInstance):
            return False

    def detach_configuration(self):
        LOG.debug("Detaching configuration from instance: %s", self.id)
        LOG.info("Detaching configuration from instance: %s", self.id)
        if self.configuration and self.configuration.id:
            self._validate_can_perform_assign()
            LOG.debug("Detaching configuration: %s", self.configuration.id)

@ -105,6 +105,7 @@ class ServiceStatuses(object):
                                     'RESTART_REQUIRED')
    HEALTHY = ServiceStatus(0x21, 'healthy', 'HEALTHY')
    UPGRADING = ServiceStatus(0x22, 'upgrading', 'UPGRADING')
    RESTARTING = ServiceStatus(0x23, 'restarting', 'RESTARTING')


    # Dissuade further additions at run-time.

@ -1,6 +1,3 @@
# Pre-compute values used by the template expressions.
# Note: The variables have to be in lists due to how scoping works in JINJA templates.
#
# The recommended amount for 'shared_buffers' on a dedicated database server is 25% of RAM.
# Servers with less than 3GB of RAM require a more conservative value to save memory for other processes.
{% set shared_buffers_mb = [(0.25 if flavor['ram'] >= 3072 else 0.10) * flavor['ram']] %}
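# For example, a flavor with 4096MB of RAM gets shared_buffers = 1024MB (25%),
# while a 2048MB flavor falls below the 3GB threshold and gets
# shared_buffers = 204MB (10%, truncated by the |int filter applied below).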
@ -23,9 +20,9 @@
#
# This file is read on server startup and when the server receives a SIGHUP
# signal.  If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, or use "pg_ctl reload".  Some
# parameters, which are marked below, require a server shutdown and restart to
# take effect.
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()".  Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on".  Some parameters can be changed at run time
@ -36,9 +33,7 @@
#                GB = gigabytes                     min = minutes
#                TB = terabytes                     h   = hours
#                                                   d   = days
#
# The properties marked as controlled by Trove are managed by the Trove
# guest-agent. Any changes to them will be overwritten.


#------------------------------------------------------------------------------
# FILE LOCATIONS
@ -49,18 +44,14 @@

#data_directory = 'ConfigDir'           # use data in another directory
                                        # (change requires restart)
                                        # (controlled by Trove)
#hba_file = 'ConfigDir/pg_hba.conf'     # host-based authentication file
                                        # (change requires restart)
                                        # (controlled by Trove)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
                                        # (change requires restart)
                                        # (controlled by Trove)

# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = ''                 # write an extra PID file
                                        # (change requires restart)
                                        # (controlled by Trove)


#------------------------------------------------------------------------------
@ -69,51 +60,24 @@

# - Connection Settings -

#listen_addresses = 'localhost'         # what IP address(es) to listen on;
listen_addresses = '*'
                                        # comma-separated list of addresses;
                                        # defaults to 'localhost'; use '*' for all
                                        # (change requires restart)
                                        # (controlled by Trove)
#port = 5432                            # (change requires restart)
                                        # (controlled by Trove)
#max_connections = 100                  # (change requires restart)
# Note: Increasing max_connections costs ~400 bytes of shared memory per
# connection slot, plus lock space (see max_locks_per_transaction).
max_connections = 100                   # (change requires restart)
#superuser_reserved_connections = 3     # (change requires restart)
#unix_socket_directories = '/tmp'       # comma-separated list of directories
#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
                                        # (change requires restart)
                                        # (controlled by Trove)
#unix_socket_group = ''                 # (change requires restart)
                                        # (controlled by Trove)
#unix_socket_permissions = 0777         # begin with 0 to use octal notation
                                        # (change requires restart)
                                        # (controlled by Trove)
#bonjour = off                          # advertise server via Bonjour
                                        # (change requires restart)
#bonjour_name = ''                      # defaults to the computer name
                                        # (change requires restart)

# - Security and Authentication -

#authentication_timeout = 1min          # 1s-600s
#ssl = off                              # (change requires restart)
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
                                        # (change requires restart)
#ssl_prefer_server_ciphers = on         # (change requires restart)
#ssl_ecdh_curve = 'prime256v1'          # (change requires restart)
#ssl_renegotiation_limit = 0            # amount of data between renegotiations
#ssl_cert_file = 'server.crt'           # (change requires restart)
#ssl_key_file = 'server.key'            # (change requires restart)
#ssl_ca_file = ''                       # (change requires restart)
#ssl_crl_file = ''                      # (change requires restart)
#password_encryption = on
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off

# - TCP Keepalives -
# - TCP settings -
# see "man 7 tcp" for details

#tcp_keepalives_idle = 0                # TCP_KEEPIDLE, in seconds;
@ -122,6 +86,34 @@
                                        # 0 selects the system default
#tcp_keepalives_count = 0               # TCP_KEEPCNT;
                                        # 0 selects the system default
#tcp_user_timeout = 0                   # TCP_USER_TIMEOUT, in milliseconds;
                                        # 0 selects the system default

# - Authentication -

#authentication_timeout = 1min          # 1s-600s
#password_encryption = md5              # md5 or scram-sha-256
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = ''
#krb_caseins_users = off

# - SSL -

#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
@ -137,10 +129,8 @@ shared_buffers = {{ shared_buffers_mb[0]|int }}MB # min 128kB
#temp_buffers = 8MB                     # min 800kB
#max_prepared_transactions = 0          # zero disables the feature
                                        # (change requires restart)
# Note: Increasing max_prepared_transactions costs ~600 bytes of shared memory
# per transaction slot, plus lock space (see max_locks_per_transaction).
# It is not advisable to set max_prepared_transactions nonzero unless you
# actively intend to use prepared transactions.
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB                         # min 64kB
#maintenance_work_mem = 64MB            # min 1MB
#autovacuum_work_mem = -1               # min 1MB, or -1 to use maintenance_work_mem
@ -148,28 +138,33 @@ max_stack_depth = 7MB # min 100kB
                                        # The ideal value is the actual limit enforced
                                        # by the OS (8MB on 64-bit flavors) less a safety
                                        # margin of 1MB or so.
#dynamic_shared_memory_type = posix     # the default is the first option
#shared_memory_type = mmap              # the default is the first option
                                        # supported by the operating system:
                                        #   mmap
                                        #   sysv
                                        #   windows
                                        # (change requires restart)
dynamic_shared_memory_type = posix      # the default is the first option
                                        # supported by the operating system:
                                        #   posix
                                        #   sysv
                                        #   windows
                                        #   mmap
                                        # use none to disable dynamic shared memory
                                        # (change requires restart)

# - Disk -

#temp_file_limit = -1                   # limits per-session temp file space
#temp_file_limit = -1                   # limits per-process temp file space
                                        # in kB, or -1 for no limit

# - Kernel Resource Usage -
# - Kernel Resources -

#max_files_per_process = 1000           # min 25
                                        # (change requires restart)
#shared_preload_libraries = ''          # (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0                  # 0-100 milliseconds
#vacuum_cost_delay = 0                  # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1               # 0-10000 credits
#vacuum_cost_page_miss = 10             # 0-10000 credits
#vacuum_cost_page_dirty = 20            # 0-10000 credits
@ -178,27 +173,38 @@ max_stack_depth = 7MB # min 100kB
# - Background Writer -

#bgwriter_delay = 200ms                 # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100            # 0-1000 max buffers written/round
#bgwriter_lru_multiplier = 2.0          # 0-10.0 multipler on buffers scanned/round
#bgwriter_lru_maxpages = 100            # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0          # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB           # measured in pages, 0 disables

# - Asynchronous Behavior -

#effective_io_concurrency = 1           # 1-1000; 0 disables prefetching
#max_worker_processes = 8
#max_worker_processes = 8               # (change requires restart)
#max_parallel_maintenance_workers = 2   # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2    # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8               # maximum number of max_worker_processes that
                                        # can be used in parallel operations
#old_snapshot_threshold = -1            # 1min-60d; -1 disables; 0 is immediate
                                        # (change requires restart)
#backend_flush_after = 0                # measured in pages, 0 disables


#------------------------------------------------------------------------------
# WRITE AHEAD LOG
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

wal_level = minimal                     # minimal, archive, hot_standby, or logical
wal_level = replica                     # minimal, replica, or logical
                                        # (change requires restart)
                                        # (controlled by Trove)
#fsync = on                             # turns forced synchronization on or off
                                        # (Trove default)
#fsync = on                             # flush data to disk for crash safety
                                        # (turning this off can cause
                                        # unrecoverable data corruption)
#synchronous_commit = on                # synchronization level;
                                        # off, local, remote_write, or on
                                        # off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync                # the default is the first option
                                        # supported by the operating system:
                                        #   open_datasync
@ -207,50 +213,93 @@ wal_level = minimal # minimal, archive, hot_standby, or logical
                                        #   fsync_writethrough
                                        #   open_sync
#full_page_writes = on                  # recover from partial page writes
#wal_compression = off                  # enable compression of full-page writes
#wal_log_hints = off                    # also do full page writes of non-critical updates
                                        # (change requires restart)
#wal_init_zero = on                     # zero-fill new WAL files
#wal_recycle = on                       # recycle WAL files
#wal_buffers = -1                       # min 32kB, -1 sets based on shared_buffers
                                        # (change requires restart)
#wal_writer_delay = 200ms               # 1-10000 milliseconds
#wal_writer_flush_after = 1MB           # measured in pages, 0 disables

#commit_delay = 0                       # range 0-100000, in microseconds
#commit_siblings = 5                    # range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min              # range 30s-1h
#checkpoint_timeout = 5min              # range 30s-1d
max_wal_size = 1GB
min_wal_size = 80MB
#checkpoint_completion_target = 0.5     # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB         # measured in pages, 0 disables
#checkpoint_warning = 30s               # 0 disables

# - Archiving -

archive_mode = off                      # allows archiving to be done
archive_mode = on                       # enables archiving; off, on, or always
                                        # (change requires restart)
                                        # (controlled by Trove)
                                        # (Trove default)
#archive_command = ''                   # command to use to archive a logfile segment
                                        # placeholders: %p = path of file to archive
                                        #               %f = file name only
                                        # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
                                        # (controlled by Trove)
#archive_timeout = 0                    # force a logfile segment switch after this
                                        # number of seconds; 0 disables
                                        # (controlled by Trove)

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = ''                   # command to use to restore an archived logfile segment
                                        # placeholders: %p = path of file to restore
                                        #               %f = file name only
                                        # e.g. 'cp /mnt/server/archivedir/%f %p'
                                        # (change requires restart)
#archive_cleanup_command = ''           # command to execute at every restartpoint
#recovery_end_command = ''              # command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = ''                   # 'immediate' to end recovery as soon as a
                                        # consistent state is reached
                                        # (change requires restart)
#recovery_target_name = ''              # the named restore point to which recovery will proceed
                                        # (change requires restart)
#recovery_target_time = ''              # the time stamp up to which recovery will proceed
                                        # (change requires restart)
#recovery_target_xid = ''               # the transaction ID up to which recovery will proceed
                                        # (change requires restart)
#recovery_target_lsn = ''               # the WAL LSN up to which recovery will proceed
                                        # (change requires restart)
#recovery_target_inclusive = on         # Specifies whether to stop:
                                        # just after the specified recovery target (on)
                                        # just before the recovery target (off)
                                        # (change requires restart)
#recovery_target_timeline = 'latest'    # 'current', 'latest', or timeline ID
                                        # (change requires restart)
#recovery_target_action = 'pause'       # 'pause', 'promote', 'shutdown'
                                        # (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Server(s) -
# - Sending Servers -

# Set these on the master and on any standby that will send replication data.

#max_wal_senders = 0                    # max number of walsender processes
#max_wal_senders = 10                   # max number of walsender processes
                                        # (change requires restart)
#wal_keep_segments = 0                  # in logfile segments, 16MB each; 0 disables
#wal_keep_segments = 0                  # in logfile segments; 0 disables
#wal_sender_timeout = 60s               # in milliseconds; 0 disables

#max_replication_slots = 0              # max number of replication slots
#max_replication_slots = 10             # max number of replication slots
                                        # (change requires restart)
#track_commit_timestamp = off           # collect timestamp of transaction commit
                                        # (change requires restart)

# - Master Server -
@ -258,7 +307,8 @@ archive_mode = off # allows archiving to be done
# These settings are ignored on a standby server.

#synchronous_standby_names = ''         # standby servers that provide sync rep
                                        # comma-separated list of application_name
                                        # method to choose sync standbys, number of sync standbys,
                                        # and comma-separated list of application_name
                                        # from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0           # number of xacts by which cleanup is delayed

@ -266,7 +316,12 @@ archive_mode = off # allows archiving to be done

# These settings are ignored on a master server.

#hot_standby = off                      # "on" allows queries during recovery
#primary_conninfo = ''                  # connection string to sending server
                                        # (change requires restart)
#primary_slot_name = ''                 # replication slot on sending server
                                        # (change requires restart)
#promote_trigger_file = ''              # file name whose presence ends recovery
#hot_standby = on                       # "off" disallows queries during recovery
                                        # (change requires restart)
#max_standby_archive_delay = 30s        # max delay before canceling queries
                                        # when reading WAL from archive;
@ -281,6 +336,17 @@ archive_mode = off # allows archiving to be done
#wal_receiver_timeout = 60s             # time that receiver waits for
                                        # communication from master
                                        # in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s       # time to wait before retrying to
                                        # retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0           # minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4    # taken from max_worker_processes
                                        # (change requires restart)
#max_sync_workers_per_subscription = 2  # taken from max_logical_replication_workers


#------------------------------------------------------------------------------
@ -297,9 +363,14 @@ archive_mode = off # allows archiving to be done
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on

# - Planner Cost Constants -

@ -308,8 +379,21 @@ archive_mode = off # allows archiving to be done
#cpu_tuple_cost = 0.01                  # same scale as above
#cpu_index_tuple_cost = 0.005           # same scale as above
#cpu_operator_cost = 0.0025             # same scale as above
effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the amount of available RAM
                                        # less the minimum required for other processes or 512MB.
#parallel_tuple_cost = 0.1              # same scale as above
#parallel_setup_cost = 1000.0           # same scale as above

#jit_above_cost = 100000                # perform JIT compilation if available
                                        # and query more expensive than this;
                                        # -1 disables
#jit_inline_above_cost = 500000         # inline small functions if query is
                                        # more expensive than this; -1 disables
#jit_optimize_above_cost = 500000       # use expensive JIT optimizations if
                                        # query is more expensive than this;
                                        # -1 disables

#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB

# - Genetic Query Optimizer -

@ -329,10 +413,14 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
#from_collapse_limit = 8
#join_collapse_limit = 8                # 1 disables collapsing of explicit
                                        # JOIN clauses
#force_parallel_mode = off
#jit = on                               # allow JIT compilation
#plan_cache_mode = auto                 # auto, force_generic_plan or
                                        # force_custom_plan


#------------------------------------------------------------------------------
# ERROR REPORTING AND LOGGING
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -
@ -341,24 +429,20 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
                                        # stderr, csvlog, syslog, and eventlog,
                                        # depending on platform.  csvlog
                                        # requires logging_collector to be on.
                                        # (controlled by Trove)

# This is used when logging to stderr:
#logging_collector = off                # Enable capturing of stderr and csvlog
                                        # into log files. Required to be on for
                                        # csvlogs.
                                        # (change requires restart)
                                        # (controlled by Trove)

# These are only used if logging_collector is on:
#log_directory = 'pg_log'               # directory where log files are written,
#log_directory = 'log'                  # directory where log files are written,
                                        # can be absolute or relative to PGDATA
                                        # (controlled by Trove)
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
                                        # can include strftime() escapes
#log_file_mode = 0600                   # creation mode for log files,
                                        # begin with 0 to use octal notation
                                        # (controlled by Trove)
#log_truncate_on_rotation = off         # If on, an existing log file with the
                                        # same name as the new log file will be
                                        # truncated rather than appended to.
@ -376,23 +460,15 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#client_min_messages = notice           # values in order of decreasing detail:
                                        #   debug5
                                        #   debug4
                                        #   debug3
                                        #   debug2
                                        #   debug1
                                        #   log
                                        #   notice
                                        #   warning
                                        #   error

#log_min_messages = warning             # values in order of decreasing detail:
                                        #   debug5
                                        #   debug4
@ -426,6 +502,9 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
                                        # statements running at least this number
                                        # of milliseconds

#log_transaction_sample_rate = 0.0      # Fraction of transactions whose statements
                                        # are logged regardless of their duration.  1.0 logs all
                                        # statements from all transactions, 0.0 never logs.

# - What to Log -

@ -439,7 +518,7 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
#log_duration = off
#log_error_verbosity = default          # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = ''                   # special values:
#log_line_prefix = '%m [%p] '           # special values:
                                        #   %a = application name
                                        #   %u = user name
                                        #   %d = database name
@ -448,6 +527,7 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
                                        #   %p = process ID
                                        #   %t = timestamp without milliseconds
                                        #   %m = timestamp with milliseconds
                                        #   %n = timestamp with milliseconds (as a Unix epoch)
                                        #   %i = command tag
                                        #   %e = SQL state
                                        #   %c = session ID
@ -461,28 +541,36 @@ effective_cache_size = {{ max(flavor['ram'] - 512, 512)|int }}MB # Set to the am
                                        # e.g. '<%u%%%d> '
#log_lock_waits = off                   # log lock waits >= deadlock_timeout
#log_statement = 'none'                 # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1                    # log temporary files equal or larger
                                        # than the specified size in kilobytes;
                                        # -1 disables, 0 logs all temp files
#log_timezone = 'GMT'
log_timezone = 'Etc/UTC'

#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

#cluster_name = ''                      # added to process titles if nonempty
                                        # (change requires restart)
update_process_title = off              # (controlled by Trove)


#------------------------------------------------------------------------------
# RUNTIME STATISTICS
# STATISTICS
#------------------------------------------------------------------------------

# - Query/Index Statistics Collector -
# - Query and Index Statistics Collector -

#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none                 # none, pl, all
#track_activity_query_size = 1024       # (change requires restart)
update_process_title = off              # (controlled by Trove)
#stats_temp_directory = 'pg_stat_tmp'


# - Statistics Monitoring -
# - Monitoring -

#log_parser_stats = off
#log_planner_stats = off
@ -491,7 +579,7 @@ update_process_title = off # (controlled by Trove)


#------------------------------------------------------------------------------
# AUTOVACUUM PARAMETERS
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on                        # Enable autovacuum subprocess?  'on'
@ -514,7 +602,7 @@ update_process_title = off # (controlled by Trove)
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
                                        # before forced vacuum
                                        # (change requires restart)
#autovacuum_vacuum_cost_delay = 20ms    # default vacuum cost delay for
#autovacuum_vacuum_cost_delay = 2ms     # default vacuum cost delay for
                                        # autovacuum, in milliseconds;
                                        # -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1      # default vacuum cost limit for
@ -528,10 +616,22 @@ update_process_title = off # (controlled by Trove)

# - Statement Behavior -

#search_path = '"$user",public'         # schema names
#client_min_messages = notice           # values in order of decreasing detail:
                                        #   debug5
                                        #   debug4
                                        #   debug3
                                        #   debug2
                                        #   debug1
                                        #   log
                                        #   notice
                                        #   warning
                                        #   error
#search_path = '"$user", public'        # schema names
#row_security = on
#default_tablespace = ''                # a tablespace name, '' uses the default
#temp_tablespaces = ''                  # a list of tablespace names, '' uses
                                        # only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
@ -539,19 +639,25 @@ update_process_title = off # (controlled by Trove)
#session_replication_role = 'origin'
#statement_timeout = 0                  # in milliseconds, 0 is disabled
#lock_timeout = 0                       # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
                                        # before index cleanup, 0 always performs
                                        # index cleanup
#bytea_output = 'hex'                   # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB

# - Locale and Formatting -

#datestyle = 'iso, mdy'
datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
#timezone = 'GMT'
timezone = 'Etc/UTC'
#timezone_abbreviations = 'Default'     # Select the set of available time zone
                                        # abbreviations.  Currently, there are
                                        #   Default
@ -559,25 +665,31 @@ update_process_title = off # (controlled by Trove)
                                        #   India
                                        # You can create your own file in
                                        # share/timezonesets/.
#extra_float_digits = 0                 # min -15, max 3
#extra_float_digits = 1                 # min -15, max 3; any value >0 actually
                                        # selects precise output mode
#client_encoding = sql_ascii            # actually, defaults to database
                                        # encoding

# These settings are initialized by initdb, but they can be changed.
#lc_messages = 'C'                      # locale for system error message
lc_messages = 'en_US.utf8'              # locale for system error message
                                        # strings
#lc_monetary = 'C'                      # locale for monetary formatting
#lc_numeric = 'C'                       # locale for number formatting
#lc_time = 'C'                          # locale for time formatting
lc_monetary = 'en_US.utf8'              # locale for monetary formatting
lc_numeric = 'en_US.utf8'               # locale for number formatting
lc_time = 'en_US.utf8'                  # locale for time formatting

# default configuration for text search
#default_text_search_config = 'pg_catalog.simple'
default_text_search_config = 'pg_catalog.english'

# - Shared Library Preloading -

#shared_preload_libraries = ''          # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit'               # JIT library to use

# - Other Defaults -

#dynamic_library_path = '$libdir'
#local_preload_libraries = ''
#session_preload_libraries = ''


#------------------------------------------------------------------------------
@ -587,26 +699,26 @@ update_process_title = off # (controlled by Trove)
#deadlock_timeout = 1s
#max_locks_per_transaction = 64         # min 10
                                        # (change requires restart)
# Note: Each lock table slot uses ~270 bytes of shared memory, and there are
# max_locks_per_transaction * (max_connections + max_prepared_transactions)
# lock table slots.
#max_pred_locks_per_transaction = 64    # min 10
                                        # (change requires restart)
#max_pred_locks_per_relation = -2       # negative values mean
                                        # (max_pred_locks_per_transaction
                                        #  / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2            # min 0


#------------------------------------------------------------------------------
# VERSION/PLATFORM COMPATIBILITY
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding        # on, off, or safe_encoding
#default_with_oids = off
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#sql_inheritance = on
#standard_conforming_strings = on
#synchronize_seqscans = on

@ -621,6 +733,9 @@ update_process_title = off # (controlled by Trove)

#exit_on_error = off                    # terminate session on any error?
#restart_after_crash = on               # reinitialize after backend crash?
#data_sync_retry = off                  # retry or panic on failure to fsync
                                        # data?
                                        # (change requires restart)


#------------------------------------------------------------------------------
@ -628,16 +743,18 @@ update_process_title = off # (controlled by Trove)
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf.
# default postgresql.conf.  Note that these are directives, not variable
# assignments, so they can usefully be given more than once.

#include_dir = 'conf.d'                 # include files ending in '.conf' from
                                        # directory 'conf.d'
#include_if_exists = 'exists.conf'      # include file only if it exists
#include = 'special.conf'               # include file
include_dir = '/etc/postgresql/conf.d'  # include files ending in '.conf' from
                                        # a directory, e.g., 'conf.d'
                                        # (Enabled by Trove)
#include_if_exists = '...'              # include file only if it exists
#include = '...'                        # include file


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here
# Add settings for extensions here

@ -448,7 +448,13 @@ class ConfigurationRunner(TestRunner):
            self, instance_id, group_id, expected_states, expected_http_code,
            restart_inst=False):
        client = self.auth_client
        client.instances.modify(instance_id, configuration=group_id)
        params = {}
        if group_id:
            params['configuration'] = group_id
        else:
            params['remove_configuration'] = True
        client.instances.update(instance_id, **params)

        self.assert_client_code(client, expected_http_code)
        self.assert_instance_action(instance_id, expected_states)