Fix problems with DB utilities in HTK and Postgresql

This PS fixes:
1) Removes printing of the word "Done" after the restore/list command
   executes, which is not needed and clutters the output.
2) Fixes a problem with list_tables command output.
3) Fixes a parameter ordering problem with list_rows and list_schema.
4) Adds the missing menu/parameter parsing code for list_schema.
5) Fixes the backup-restore secret and the handling of PG_DUMPALL_OPTIONS.
6) Fixes single db restore, which wasn't dropping the database first and
   so ended up adding duplicate rows.
7) Fixes cronjob deficiencies: adds a security context and init containers,
   and fixes backup-related service account typos.
8) Fixes get_schema so that it only finds the table requested, rather
   than other tables whose names start with the same substring (see the
   sketch after this list).
9) Fixes a swift endpoint issue where the endpoint lookup sometimes
   returned the wrong endpoint, due to a bad grep command.
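
A minimal sketch of the substring issue behind item 8. The dump excerpt and
the table names (users, users_roles) are made up for illustration; only the
sed patterns mirror the ones changed in get_schema:

   # Hypothetical dump excerpt: two tables whose names share a prefix.
   printf '%s\n' \
     'CREATE TABLE public.users (' \
     '    id integer NOT NULL' \
     ');' \
     '--' \
     'CREATE TABLE public.users_roles (' \
     '    user_id integer NOT NULL' \
     ');' \
     '--' > /tmp/db_file.sql

   TABLE=users
   # The old pattern matches on a name prefix, so it also prints users_roles:
   sed -n "/CREATE TABLE public.${TABLE}/,/--/p" /tmp/db_file.sql
   # Anchoring on the trailing " (" (as the fix does) prints only table "users":
   sed -n "/CREATE TABLE public.${TABLE} (/,/--/p" /tmp/db_file.sql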

Change-Id: I0e3ab81732db031cb6e162b622efaf77bbc7ec25
Cliff Parsons 2020-06-03 22:27:57 +00:00 committed by Parsons, Cliff (cp769u)
parent 401d4e70ce
commit 1da7a5b0f8
7 changed files with 104 additions and 37 deletions

View File

@@ -128,7 +128,7 @@ send_to_remote_server() {
   echo $RESULT | grep $CONTAINER_NAME
   if [[ $? -ne 0 ]]; then
     # Find the swift URL from the keystone endpoint list
-    SWIFT_URL=$(openstack catalog list -f value | grep -A5 swift | grep public | awk '{print $2}')
+    SWIFT_URL=$(openstack catalog list -f value | grep swift | grep public | awk '{print $2}')

     # Get a token from keystone
     TOKEN=$(openstack token issue -f value -c id)
@@ -187,7 +187,7 @@ send_to_remote_server() {
 # This function attempts to store the built tarball to the remote gateway,
 # with built-in logic to handle error cases like:
 # 1) Network connectivity issues - retries for a specific amount of time
-# 2) Authorization errors - immediately logs an ERROR and exits
+# 2) Authorization errors - immediately logs an ERROR and returns
 store_backup_remotely() {
   FILEPATH=$1
   FILE=$2
@@ -327,17 +327,30 @@ backup_databases() {
   rm -f $ERR_LOG_FILE

   #Only delete the old archive after a successful archive
+  export LOCAL_DAYS_TO_KEEP=$(echo $LOCAL_DAYS_TO_KEEP | sed 's/"//g')
   if [[ "$LOCAL_DAYS_TO_KEEP" -gt 0 ]]; then
     remove_old_local_archives
   fi

-  if $REMOTE_BACKUP_ENABLED; then
+  REMOTE_BACKUP=$(echo $REMOTE_BACKUP_ENABLED | sed 's/"//g')
+  if $REMOTE_BACKUP; then
     store_backup_remotely $ARCHIVE_DIR $TARBALL_FILE
     if [[ $? -ne 0 ]]; then
-      log_backup_error_exit "Backup could not be sent to remote RGW."
+      # This error should print first, then print the summary as the last
+      # thing that the user sees in the output.
+      log ERROR "${DB_NAME}_backup" "Backup could not be sent to remote RGW."
+      set +x
+      echo "=================================================================="
+      echo "Local backup successful, but could not send to remote RGW."
+      echo "Backup archive name: $TARBALL_FILE"
+      echo "Backup archive size: $ARCHIVE_SIZE"
+      echo "=================================================================="
+      set -x
+      exit 1
     fi

     #Only delete the old archive after a successful archive
+    export REMOTE_DAYS_TO_KEEP=$(echo $REMOTE_DAYS_TO_KEEP | sed 's/"//g')
     if [[ "$REMOTE_DAYS_TO_KEEP" -gt 0 ]]; then
       remove_old_remote_archives
     fi
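
The quote stripping above pairs with the secret template change later in this
commit: values rendered with "| quote | b64enc" decode back with literal
double quotes around them, so the script removes the quotes before using the
value as a boolean or a number. A minimal sketch, with an assumed decoded value:

   # Assumed value as it arrives in the container after the Secret is decoded:
   REMOTE_BACKUP_ENABLED='"true"'

   # Without stripping, the shell would try to run the literal command '"true"'.
   # Removing the quotes leaves the builtin `true`, which the `if` can execute:
   REMOTE_BACKUP=$(echo $REMOTE_BACKUP_ENABLED | sed 's/"//g')
   if $REMOTE_BACKUP; then
     echo "remote backup enabled"
   fi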

View File

@@ -130,8 +130,6 @@
 # or "get_schema" when it needs that data requested by the user.
 #
-export LOG_FILE=/tmp/dbrestore.log
-
 usage() {
   ret_val=$1
   echo "Usage:"
@@ -235,14 +233,14 @@ list_archives() {
       echo
       echo "All Archives from RGW Data Store"
       echo "=============================================="
-      cat $TMP_DIR/archive_list
+      cat $TMP_DIR/archive_list | sort
       clean_and_exit 0 ""
     else
       clean_and_exit 1 "ERROR: Archives could not be retrieved from the RGW."
     fi
   elif [[ "x${REMOTE}" == "x" ]]; then
     if [[ -d $ARCHIVE_DIR ]]; then
-      archives=$(find $ARCHIVE_DIR/ -iname "*.gz" -print)
+      archives=$(find $ARCHIVE_DIR/ -iname "*.gz" -print | sort)
       echo
       echo "All Local Archives"
       echo "=============================================="
@@ -266,6 +264,7 @@ get_archive() {
   REMOTE=$2

   if [[ "x$REMOTE" == "xremote" ]]; then
+    echo "Retrieving archive ${ARCHIVE_FILE} from the remote RGW..."
     retrieve_remote_archive $ARCHIVE_FILE
     if [[ $? -ne 0 ]]; then
       clean_and_exit 1 "ERROR: Could not retrieve remote archive: $ARCHIVE_FILE"
@@ -431,6 +430,9 @@ database_exists() {
 cli_main() {
   ARGS=("$@")

+  # Create the ARCHIVE DIR if it's not already there.
+  mkdir -p $ARCHIVE_DIR
+
   # Create temp directory for a staging area to decompress files into
   export TMP_DIR=$(mktemp -d)
@@ -483,6 +485,16 @@ cli_main() {
       fi
       ;;

+    "list_schema")
+      if [[ ${#ARGS[@]} -lt 4 || ${#ARGS[@]} -gt 5 ]]; then
+        usage 1
+      elif [[ ${#ARGS[@]} -eq 4 ]]; then
+        list_schema ${ARGS[1]} ${ARGS[2]} ${ARGS[3]}
+      else
+        list_schema ${ARGS[1]} ${ARGS[2]} ${ARGS[3]} ${ARGS[4]}
+      fi
+      ;;
+
     "restore")
       REMOTE=""
       if [[ ${#ARGS[@]} -lt 3 || ${#ARGS[@]} -gt 4 ]]; then
@@ -505,11 +517,13 @@ cli_main() {
         clean_and_exit 1 "ERROR: Could not get the list of databases to restore."
       fi

+      if [[ ! $DB_NAMESPACE == "kube-system" ]]; then
       #check if the requested database is available in the archive
       database_exists $DB_SPEC
       if [[ $? -ne 1 ]]; then
         clean_and_exit 1 "ERROR: Database ${DB_SPEC} does not exist."
       fi
+      fi

       echo "Restoring Database $DB_SPEC And Grants"
       restore_single_db $DB_SPEC $TMP_DIR
@@ -518,7 +532,6 @@ cli_main() {
       else
         clean_and_exit 1 "ERROR: Single database restore failed."
       fi
-      echo "Tail ${LOG_FILE} for restore log."
       clean_and_exit 0 ""
     else
       echo "Restoring All The Databases. This could take a few minutes..."
@@ -528,7 +541,7 @@ cli_main() {
       else
         clean_and_exit 1 "ERROR: Database restore failed."
       fi
-      clean_and_exit 0 "Tail ${LOG_FILE} for restore log."
+      clean_and_exit 0 ""
     fi
     ;;
   *)
@@ -536,6 +549,6 @@ cli_main() {
       ;;
   esac

-  clean_and_exit 0 "Done"
+  clean_and_exit 0 ""
}
{{- end }}

View File

@@ -41,9 +41,9 @@ dump_databases_to_directory() {
   TMP_DIR=$1
   LOG_FILE=$2

-  PG_DUMPALL_OPTIONS=$POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS
+  PG_DUMPALL_OPTIONS=$(echo $POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS | sed 's/"//g')
   PG_DUMPALL="pg_dumpall \
-    $POSTGRESQL_BACKUP_PG_DUMPALL_OPTIONS \
+    $PG_DUMPALL_OPTIONS \
     -U $POSTGRESQL_ADMIN_USER \
     -h $POSTGRESQL_SERVICE_HOST"

View File

@@ -30,6 +30,7 @@ export ARCHIVE_DIR=${POSTGRESQL_BACKUP_BASE_DIR}/db/${DB_NAMESPACE}/${DB_NAME}/a
 # Define variables needed in this file
 POSTGRESQL_HOST=$(cat /etc/postgresql/admin_user.conf | cut -d: -f 1)
 export PSQL="psql -U $POSTGRESQL_ADMIN_USER -h $POSTGRESQL_HOST"
+export LOG_FILE=/tmp/dbrestore.log

 # Extract all databases from an archive and put them in the requested
 # file.
@@ -56,7 +57,7 @@ get_tables() {
   SQL_FILE=postgres.$POSTGRESQL_POD_NAMESPACE.all.sql
   if [[ -e $TMP_DIR/$SQL_FILE ]]; then
-    cat $TMP_DIR/$SQL_FILE | sed -n /'\\connect '$DATABASE/,/'\\connect'/p | grep "CREATE TABLE" | awk -F'[. ]' '{print $4}' > TABLE_FILE
+    cat $TMP_DIR/$SQL_FILE | sed -n /'\\connect '$DATABASE/,/'\\connect'/p | grep "CREATE TABLE" | awk -F'[. ]' '{print $4}' > $TABLE_FILE
   else
     # Error, cannot report the tables
     echo "No SQL file found - cannot extract the tables"
@@ -67,8 +68,8 @@ get_tables() {
 # Extract all rows in the given table of a database from an archive and put them in the requested
 # file.
 get_rows() {
-  TABLE=$1
-  DATABASE=$2
+  DATABASE=$1
+  TABLE=$2
   TMP_DIR=$3
   ROW_FILE=$4
@@ -87,8 +88,8 @@ get_rows() {
 # Extract the schema for the given table in the given database belonging to the archive file
 # found in the TMP_DIR.
 get_schema() {
-  TABLE=$1
-  DATABASE=$2
+  DATABASE=$1
+  TABLE=$2
   TMP_DIR=$3
   SCHEMA_FILE=$4
@@ -96,14 +97,14 @@ get_schema() {
   if [[ -e $TMP_DIR/$SQL_FILE ]]; then
     DB_FILE=$(mktemp -p /tmp)
     cat $TMP_DIR/$SQL_FILE | sed -n /'\\connect '${DATABASE}/,/'\\connect'/p > ${DB_FILE}
-    cat ${DB_FILE} | sed -n /'CREATE TABLE public.'${TABLE}/,/'--'/p > ${SCHEMA_FILE}
+    cat ${DB_FILE} | sed -n /'CREATE TABLE public.'${TABLE}' ('/,/'--'/p > ${SCHEMA_FILE}
     cat ${DB_FILE} | sed -n /'CREATE SEQUENCE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
     cat ${DB_FILE} | sed -n /'ALTER TABLE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
     cat ${DB_FILE} | sed -n /'ALTER TABLE ONLY public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
     cat ${DB_FILE} | sed -n /'ALTER SEQUENCE public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
     cat ${DB_FILE} | sed -n /'SELECT pg_catalog.*public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
-    cat ${DB_FILE} | sed -n /'CREATE INDEX.*public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
-    cat ${DB_FILE} | sed -n /'GRANT.*public.'${TABLE}/,/'--'/p >> ${SCHEMA_FILE}
+    cat ${DB_FILE} | sed -n /'CREATE INDEX.*public.'${TABLE}' USING'/,/'--'/p >> ${SCHEMA_FILE}
+    cat ${DB_FILE} | sed -n /'GRANT.*public.'${TABLE}' TO'/,/'--'/p >> ${SCHEMA_FILE}
     rm -f ${DB_FILE}
   else
     # Error, cannot report the rows
@@ -126,6 +127,9 @@ restore_single_db() {
   if [[ -f $TMP_DIR/$SQL_FILE ]]; then
     extract_single_db_dump $TMP_DIR/$SQL_FILE $SINGLE_DB_NAME $TMP_DIR
     if [[ -f $TMP_DIR/$SINGLE_DB_NAME.sql && -s $TMP_DIR/$SINGLE_DB_NAME.sql ]]; then
+      # First drop the database
+      $PSQL -tc "DROP DATABASE $SINGLE_DB_NAME;"
+
       # Postgresql does not have the concept of creating database if condition.
       # This next command creates the database in case it does not exist.
       $PSQL -tc "SELECT 1 FROM pg_database WHERE datname = '$SINGLE_DB_NAME'" | grep -q 1 || \
@@ -138,7 +142,9 @@ restore_single_db() {
       if [[ "$?" -eq 0 ]]; then
         echo "Database restore Successful."
       else
-        echo "Database restore Failed."
+        # Dump out the log file for debugging
+        cat $LOG_FILE
+        echo -e "\nDatabase restore Failed."
         return 1
       fi
     else
@@ -162,7 +168,9 @@ restore_all_dbs() {
     if [[ "$?" -eq 0 ]]; then
       echo "Database Restore successful."
     else
-      echo "Database Restore failed."
+      # Dump out the log file for debugging
+      cat $LOG_FILE
+      echo -e "\nDatabase Restore failed."
       return 1
     fi
   else

View File

@@ -16,7 +16,7 @@ limitations under the License.
 {{- $envAll := . }}

 {{- $serviceAccountName := "postgresql-backup" }}
-{{ tuple $envAll "postgresql_account" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
+{{ tuple $envAll "postgresql_backup" $serviceAccountName | include "helm-toolkit.snippets.kubernetes_pod_rbac_serviceaccount" }}
---
apiVersion: batch/v1beta1
kind: CronJob
@@ -41,23 +41,43 @@ spec:
     metadata:
       labels:
 {{ tuple $envAll "postgresql-backup" "backup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 8 }}
+      annotations:
+{{ dict "envAll" $envAll "podName" "postgresql-backup" "containerNames" (list "init" "backup-perms" "postgresql-backup") | include "helm-toolkit.snippets.kubernetes_mandatory_access_control_annotation" | indent 8 }}
     spec:
       template:
         metadata:
           labels:
 {{ tuple $envAll "postgresql-backup" "backup" | include "helm-toolkit.snippets.kubernetes_metadata_labels" | indent 12 }}
         spec:
-          securityContext:
-            runAsUser: 65534
-            fsGroup: 999
+{{ dict "envAll" $envAll "application" "postgresql_backup" | include "helm-toolkit.snippets.kubernetes_pod_security_context" | indent 10 }}
           serviceAccountName: {{ $serviceAccountName }}
           restartPolicy: OnFailure
           nodeSelector:
             {{ .Values.labels.job.node_selector_key }}: {{ .Values.labels.job.node_selector_value }}
+          initContainers:
+{{ tuple $envAll "postgresql_backup" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 12 }}
+            - name: backup-perms
+{{ tuple $envAll "postgresql_backup" | include "helm-toolkit.snippets.image" | indent 14 }}
+{{ tuple $envAll $envAll.Values.pod.resources.jobs.postgresql_backup | include "helm-toolkit.snippets.kubernetes_resources" | indent 14 }}
+{{ dict "envAll" $envAll "application" "postgresql_backup" "container" "backup_perms" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 14 }}
+              command:
+                - chown
+                - -R
+                - "65534:65534"
+                - $(POSTGRESQL_BACKUP_BASE_DIR)
+              env:
+                - name: POSTGRESQL_BACKUP_BASE_DIR
+                  value: {{ .Values.conf.backup.base_path }}
+              volumeMounts:
+                - mountPath: /tmp
+                  name: pod-tmp
+                - mountPath: {{ .Values.conf.backup.base_path }}
+                  name: postgresql-backup-dir
           containers:
             - name: postgresql-backup
 {{ tuple $envAll "postgresql_backup" | include "helm-toolkit.snippets.image" | indent 14 }}
 {{ tuple $envAll $envAll.Values.pod.resources.jobs.postgresql_backup | include "helm-toolkit.snippets.kubernetes_resources" | indent 14 }}
+{{ dict "envAll" $envAll "application" "postgresql_backup" "container" "postgresql_backup" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 14 }}
              command:
                - /tmp/backup_postgresql.sh
              env:
@@ -120,7 +140,7 @@ spec:
            - name: postgresql-secrets
              secret:
                secretName: postgresql-secrets
-                defaultMode: 0600
+                defaultMode: 292
            - name: postgresql-bin
              secret:
                secretName: postgresql-bin
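
For reference, 292 here is the decimal form of octal 0444 (read-only for
owner, group, and other), so the backup container, which now runs as UID 65534
rather than root, can still read the mounted secret files. A quick check:

   # 292 decimal printed as octal file-mode bits:
   printf '0%o\n' 292    # -> 0444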

View File

@@ -15,11 +15,11 @@ metadata:
   name: {{ $secretName }}
 type: Opaque
 data:
-  BACKUP_ENABLED: {{ $envAll.Values.conf.backup.enabled | b64enc }}
+  BACKUP_ENABLED: {{ $envAll.Values.conf.backup.enabled | quote | b64enc }}
   BACKUP_BASE_PATH: {{ $envAll.Values.conf.backup.base_path | b64enc }}
   LOCAL_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.days_to_keep | quote | b64enc }}
-  PG_DUMPALL_OPTIONS: {{ $envAll.Values.conf.backup.pg_dumpall_options | b64enc }}
-  REMOTE_BACKUP_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.enabled | b64enc }}
+  PG_DUMPALL_OPTIONS: {{ $envAll.Values.conf.backup.pg_dumpall_options | quote | b64enc }}
+  REMOTE_BACKUP_ENABLED: {{ $envAll.Values.conf.backup.remote_backup.enabled | quote | b64enc }}
   REMOTE_BACKUP_CONTAINER: {{ $envAll.Values.conf.backup.remote_backup.container_name | b64enc }}
   REMOTE_BACKUP_DAYS_TO_KEEP: {{ $envAll.Values.conf.backup.remote_backup.days_to_keep | quote | b64enc }}
   REMOTE_BACKUP_STORAGE_POLICY: {{ $envAll.Values.conf.backup.remote_backup.storage_policy | b64enc }}

View File

@@ -44,6 +44,19 @@ pod:
          runAsUser: 999
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
+    postgresql_backup:
+      pod:
+        runAsUser: 65534
+        allowPrivilegeEscalation: false
+        readOnlyRootFilesystem: true
+      container:
+        backup_perms:
+          runAsUser: 0
+          readOnlyRootFilesystem: true
+        postgresql_backup:
+          runAsUser: 65534
+          readOnlyRootFilesystem: true
+          allowPrivilegeEscalation: false
   affinity:
     anti:
       type:
@@ -198,7 +211,7 @@ dependencies:
          service: postgresql
      jobs:
        - prometheus-postgresql-exporter-create-user
-    postgresql-backup:
+    postgresql_backup:
      services:
        - endpoint: internal
          service: postgresql
@@ -369,7 +382,7 @@ conf:
     enabled: false
     base_path: /var/backup
     days_to_keep: 3
-    pg_dumpall_options: null
+    pg_dumpall_options: '--inserts --clean'
     remote_backup:
       enabled: false
       container_name: postgresql
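
For context, the two options now set by default carry their standard
pg_dumpall meanings: --clean emits DROP statements before the CREATE
statements, and --inserts dumps table data as INSERT commands instead of COPY.
A minimal invocation sketch using the same variables the backup script uses
(output path is an assumption for illustration):

   pg_dumpall --inserts --clean \
     -U "$POSTGRESQL_ADMIN_USER" \
     -h "$POSTGRESQL_SERVICE_HOST" > /tmp/all_databases.sql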