Merge "[DATABASE] Code improvement"

commit 9a5da15288
@@ -15,7 +15,7 @@ apiVersion: v1
 appVersion: v1.0.0
 description: OpenStack-Helm Helm-Toolkit
 name: helm-toolkit
-version: 0.2.34
+version: 0.2.35
 home: https://docs.openstack.org/openstack-helm
 icon: https://www.openstack.org/themes/openstack/images/project-mascots/OpenStack-Helm/OpenStack_Project_OpenStackHelm_vertical.png
 sources:
@@ -271,25 +271,63 @@ store_backup_remotely() {
 # possible case, when we have several backups of the same date. E.g.
 # one manual, and one automatic.
 
-declare -A FILETABLE
+function get_archive_date(){
+  # get_archive_date returns the correct archive date for the
+  # different formats of archive names:
+  # the old one: <database name>.<namespace>.<table name | all>.<date-time>.tar.gz
+  # the new one: <database name>.<namespace>.<table name | all>.<backup mode>.<date-time>.tar.gz
+  local A_FILE="$1"
+  local A_DATE=""
+  if [[ -z ${BACK_UP_MODE} ]]; then
+    A_DATE=$( awk -F/ '{print $NF}' <<< ${ARCHIVE_FILE} | cut -d'.' -f 4 | tr -d "Z")
+  else
+    A_DATE=$( awk -F/ '{print $NF}' <<< ${ARCHIVE_FILE} | cut -d'.' -f 5 | tr -d "Z")
+  fi
+  echo ${A_DATE}
+}
+
+declare -A fileTable
 create_hash_table() {
-  unset FILETABLE
+  unset fileTable
   fileList=$@
   for ARCHIVE_FILE in ${fileList}; do
-    ARCHIVE_DATE=$( echo $ARCHIVE_FILE | awk -F/ '{print $NF}' | cut -d'.' -f 4)
     # When creating the index, we round the given ARCHIVE_DATE down to
     # midnight (00:00:00) to account for the possibility of more than
     # one scheduled backup per day.
-    INDEX=$(seconds_difference $(date --date $ARCHIVE_DATE +"%D"))
-    if [[ -z FILETABLE[${INDEX}] ]]; then
-      FILETABLE[${INDEX}]=${ARCHIVE_FILE}
+    ARCHIVE_DATE=$(get_archive_date ${ARCHIVE_FILE})
+    ARCHIVE_DATE=$(date --date=${ARCHIVE_DATE} +%D)
+    log INFO "${DB_NAME}_backup" "Archive date to build index: ${ARCHIVE_DATE}"
+    INDEX=$(seconds_difference ${ARCHIVE_DATE})
+    if [[ -z fileTable[${INDEX}] ]]; then
+      fileTable[${INDEX}]=${ARCHIVE_FILE}
     else
-      FILETABLE[${INDEX}]="${FILETABLE[${INDEX}]} ${ARCHIVE_FILE}"
+      fileTable[${INDEX}]="${fileTable[${INDEX}]} ${ARCHIVE_FILE}"
     fi
-    echo "INDEX: ${INDEX} VALUE: ${FILETABLE[${INDEX}]}"
+    echo "INDEX: ${INDEX} VALUE: ${fileTable[${INDEX}]}"
   done
 }
 
+function get_backup_prefix() {
+  # Create a list of all possible prefixes in the format
+  # <db_name>.<namespace> to cover the situation when backups
+  # of different databases and/or namespaces share the same
+  # local or remote storage.
+  ALL_FILES=($@)
+  PREFIXES=()
+  for fname in ${ALL_FILES[@]}; do
+    prefix=$(basename ${fname} | cut -d'.' -f1,2 )
+    for ((i=0; i<${#PREFIXES[@]}; i++)); do
+      if [[ ${PREFIXES[${i}]} == ${prefix} ]]; then
+        prefix=""
+        break
+      fi
+    done
+    if [[ ! -z ${prefix} ]]; then
+      PREFIXES+=(${prefix})
+    fi
+  done
+}
+
 remove_old_local_archives() {
   if [[ -d $ARCHIVE_DIR ]]; then
     count=0
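Note: a minimal sketch of the field arithmetic that get_archive_date relies on, using hypothetical archive names (not taken from the commit). In the old naming scheme the date-time sits in dot-field 4 of the basename; once a backup mode is inserted, it moves to field 5:

#!/usr/bin/env bash
# Hypothetical archive names, for illustration only.
OLD_NAME="/backups/postgres.openstack.all.2023-01-15T10:00:00Z.tar.gz"
NEW_NAME="/backups/postgres.openstack.all.manual.2023-01-15T10:00:00Z.tar.gz"

# Old scheme: the date-time is dot-field 4 of the basename.
awk -F/ '{print $NF}' <<< "${OLD_NAME}" | cut -d'.' -f 4 | tr -d "Z"
# prints: 2023-01-15T10:00:00

# New scheme: the backup mode shifts the date-time to dot-field 5.
awk -F/ '{print $NF}' <<< "${NEW_NAME}" | cut -d'.' -f 5 | tr -d "Z"
# prints: 2023-01-15T10:00:00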
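The day-bucketing that create_hash_table performs can be pictured with a self-contained sketch. Here seconds_difference is assumed to return the age of a date in seconds, as the script's helper of that name appears to do; rounding through %D (mm/dd/yy, i.e. midnight) makes two same-day archives share one key:

#!/usr/bin/env bash
# Sketch of the day-bucketing idea, under the assumption above.
declare -A table
NOW=$(date +%s)

seconds_difference() {
  echo $(( NOW - $(date --date="$1" +%s) ))
}

# Two hypothetical archives taken on the same day, hours apart.
for f in db.ns.all.2023-01-14T02:00:00Z.tar.gz \
         db.ns.all.2023-01-14T18:00:00Z.tar.gz; do
  d=$(cut -d'.' -f 4 <<< "$f" | tr -d "Z")
  # %D drops the time of day, so both archives map to the same
  # key, i.e. the same day bucket.
  idx=$(seconds_difference "$(date --date="$d" +%D)")
  table[$idx]="${table[$idx]} $f"
done

for k in "${!table[@]}"; do
  echo "INDEX: $k VALUE:${table[$k]}"
done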
@@ -317,6 +355,33 @@ remove_old_local_archives() {
   fi
 }
 
+remove_old_local_archives() {
+  SECONDS_TO_KEEP=$((${LOCAL_DAYS_TO_KEEP}*86400))
+  log INFO "${DB_NAME}_backup" "Deleting backups older than ${LOCAL_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)"
+  if [[ -d $ARCHIVE_DIR ]]; then
+    count=0
+    # We iterate over the hash table, checking the delta in seconds (hash keys)
+    # and the minimum number of backups we must keep. The list of keys has to be sorted.
+    for INDEX in $(tr " " "\n" <<< ${!fileTable[@]} | sort -n -); do
+      ARCHIVE_FILE=${fileTable[${INDEX}]}
+      if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${LOCAL_DAYS_TO_KEEP} ]]; then
+        ((count++))
+        log INFO "${DB_NAME}_backup" "Keeping file(s) ${ARCHIVE_FILE}."
+      else
+        log INFO "${DB_NAME}_backup" "Deleting file(s) ${ARCHIVE_FILE}."
+        rm -f ${ARCHIVE_FILE}
+        if [[ $? -ne 0 ]]; then
+          # Log the error but don't exit, so we can finish the script,
+          # because at this point we haven't sent the backup to the RGW yet
+          log ERROR "${DB_NAME}_backup" "Failed to cleanup local backup. Cannot remove some of ${ARCHIVE_FILE}"
+        fi
+      fi
+    done
+  else
+    log WARN "${DB_NAME}_backup" "The local backup directory ${ARCHIVE_DIR} does not exist."
+  fi
+}
+
 prepare_list_of_remote_backups() {
   BACKUP_FILES=$(mktemp -p /tmp)
   DB_BACKUP_FILES=$(mktemp -p /tmp)
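The retention rule above can be tried in isolation. The table contents below are hypothetical and stand in for what create_hash_table builds (key = age of a day bucket in seconds, value = its archive files); a bucket survives when it is newer than the cutoff, or when fewer than the minimum number of buckets have been kept so far:

#!/usr/bin/env bash
# Standalone illustration of the keep/delete rule, hypothetical data.
DAYS_TO_KEEP=3
SECONDS_TO_KEEP=$(( DAYS_TO_KEEP * 86400 ))

declare -A table=(
  [86400]="a.tar.gz"    # 1 day old   -> newer than the cutoff
  [172800]="b.tar.gz"   # 2 days old  -> newer than the cutoff
  [864000]="c.tar.gz"   # 10 days old -> kept anyway: only the 3rd bucket seen
  [950400]="d.tar.gz"   # 11 days old -> deleted
)

count=0
# Newest buckets first, oldest last, hence the numeric sort of the keys.
for idx in $(tr ' ' '\n' <<< "${!table[@]}" | sort -n); do
  if (( idx < SECONDS_TO_KEEP || count < DAYS_TO_KEEP )); then
    count=$(( count + 1 ))
    echo "keep   ${table[$idx]}"
  else
    echo "delete ${table[$idx]}"
  fi
done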
@@ -332,18 +397,18 @@ prepare_list_of_remote_backups() {
 # The logic implemented in this function is very similar
 # to that of remove_old_local_archives (see above)
 remove_old_remote_archives() {
-  log INFO "${DB_NAME}_backup" "Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days"
   count=0
   SECONDS_TO_KEEP=$((${REMOTE_DAYS_TO_KEEP}*86400))
+  log INFO "${DB_NAME}_backup" "Deleting backups older than ${REMOTE_DAYS_TO_KEEP} days (${SECONDS_TO_KEEP} seconds)"
   for INDEX in $(tr " " "\n" <<< ${!FILETABLE[@]} | sort -n -); do
     ARCHIVE_FILE=${FILETABLE[${INDEX}]}
-    if [[ ${INDEX} -le ${SECONDS_TO_KEEP} || ${count} -lt ${REMOTE_DAYS_TO_KEEP} ]]; then
+    if [[ ${INDEX} -lt ${SECONDS_TO_KEEP} || ${count} -lt ${REMOTE_DAYS_TO_KEEP} ]]; then
       ((count++))
       log INFO "${DB_NAME}_backup" "Keeping remote backup(s) ${ARCHIVE_FILE}."
     else
       log INFO "${DB_NAME}_backup" "Deleting remote backup(s) ${ARCHIVE_FILE} from the RGW"
-      openstack object delete ${CONTAINER_NAME} ${ARCHIVE_FILE} || log_backup_error_exit \
-        "Failed to cleanup remote backup. Cannot delete container object ${ARCHIVE_FILE}!"
+      openstack object delete ${CONTAINER_NAME} ${ARCHIVE_FILE} || log WARN "${DB_NAME}_backup" \
+        "Failed to cleanup remote backup. Cannot delete container object ${ARCHIVE_FILE}"
     fi
   done
 
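The switch from log_backup_error_exit to log WARN makes remote cleanup fail-soft: one failed delete no longer aborts the rest. A toy illustration of the pattern, where delete_object is a hypothetical stand-in for `openstack object delete`:

#!/usr/bin/env bash
# Fail-soft cleanup: a failed delete is logged and skipped.
delete_object() {
  [[ "$1" == "b.tar.gz" ]] && return 1  # simulate one failure
  return 0
}

for obj in a.tar.gz b.tar.gz c.tar.gz; do
  delete_object "$obj" || echo "WARN: cannot delete ${obj}, continuing" >&2
done
echo "cleanup finished"  # still reached, despite the failure on b.tar.gz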
@@ -416,8 +481,12 @@ backup_databases() {
   # Only delete the old archive after a successful archive
   export LOCAL_DAYS_TO_KEEP=$(echo $LOCAL_DAYS_TO_KEEP | sed 's/"//g')
   if [[ "$LOCAL_DAYS_TO_KEEP" -gt 0 ]]; then
-    create_hash_table $(ls -1 $ARCHIVE_DIR/*.gz)
-    remove_old_local_archives
+    get_backup_prefix $(ls -1 ${ARCHIVE_DIR}/*.gz)
+    for ((i=0; i<${#PREFIXES[@]}; i++)); do
+      echo "Working with prefix: ${PREFIXES[i]}"
+      create_hash_table $(ls -1 ${ARCHIVE_DIR}/${PREFIXES[i]}*.gz)
+      remove_old_local_archives
+    done
   fi
 
   REMOTE_BACKUP=$(echo $REMOTE_BACKUP_ENABLED | sed 's/"//g')
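The new per-prefix loop can be exercised standalone with hypothetical file names: prefixes (<db_name>.<namespace>) are deduplicated first, and the hash table is then rebuilt and pruned once per prefix, so databases that share one archive directory no longer age each other's backups out:

#!/usr/bin/env bash
# Sketch of the per-prefix cleanup, hypothetical file names.
files="pg.openstack.all.2023-01-14T02:00:00Z.tar.gz
pg.openstack.all.2023-01-15T02:00:00Z.tar.gz
mariadb.openstack.all.2023-01-15T03:00:00Z.tar.gz"

PREFIXES=()
for fname in ${files}; do
  prefix=$(basename "${fname}" | cut -d'.' -f1,2)
  seen=""
  for p in "${PREFIXES[@]}"; do
    if [[ "${p}" == "${prefix}" ]]; then
      seen=1
      break
    fi
  done
  if [[ -z "${seen}" ]]; then
    PREFIXES+=("${prefix}")
  fi
done

for ((i=0; i<${#PREFIXES[@]}; i++)); do
  echo "Working with prefix: ${PREFIXES[i]}"
  # the real script runs create_hash_table + remove_old_local_archives here
done
# prints: Working with prefix: pg.openstack
#         Working with prefix: mariadb.openstack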
@@ -41,4 +41,5 @@ helm-toolkit:
   - 0.2.32 Consolidate mon_endpoints discovery
   - 0.2.33 Remove set -x
   - 0.2.34 Modify database backup logic to maintain minimum number of backups
+  - 0.2.35 Database B/R improvements
 ...