RabbitMQ: prune any extra nodes from cluster if scaling down
This patch set updates the cluster wait job to prune any extra nodes from the cluster when scaling down. Change-Id: I58d22121a07cd99448add62502582a6873776622 Signed-off-by: Pete Birley <pete@port.direct>
This commit is contained in:
parent
e96bdd9fb6
commit
af17153627
@ -48,6 +48,24 @@ function active_rabbit_nodes () {
|
||||
}
|
||||
|
||||
# Block until the number of nodes registered in the RabbitMQ cluster meets
# or exceeds the desired replica count, polling every 10 seconds.
# Requires: active_rabbit_nodes (defined above), RABBIT_REPLICA_COUNT (env).
until test "$(active_rabbit_nodes)" -ge "$RABBIT_REPLICA_COUNT"; do
  echo "Waiting for number of nodes in cluster to meet or exceed number of desired pods ($RABBIT_REPLICA_COUNT)"
  sleep 10
done
|
||||
|
||||
# Print the cluster's node names as a single, lexicographically sorted,
# space-separated line (note: output carries a trailing space, produced by
# the final tr). Sorting makes node selection deterministic across runs.
# Requires: rabbitmqadmin_authed (defined elsewhere in this script).
function sorted_node_list () {
  rabbitmqadmin_authed list nodes -f bash \
    | tr ' ' '\n' \
    | sort \
    | tr '\n' ' '
}
|
||||
|
||||
# If more nodes are registered in the cluster than the desired replica
# count (i.e. we are scaling down), prune the surplus: keep the first
# RABBIT_REPLICA_COUNT nodes of the sorted node list and forget the rest.
if test "$(active_rabbit_nodes)" -gt "$RABBIT_REPLICA_COUNT"; then
  echo "There are more nodes registered in the cluster than desired, pruning the cluster"
  # Use the lexicographically-first node as the node we issue admin
  # commands against.
  PRIMARY_NODE="$(sorted_node_list | awk '{ print $1; exit }')"
  echo "Current cluster:"
  rabbitmqctl -l -n "${PRIMARY_NODE}" cluster_status
  # Everything from field RABBIT_REPLICA_COUNT+1 onward is surplus. Pass
  # the field number via `awk -v` rather than interpolating it into the
  # awk program text.
  NODES_TO_REMOVE="$(sorted_node_list | awk -v n="$((RABBIT_REPLICA_COUNT + 1))" '{ print substr($0, index($0, $n)) }')"
  # NODES_TO_REMOVE is intentionally unquoted: the node names are
  # space-separated and must word-split into loop items.
  for NODE in ${NODES_TO_REMOVE}; do
    # stop_app may fail if the node is already down; that is fine — we
    # still want the primary to forget it.
    rabbitmqctl -l -n "${NODE}" stop_app || true
    rabbitmqctl -l -n "${PRIMARY_NODE}" forget_cluster_node "${NODE}"
  done
  echo "Updated cluster:"
  rabbitmqctl -l -n "${PRIMARY_NODE}" cluster_status
fi
|
||||
|
@ -41,6 +41,24 @@ spec:
|
||||
{{ $envAll.Values.labels.jobs.node_selector_key }}: {{ $envAll.Values.labels.test.node_selector_value | quote }}
|
||||
initContainers:
|
||||
{{ tuple $envAll "cluster_wait" list | include "helm-toolkit.snippets.kubernetes_entrypoint_init_container" | indent 8 }}
|
||||
- name: rabbitmq-cookie
|
||||
{{ tuple $envAll "scripted_test" | include "helm-toolkit.snippets.image" | indent 10 }}
|
||||
{{ dict "envAll" $envAll "application" "cluster_wait" "container" "rabbitmq_cookie" | include "helm-toolkit.snippets.kubernetes_container_security_context" | indent 10 }}
|
||||
command:
|
||||
- /tmp/rabbitmq-cookie.sh
|
||||
volumeMounts:
|
||||
- name: pod-tmp
|
||||
mountPath: /tmp
|
||||
- name: rabbitmq-bin
|
||||
mountPath: /tmp/rabbitmq-cookie.sh
|
||||
subPath: rabbitmq-cookie.sh
|
||||
readOnly: true
|
||||
- name: rabbitmq-data
|
||||
mountPath: /var/lib/rabbitmq
|
||||
- name: rabbitmq-erlang-cookie
|
||||
mountPath: /var/run/lib/rabbitmq/.erlang.cookie
|
||||
subPath: erlang_cookie
|
||||
readOnly: true
|
||||
containers:
|
||||
- name: {{.Release.Name}}-rabbitmq-cluster-wait
|
||||
{{ tuple $envAll "scripted_test" | include "helm-toolkit.snippets.image" | indent 10 }}
|
||||
@ -59,11 +77,19 @@ spec:
|
||||
mountPath: /tmp/rabbitmq-wait-for-cluster.sh
|
||||
subPath: rabbitmq-wait-for-cluster.sh
|
||||
readOnly: true
|
||||
- name: rabbitmq-data
|
||||
mountPath: /var/lib/rabbitmq
|
||||
volumes:
|
||||
- name: pod-tmp
|
||||
emptyDir: {}
|
||||
- name: rabbitmq-data
|
||||
emptyDir: {}
|
||||
- name: rabbitmq-bin
|
||||
configMap:
|
||||
name: {{ printf "%s-%s" $envAll.Release.Name "rabbitmq-bin" | quote }}
|
||||
defaultMode: 0555
|
||||
- name: rabbitmq-erlang-cookie
|
||||
secret:
|
||||
secretName: {{ printf "%s-%s" $envAll.Release.Name "erlang-cookie" | quote }}
|
||||
defaultMode: 0444
|
||||
{{- end }}
|
||||
|
@ -79,6 +79,9 @@ pod:
|
||||
rabbitmq_cluster_wait:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
rabbitmq_cookie:
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
test:
|
||||
pod:
|
||||
runAsUser: 999
|
||||
|
Loading…
Reference in New Issue
Block a user