diff --git a/roles/instance-ha/tasks/undo.yml b/roles/instance-ha/tasks/undo.yml
index 0e2a382..ccbf4b1 100644
--- a/roles/instance-ha/tasks/undo.yml
+++ b/roles/instance-ha/tasks/undo.yml
@@ -2,62 +2,72 @@
- block:
- name: Remove fence-nova STONITH device
shell: |
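+ # Remove every STONITH device that uses the fence_compute agent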
+ for stonithid in $(pcs stonith show | awk '/fence_compute/ {print $1}')
+ do
- pcs stonith delete fence-nova
+ pcs stonith delete $stonithid
+ done
- - name: Remove resources associated to remote nodes
+ - name: Remove resources associated with remote nodes
shell: |
- for resourceid in $(pcs resource show | grep compute | grep -v -e Stopped: -e Started: -e disabled -e remote | awk '{print $3}')
- do
- pcs resource cleanup $resourceid
- pcs --force resource delete $resourceid
- done
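+ # Field 3 holds the resource id; state-only lines (Stopped:/Started:/disabled/remote) are filtered out first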
+ for resourceid in $(pcs resource show | grep compute | grep -v -e Stopped: -e Started: -e disabled -e remote | awk '{print $3}')
+ do
+ pcs resource cleanup $resourceid
+ pcs --force resource delete $resourceid
+ done
- name: Remove NovaEvacuate resource
shell: |
- for resourceid in $(pcs resource show | grep NovaEvacuate | awk '{print $1}')
- do
- pcs resource cleanup $resourceid
- pcs --force resource delete $resourceid
- done
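+ # The awk pattern both selects the NovaEvacuate line and prints its resource id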
+ for resourceid in $(pcs resource show | awk '/NovaEvacuate/ {print $1}')
+ do
+ pcs resource cleanup $resourceid
+ pcs --force resource delete $resourceid
+ done
- name: Remove pacemaker remote resource
shell: |
- for resourceid in $(pcs resource show | grep :remote | awk '{print $1}')
- do
- pcs resource cleanup $resourceid
- pcs --force resource delete $resourceid
- done
-
-# - name: Erase the status entries corresponding to the compute nodes
-# shell: |
-# cibadmin --delete --xml-text "<node id='{{ hostvars[item]['ansible_hostname'] }}'/>"
-# cibadmin --delete --xml-text "<node_state id='{{ hostvars[item]['ansible_hostname'] }}'/>"
-# with_items:
-# - "{{ groups['compute'] }}"
+ for resourceid in $(pcs resource show | awk '/:remote/ {print $1}')
+ do
+ pcs resource cleanup $resourceid
+ pcs --force resource delete $resourceid
+ done
- name: Remove constraints related to role controller
shell: |
- for constraintid in $(pcs config show | grep -B 3 "osprole eq controller" | awk '/Constraint/ {print $2}')
- do
- pcs constraint delete $constraintid
- done
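+ # grep -B 3 pulls in the Constraint: line printed just above each "osprole eq controller" rule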
+ for constraintid in $(pcs config show | grep -B 3 "osprole eq controller" | awk '/Constraint/ {print $2}')
+ do
+ pcs constraint delete $constraintid
+ done
- name: Unset controller pacemaker property on controllers
- shell: "pcs property unset --node {{ hostvars[item]['ansible_hostname'] }} osprole"
- with_items: "{{ groups['controller'] }}"
+ shell: |
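+ # Node names are the first field of the osprole lines in 'pcs property' output, minus the trailing colon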
+ for nodeid in $(pcs property | awk '/osprole/ { print $1 }' | cut -d: -f1)
+ do
+ pcs property unset --node $nodeid osprole
+ done
- - name: Unset cluster recheck interval to 1 minute
+ - name: Unset cluster recheck interval
- shell: "pcs property unset cluster-recheck-interval"
-
+ shell: |
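+ # The loop acts as a guard: unset only runs when the property is currently set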
+ for propertyid in $(pcs property | awk '/cluster-recheck-interval/ { print $1 }' | cut -d: -f1)
+ do
+ pcs property unset $propertyid
+ done
become: yes
delegate_to: "{{ groups.controller[0] }}"
- name: Cleanup failed resources (if any)
shell: |
- for resource in $(pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort |uniq)
- do
- pcs resource cleanup $resource
- done
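+ # Resource names come from the 'Failed Actions:' section, truncated at the first underscore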
+ for resource in $(pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort | uniq)
+ do
+ pcs resource cleanup $resource
+ done
become: yes
delegate_to: "{{ groups.controller[0] }}"
@@ -126,8 +128,12 @@
- name: Disable iptables traffic for pacemaker_remote
become: yes
- shell: >
- iptables -D INPUT -p tcp --dport 3121 -j ACCEPT;
+ shell: |
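+ # Delete the rule repeatedly until no match remains, in case it was inserted more than once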
+ while iptables -C INPUT -p tcp --dport 3121 -j ACCEPT 2>/dev/null
+ do
+ iptables -D INPUT -p tcp --dport 3121 -j ACCEPT
+ done
/sbin/service iptables save
delegate_to: "{{ item }}"
with_items: