Fix overcloud node names to be dynamic
This commit enables fact gathering for every host in each playbook, so that overcloud node names no longer need to be hard-coded: variables such as "{{ hostvars[item]['ansible_hostname'] }}" now resolve each node's hostname dynamically from the inventory. Change-Id: I9ac6937a641f07f2e75bc764d057f2d1d8ec9bda
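As a minimal sketch of the pattern this change applies throughout the playbooks (the first play mirrors the one added by this commit; the debug task is only an illustration, not part of the commit): gathering facts for all hosts in an early play populates hostvars, so later plays delegated from the undercloud can resolve each node's real hostname from the inventory.

---
# Gather facts for every inventory host first, so hostvars is populated.
- name: Gather undercloud and overcloud facts
  hosts: undercloud overcloud
  gather_facts: yes

- name: Demonstrate dynamic hostname resolution
  hosts: undercloud
  tasks:
    # Prints the fact-derived hostname of each compute node,
    # regardless of what the inventory alias is called.
    - name: Show the dynamically resolved hostname of each compute node
      debug:
        msg: "{{ hostvars[item]['ansible_hostname'] }}"
      with_items: "{{ groups['compute'] }}"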
parent e5e27a46e9
commit 711b7e50d3
@@ -1,4 +1,7 @@
 ---
+- name: Gather undercloud and overcloud facts
+  hosts: undercloud overcloud
+  gather_facts: yes
 
 - name: Configure Instance HA
   hosts: undercloud
@@ -2,6 +2,6 @@
 
 - name: Configure STONITH for all the hosts on the overcloud
   hosts: undercloud
-  gather_facts: no
+  gather_facts: yes
   roles:
     - stonith-config
@@ -2,6 +2,6 @@
 
 - name: Validate overcloud HA status
   hosts: undercloud
-  gather_facts: no
+  gather_facts: yes
   roles:
     - validate-ha
@@ -4,6 +4,8 @@
     name: stonith-config
   vars:
     stonith_devices: "computes"
+  when:
+    - stonith_devices in ["all","computes"]
 
 - name: Disable openstack-nova-compute on compute
   service:
@@ -11,7 +13,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -21,7 +23,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -32,7 +34,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -43,7 +45,7 @@
     name: libvirtd
     state: stopped
     enabled: no
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -60,7 +62,7 @@
     state: directory
     mode: 0750
     group: "haclient"
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
    - "{{ groups['compute'] }}"
@@ -72,7 +74,7 @@
     dest: /etc/pacemaker/authkey
     mode: 0640
     group: "haclient"
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -88,7 +90,7 @@
   shell: |
     iptables -I INPUT -p tcp --dport 3121 -j ACCEPT
     /sbin/service iptables save
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -99,7 +101,7 @@
     name: pacemaker_remote
     enabled: yes
     state: started
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -108,43 +110,39 @@
     cat {{ working_dir }}/overcloudrc
   register: overcloudrc
 
-- name: Remove overcloudrc file on overcloud-controller-0 (if exists)
-  file:
-    path: "{{ overcloud_working_dir }}/overcloudrc"
-    state: absent
-  delegate_to: overcloud-controller-0
-
-- name: Copy overcloudrc file on overcloud-controller-0
-  lineinfile:
-    destfile: "{{ overcloud_working_dir }}/overcloudrc"
-    line: "{{ overcloudrc.stdout }}"
-    create: yes
-    mode: 0644
-  delegate_to: overcloud-controller-0
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
-  register: "OS_USERNAME"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
-  register: "OS_PASSWORD"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
-  register: "OS_AUTH_URL"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
-  register: "OS_TENANT_NAME"
+- block:
+    - name: Remove overcloudrc file on first controller (if exists)
+      file:
+        path: "{{ overcloud_working_dir }}/overcloudrc"
+        state: absent
+
+    - name: Copy overcloudrc file on first controller
+      lineinfile:
+        destfile: "{{ overcloud_working_dir }}/overcloudrc"
+        line: "{{ overcloudrc.stdout }}"
+        create: yes
+        mode: 0644
+
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
+      register: "OS_USERNAME"
+
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
+      register: "OS_PASSWORD"
+
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
+      register: "OS_AUTH_URL"
+
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
+      register: "OS_TENANT_NAME"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - block:
     - name: Create resource nova-evacuate
@@ -181,7 +179,7 @@
   when: release in [ 'mitaka', 'rhos-9' ]
 
 - name: Set controller pacemaker property on controllers
-  shell: "pcs property set --node {{ item }} osprole=controller"
+  shell: "pcs property set --node {{ hostvars[item]['ansible_hostname'] }} osprole=controller"
   with_items: "{{ groups['controller'] }}"
 
 - name: Get stonith devices
@@ -190,7 +188,7 @@
 
 - name: Setup stonith devices
   shell: |
-    for i in $(sudo cibadmin -Q --xpath //primitive --node-path | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do
+    for i in $(cibadmin -Q --xpath //primitive --node-path | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do
       found=0
       if [ -n "{{ stonithdevs.stdout }}" ]; then
         for x in {{ stonithdevs.stdout }}; do
@@ -200,7 +198,7 @@
         done
       fi
       if [ $found = 0 ]; then
-        sudo pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
+        pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
       fi
     done
 
@@ -238,7 +236,7 @@
 
 - name: Check if ipmi exists for all compute nodes
   shell: |
-    sudo pcs stonith show ipmilan-{{ item }}
+    pcs stonith show ipmilan-{{ hostvars[item]['ansible_hostname'] }}
   with_items: "{{ groups['compute'] }}"
 
 - name: Create fence-nova pacemaker resource
@@ -262,15 +260,15 @@
   shell: "pcs property set cluster-recheck-interval=1min"
 
 - name: Create pacemaker remote resource on compute nodes
-  shell: "pcs resource create {{ item }} ocf:pacemaker:remote reconnect_interval=240 op monitor interval=20"
+  shell: "pcs resource create {{ hostvars[item]['ansible_hostname'] }} ocf:pacemaker:remote reconnect_interval=240 op monitor interval=20"
   with_items: "{{ groups['compute'] }}"
 
 - name: Set osprole for compute nodes
-  shell: "pcs property set --node {{ item }} osprole=compute"
+  shell: "pcs property set --node {{ hostvars[item]['ansible_hostname'] }} osprole=compute"
   with_items: "{{ groups['compute'] }}"
 
 - name: Add pacemaker stonith devices of compute nodes to level 1
-  shell: "pcs stonith level add 1 {{ item }} ipmilan-{{ item }},fence-nova"
+  shell: "pcs stonith level add 1 {{ hostvars[item]['ansible_hostname'] }} ipmilan-{{ hostvars[item]['ansible_hostname'] }},fence-nova"
   with_items: "{{ groups['compute'] }}"
 
 - name: Enable keystone resource
@@ -317,7 +315,7 @@
       pcs resource cleanup $resource
     done
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Wait for (if any) failed resources to recover
   shell: pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort |uniq
@@ -326,4 +324,4 @@
   retries: 10
   delay: 10
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
@@ -1,16 +1,26 @@
 ---
-- name: Apply STONITH for controller nodes (if selected)
+- name: Apply STONITH for controller nodes
   include_role:
     name: stonith-config
-  when: stonith_action == "install"
+  when:
+    - instance_ha_action == "install"
+    - stonith_devices in ["all","controllers"]
 
-- include: apply.yml
-  when: instance_ha_action == 'install'
+- name: Apply Instance High Availability steps
+  include: apply.yml
+  when:
+    - instance_ha_action == "install"
 
-- include: undo.yml
-  when: instance_ha_action == 'uninstall'
+- name: Undo Instance High Availability steps
+  include: undo.yml
+  when:
+    - instance_ha_action == "uninstall"
 
-- name: Remove STONITH for controller nodes (if selected)
+- name: Remove STONITH for controller nodes
   include_role:
     name: stonith-config
-  when: stonith_action == "uninstall"
+  vars:
+    stonith_action: "uninstall"
+  when:
+    - instance_ha_action == "uninstall"
+    - stonith_devices in ["all","controllers"]
@@ -4,37 +4,39 @@
     cat {{ working_dir }}/overcloudrc
   register: overcloudrc
 
-- name: Copy overcloudrc file on overcloud-controller-0
-  lineinfile:
-    destfile: "{{ overcloud_working_dir }}/overcloudrc"
-    line: "{{ overcloudrc.stdout }}"
-    create: yes
-    mode: 0644
-  delegate_to: overcloud-controller-0
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
-  register: "OS_USERNAME"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
-  register: "OS_PASSWORD"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
-  register: "OS_AUTH_URL"
-
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
-  register: "OS_TENANT_NAME"
+- block:
+    - name: Remove overcloudrc file on first controller (if exists)
+      file:
+        path: "{{ overcloud_working_dir }}/overcloudrc"
+        state: absent
+
+    - name: Copy overcloudrc file on first controller
+      lineinfile:
+        destfile: "{{ overcloud_working_dir }}/overcloudrc"
+        line: "{{ overcloudrc.stdout }}"
+        create: yes
+        mode: 0644
+
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
+      register: "OS_USERNAME"
+
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
+      register: "OS_PASSWORD"
+
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
+      register: "OS_AUTH_URL"
+
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
+      register: "OS_TENANT_NAME"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - block:
     - name: Remove fence-nova STONITH device
@@ -76,11 +78,11 @@
   shell: |
     for constraintid in $(pcs config show | grep -B 3 "osprole eq controller" | awk '/Constraint/ {print $2}')
     do
-      sudo pcs constraint delete $constraintid
+      pcs constraint delete $constraintid
     done
 
 - name: Unset controller pacemaker property on controllers
-  shell: "pcs property unset --node {{ item }} osprole"
+  shell: "pcs property unset --node {{ hostvars[item]['ansible_hostname'] }} osprole"
   with_items: "{{ groups['controller'] }}"
 
 - name: Unset cluster recheck interval to 1 minute
@@ -92,7 +94,7 @@
     OS_AUTH_URL: "{{ OS_AUTH_URL.stdout }}"
     OS_TENANT_NAME: "{{ OS_TENANT_NAME.stdout }}"
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Cleanup failed resources (if any)
   shell: |
@@ -101,7 +103,7 @@
       pcs resource cleanup $resource
     done
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Wait for failed resources to recover (if any)
   shell: pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort |uniq
@@ -110,7 +112,7 @@
   retries: 10
   delay: 10
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Enable openstack-nova-compute on compute
   service:
@@ -118,7 +120,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -128,7 +130,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -139,7 +141,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -150,7 +152,7 @@
     name: libvirtd
     state: started
     enabled: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -161,7 +163,7 @@
     name: pacemaker_remote
     enabled: no
     state: stopped
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -170,7 +172,7 @@
   shell: >
     iptables -D INPUT -p tcp --dport 3121 -j ACCEPT;
     /sbin/service iptables save
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -181,3 +183,5 @@
   vars:
     stonith_action: "uninstall"
     stonith_devices: "computes"
+  when:
+    - stonith_devices in ["all","computes"]
@@ -15,7 +15,7 @@
   file:
     path: "{{ overcloud_working_dir }}/config-stonith.sh"
     state: absent
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Create the STONITH script on the overcloud
   lineinfile:
@@ -23,10 +23,10 @@
     line: "{{ stonith_script.stdout }}"
     create: yes
     mode: 0755
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Execute STONITH script
   become: true
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
   shell: >
     {{ overcloud_working_dir }}/config-stonith.sh &> config_stonith.log
@@ -19,7 +19,7 @@
 
 - name: Copy ha-test-suite on undercloud and controllers
   shell: >
-    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ item }}:
+    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ hostvars[item]['ansible_hostname'] }}:
   delegate_to: "localhost"
   with_items:
     - "{{ groups['controller'] }}"
@@ -36,7 +36,7 @@
 # Test: failed actions
 - block:
     - name: HA test - Failed actions (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_check-failed-actions
       register: test_ha_failed_actions_cmd
@@ -51,7 +51,7 @@
 # Test: Master/Slave
 - block:
     - name: HA test - Master/Slave core resource stop and start (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_master-slave -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_master-slave
       register: test_ha_master_slave_cmd
@@ -66,7 +66,7 @@
 # Test: Keystone stop
 - block:
     - name: HA test Keystone stop (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
       register: test_ha_keystone_stop_cmd
@@ -81,7 +81,7 @@
 # Test: Keystone removal
 - block:
     - name: HA test Keystone removal (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
       register: test_ha_keystone_constraint_removal_cmd
@@ -96,7 +96,7 @@
 # Test: NG A
 - block:
     - name: HA test NG A (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-a -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_a_cmd
@@ -111,7 +111,7 @@
 # Test: NG B
 - block:
     - name: HA test NG B (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-b -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_b_cmd
@@ -126,7 +126,7 @@
 # Test: NG C
 - block:
     - name: HA test NG C (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-c -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_c_cmd