From 711b7e50d3d987436ddb7a6cde4bd3cac1a9c62b Mon Sep 17 00:00:00 2001
From: Raoul Scarazzini
Date: Mon, 11 Sep 2017 06:28:45 -0400
Subject: [PATCH] Fix overcloud node names to be dynamic

This commit enables fact gathering in each playbook so that overcloud
node names can be resolved dynamically: variables such as
"{{ hostvars[item]['ansible_hostname'] }}" derive each node's hostname
from the inventory instead of relying on hard-coded names like
overcloud-controller-0.

Change-Id: I9ac6937a641f07f2e75bc764d057f2d1d8ec9bda
---
 playbooks/overcloud-instance-ha.yml    |  3 +
 playbooks/overcloud-stonith-config.yml |  2 +-
 playbooks/overcloud-validate-ha.yml    |  2 +-
 roles/instance-ha/tasks/apply.yml      | 96 +++++++++++++-------
 roles/instance-ha/tasks/main.yml       | 26 ++++---
 roles/instance-ha/tasks/undo.yml       | 80 +++++++++++----------
 roles/stonith-config/tasks/main.yml    |  6 +-
 roles/validate-ha/tasks/main.yml       | 16 ++---
 8 files changed, 123 insertions(+), 108 deletions(-)

diff --git a/playbooks/overcloud-instance-ha.yml b/playbooks/overcloud-instance-ha.yml
index a7ed55d..fe94439 100644
--- a/playbooks/overcloud-instance-ha.yml
+++ b/playbooks/overcloud-instance-ha.yml
@@ -1,4 +1,7 @@
 ---
+- name: Gather undercloud and overcloud facts
+  hosts: undercloud overcloud
+  gather_facts: yes
 
 - name: Configure Instance HA
   hosts: undercloud
diff --git a/playbooks/overcloud-stonith-config.yml b/playbooks/overcloud-stonith-config.yml
index ee0bea5..367f168 100644
--- a/playbooks/overcloud-stonith-config.yml
+++ b/playbooks/overcloud-stonith-config.yml
@@ -2,6 +2,6 @@
 
 - name: Configure STONITH for all the hosts on the overcloud
   hosts: undercloud
-  gather_facts: no
+  gather_facts: yes
   roles:
     - stonith-config
diff --git a/playbooks/overcloud-validate-ha.yml b/playbooks/overcloud-validate-ha.yml
index f097428..29d4fb8 100644
--- a/playbooks/overcloud-validate-ha.yml
+++ b/playbooks/overcloud-validate-ha.yml
@@ -2,6 +2,6 @@
 
 - name: Validate overcloud HA status
   hosts: undercloud
-  gather_facts: no
+  gather_facts: yes
   roles:
     - validate-ha
diff --git a/roles/instance-ha/tasks/apply.yml b/roles/instance-ha/tasks/apply.yml
index 59313f1..a3878d0 100644
--- a/roles/instance-ha/tasks/apply.yml
+++ b/roles/instance-ha/tasks/apply.yml
@@ -4,6 +4,8 @@
     name: stonith-config
   vars:
     stonith_devices: "computes"
+  when:
+    - stonith_devices in ["all","computes"]
 
 - name: Disable openstack-nova-compute on compute
   service:
@@ -11,7 +13,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -21,7 +23,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -32,7 +34,7 @@
     state: stopped
     enabled: no
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -43,7 +45,7 @@
     name: libvirtd
     state: stopped
     enabled: no
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -60,7 +62,7 @@
     state: directory
     mode: 0750
     group: "haclient"
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -72,7 +74,7 @@
     dest: /etc/pacemaker/authkey
     mode: 0640
     group: "haclient"
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -88,7 +90,7 @@
   shell: |
     iptables -I INPUT -p tcp --dport 3121 -j ACCEPT
     /sbin/service iptables save
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -99,7 +101,7 @@
     name: pacemaker_remote
     enabled: yes
     state: started
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -108,43 +110,39 @@
     cat {{ working_dir }}/overcloudrc
   register: overcloudrc
 
-- name: Remove overcloudrc file on overcloud-controller-0 (if exists)
-  file:
-    path: "{{ overcloud_working_dir }}/overcloudrc"
-    state: absent
-  delegate_to: overcloud-controller-0
+- block:
+    - name: Remove overcloudrc file on first controller (if exists)
+      file:
+        path: "{{ overcloud_working_dir }}/overcloudrc"
+        state: absent
 
-- name: Copy overcloudrc file on overcloud-controller-0
-  lineinfile:
-    destfile: "{{ overcloud_working_dir }}/overcloudrc"
-    line: "{{ overcloudrc.stdout }}"
-    create: yes
-    mode: 0644
-  delegate_to: overcloud-controller-0
+    - name: Copy overcloudrc file on first controller
+      lineinfile:
+        destfile: "{{ overcloud_working_dir }}/overcloudrc"
+        line: "{{ overcloudrc.stdout }}"
+        create: yes
+        mode: 0644
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
-  register: "OS_USERNAME"
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
+      register: "OS_USERNAME"
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
-  register: "OS_PASSWORD"
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
+      register: "OS_PASSWORD"
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
-  register: "OS_AUTH_URL"
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
+      register: "OS_AUTH_URL"
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: |
-    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
-  register: "OS_TENANT_NAME"
+    - name: Get environment vars from overcloudrc
+      shell: |
+        grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
+      register: "OS_TENANT_NAME"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - block:
     - name: Create resource nova-evacuate
@@ -181,7 +179,7 @@
   when: release in [ 'mitaka', 'rhos-9' ]
 
 - name: Set controller pacemaker property on controllers
-  shell: "pcs property set --node {{ item }} osprole=controller"
+  shell: "pcs property set --node {{ hostvars[item]['ansible_hostname'] }} osprole=controller"
   with_items: "{{ groups['controller'] }}"
 
 - name: Get stonith devices
@@ -190,7 +188,7 @@
 
 - name: Setup stonith devices
   shell: |
-    for i in $(sudo cibadmin -Q --xpath //primitive --node-path | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do
+    for i in $(cibadmin -Q --xpath //primitive --node-path | awk -F "id='" '{print $2}' | awk -F "'" '{print $1}' | uniq); do
       found=0
       if [ -n "{{ stonithdevs.stdout }}" ]; then
         for x in {{ stonithdevs.stdout }}; do
@@ -200,7 +198,7 @@
         done
       fi
       if [ $found = 0 ]; then
-        sudo pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
+        pcs constraint location $i rule resource-discovery=exclusive score=0 osprole eq controller
       fi
     done
 
@@ -238,7 +236,7 @@
 
 - name: Check if ipmi exists for all compute nodes
   shell: |
-    sudo pcs stonith show ipmilan-{{ item }}
+    pcs stonith show ipmilan-{{ hostvars[item]['ansible_hostname'] }}
   with_items: "{{ groups['compute'] }}"
 
 - name: Create fence-nova pacemaker resource
@@ -262,15 +260,15 @@
   shell: "pcs property set cluster-recheck-interval=1min"
 
 - name: Create pacemaker remote resource on compute nodes
-  shell: "pcs resource create {{ item }} ocf:pacemaker:remote reconnect_interval=240 op monitor interval=20"
+  shell: "pcs resource create {{ hostvars[item]['ansible_hostname'] }} ocf:pacemaker:remote reconnect_interval=240 op monitor interval=20"
   with_items: "{{ groups['compute'] }}"
 
 - name: Set osprole for compute nodes
-  shell: "pcs property set --node {{ item }} osprole=compute"
+  shell: "pcs property set --node {{ hostvars[item]['ansible_hostname'] }} osprole=compute"
   with_items: "{{ groups['compute'] }}"
 
 - name: Add pacemaker stonith devices of compute nodes to level 1
-  shell: "pcs stonith level add 1 {{ item }} ipmilan-{{ item }},fence-nova"
+  shell: "pcs stonith level add 1 {{ hostvars[item]['ansible_hostname'] }} ipmilan-{{ hostvars[item]['ansible_hostname'] }},fence-nova"
   with_items: "{{ groups['compute'] }}"
 
 - name: Enable keystone resource
@@ -317,7 +315,7 @@
       pcs resource cleanup $resource
     done
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Wait for (if any) failed resources to recover
   shell: pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort |uniq
@@ -326,4 +324,4 @@
   retries: 10
   delay: 10
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
diff --git a/roles/instance-ha/tasks/main.yml b/roles/instance-ha/tasks/main.yml
index 5330827..45e9a0e 100644
--- a/roles/instance-ha/tasks/main.yml
+++ b/roles/instance-ha/tasks/main.yml
@@ -1,16 +1,26 @@
 ---
-- name: Apply STONITH for controller nodes (if selected)
+- name: Apply STONITH for controller nodes
   include_role:
     name: stonith-config
-  when: stonith_action == "install"
+  when:
+    - instance_ha_action == "install"
+    - stonith_devices in ["all","controllers"]
 
-- include: apply.yml
-  when: instance_ha_action == 'install'
+- name: Apply Instance High Availability steps
+  include: apply.yml
+  when:
+    - instance_ha_action == "install"
 
-- include: undo.yml
-  when: instance_ha_action == 'uninstall'
+- name: Undo Instance High Availability steps
+  include: undo.yml
+  when:
+    - instance_ha_action == "uninstall"
 
-- name: Remove STONITH for controller nodes (if selected)
+- name: Remove STONITH for controller nodes
   include_role:
     name: stonith-config
-  when: stonith_action == "uninstall"
+  vars:
+    stonith_action: "uninstall"
+  when:
+    - instance_ha_action == "uninstall"
+    - stonith_devices in ["all","controllers"]
diff --git a/roles/instance-ha/tasks/undo.yml b/roles/instance-ha/tasks/undo.yml
index b65843f..31f83b1 100644
--- a/roles/instance-ha/tasks/undo.yml
+++ b/roles/instance-ha/tasks/undo.yml
@@ -4,37 +4,39 @@
     cat {{ working_dir }}/overcloudrc
   register: overcloudrc
 
-- name: Copy overcloudrc file on overcloud-controller-0
-  lineinfile:
-    destfile: "{{ overcloud_working_dir }}/overcloudrc"
-    line: "{{ overcloudrc.stdout }}"
-    create: yes
-    mode: 0644
-  delegate_to: overcloud-controller-0
+- block:
+    - name: Remove overcloudrc file on first controller (if exists)
+      file:
+        path: "{{ overcloud_working_dir }}/overcloudrc"
+        state: absent
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
-  register: "OS_USERNAME"
+    - name: Copy overcloudrc file on first controller
+      lineinfile:
+        destfile: "{{ overcloud_working_dir }}/overcloudrc"
+        line: "{{ overcloudrc.stdout }}"
+        create: yes
+        mode: 0644
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
-  register: "OS_PASSWORD"
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
+      register: "OS_USERNAME"
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
-  register: "OS_AUTH_URL"
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
+      register: "OS_PASSWORD"
 
-- name: Get environment vars from overcloudrc
-  delegate_to: "overcloud-controller-0"
-  shell: >
-    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
-  register: "OS_TENANT_NAME"
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
+      register: "OS_AUTH_URL"
+
+    - name: Get environment vars from overcloudrc
+      shell: >
+        grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
+      register: "OS_TENANT_NAME"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - block:
     - name: Remove fence-nova STONITH device
@@ -76,11 +78,11 @@
   shell: |
     for constraintid in $(pcs config show | grep -B 3 "osprole eq controller" | awk '/Constraint/ {print $2}')
     do
-      sudo pcs constraint delete $constraintid
+      pcs constraint delete $constraintid
    done
 
 - name: Unset controller pacemaker property on controllers
-  shell: "pcs property unset --node {{ item }} osprole"
+  shell: "pcs property unset --node {{ hostvars[item]['ansible_hostname'] }} osprole"
   with_items: "{{ groups['controller'] }}"
 
 - name: Unset cluster recheck interval to 1 minute
@@ -92,7 +94,7 @@
     OS_AUTH_URL: "{{ OS_AUTH_URL.stdout }}"
     OS_TENANT_NAME: "{{ OS_TENANT_NAME.stdout }}"
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Cleanup failed resources (if any)
   shell: |
@@ -101,7 +103,7 @@
       pcs resource cleanup $resource
     done
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Wait for failed resources to recover (if any)
   shell: pcs status | sed -n -e '/Failed Actions:/,/^$/p' | egrep 'OCF_|not running|unknown' | awk '{print $2}' | cut -f1 -d_ | sort |uniq
@@ -110,7 +112,7 @@
   retries: 10
   delay: 10
   become: yes
-  delegate_to: "overcloud-controller-0"
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Enable openstack-nova-compute on compute
   service:
@@ -118,7 +120,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -128,7 +130,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -139,7 +141,7 @@
     state: started
     enabled: yes
   become: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -150,7 +152,7 @@
     name: libvirtd
     state: started
     enabled: yes
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
   when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
@@ -161,7 +163,7 @@
     name: pacemaker_remote
     enabled: no
     state: stopped
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['compute'] }}"
 
@@ -170,7 +172,7 @@
   shell: >
     iptables -D INPUT -p tcp --dport 3121 -j ACCEPT;
     /sbin/service iptables save
-  delegate_to: "{{ item }}"
+  delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
   with_items:
     - "{{ groups['controller'] }}"
     - "{{ groups['compute'] }}"
@@ -181,3 +183,5 @@
   vars:
     stonith_action: "uninstall"
     stonith_devices: "computes"
+  when:
+    - stonith_devices in ["all","computes"]
diff --git a/roles/stonith-config/tasks/main.yml b/roles/stonith-config/tasks/main.yml
index 666c23f..58230e3 100644
--- a/roles/stonith-config/tasks/main.yml
+++ b/roles/stonith-config/tasks/main.yml
@@ -15,7 +15,7 @@
   file:
     path: "{{ overcloud_working_dir }}/config-stonith.sh"
     state: absent
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Create the STONITH script on the overcloud
   lineinfile:
@@ -23,10 +23,10 @@
     line: "{{ stonith_script.stdout }}"
     create: yes
     mode: 0755
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
 
 - name: Execute STONITH script
   become: true
-  delegate_to: overcloud-controller-0
+  delegate_to: "{{ groups.controller[0] }}"
   shell: >
     {{ overcloud_working_dir }}/config-stonith.sh &> config_stonith.log
diff --git a/roles/validate-ha/tasks/main.yml b/roles/validate-ha/tasks/main.yml
index d7fceed..11103e6 100644
--- a/roles/validate-ha/tasks/main.yml
+++ b/roles/validate-ha/tasks/main.yml
@@ -19,7 +19,7 @@
 
 - name: Copy ha-test-suite on undercloud and controllers
   shell: >
-    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ item }}:
+    /usr/bin/rsync --delay-updates -F --compress --archive -e 'ssh -F {{ local_working_dir }}/ssh.config.ansible' {{ local_working_dir }}/tripleo-quickstart-utils/tools/ha-test-suite {{ hostvars[item]['ansible_hostname'] }}:
   delegate_to: "localhost"
   with_items:
     - "{{ groups['controller'] }}"
@@ -36,7 +36,7 @@
 # Test: failed actions
 - block:
     - name: HA test - Failed actions (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_check-failed-actions
       register: test_ha_failed_actions_cmd
@@ -51,7 +51,7 @@
 # Test: Master/Slave
 - block:
     - name: HA test - Master/Slave core resource stop and start (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
      shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_master-slave -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_master-slave
       register: test_ha_master_slave_cmd
@@ -66,7 +66,7 @@
 # Test: Keystone stop
 - block:
     - name: HA test Keystone stop (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
       register: test_ha_keystone_stop_cmd
@@ -81,7 +81,7 @@
 # Test: Keystone removal
 - block:
     - name: HA test Keystone removal (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_keystone-constraint-removal -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_keystone-constraint-removal
       register: test_ha_keystone_constraint_removal_cmd
@@ -96,7 +96,7 @@
 # Test: NG A
 - block:
     - name: HA test NG A (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-a -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_a_cmd
@@ -111,7 +111,7 @@
 # Test: NG B
 - block:
     - name: HA test NG B (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-b -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_b_cmd
@@ -126,7 +126,7 @@
 # Test: NG C
 - block:
     - name: HA test NG C (overcloud)
-      delegate_to: overcloud-controller-0
+      delegate_to: "{{ groups.controller[0] }}"
       shell: >
         {{ overcloud_working_dir }}/ha-test-suite/ha-test-suite.sh -t {{ overcloud_working_dir }}/ha-test-suite/test/test_pacemaker-light-c -r {{ overcloud_working_dir }}/ha-test-suite/recovery/recovery_pacemaker-light
       register: test_ha_ng_c_cmd
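
Illustration (not part of the patch): the change hinges on two idioms. First, a fact-gathering play must run before any task dereferences hostvars[item]['ansible_hostname'], otherwise that fact is undefined; this is why every playbook now sets gather_facts: yes. Second, delegating to "{{ groups.controller[0] }}" picks the first member of the controller group at run time instead of assuming a node named overcloud-controller-0. A minimal sketch of both patterns, assuming only an inventory with "controller" and "compute" groups; the play and task names below are illustrative, not taken from the patch:

    ---
    # Populate hostvars (including ansible_hostname) for every node first.
    - name: Gather facts so hostvars is populated for all hosts
      hosts: all
      gather_facts: yes

    - name: Operate on nodes by their discovered hostnames
      hosts: undercloud
      tasks:
        # Each compute node is addressed by its real hostname, whatever it is.
        - name: Run a command on each compute node
          command: "echo {{ hostvars[item]['ansible_hostname'] }}"
          delegate_to: "{{ hostvars[item]['ansible_hostname'] }}"
          with_items: "{{ groups['compute'] }}"

        # The first controller is looked up from the group, not hard-coded.
        - name: Run a command on the first controller
          command: hostname
          delegate_to: "{{ groups.controller[0] }}"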