Add overcloudrc.v3 support in IHA

This commit adds support for the stack rc file generated for version 3
of keystone. The role now checks for an overcloudrc.v3 file in the
working_dir of the deployment (typically /home/stack) and, if it cannot
find one, falls back to overcloudrc as usual.
The command that creates the nova-evacuate resource was changed
accordingly, adding --force to keep supporting Liberty/OSP-8 and
Mitaka/OSP-9, since on those releases we cannot provide the newly
required project_domain and user_domain fields.
The handling of overcloudrc has also been cleaned up: there is no need
to copy the file onto the controller node anymore. The name of the file
is still computed dynamically, for v3 as well.
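
For reference, a v3 rc file differs from the classic one mainly in its
project/domain variables, which is why the role now also greps
OS_USER_DOMAIN_NAME and OS_PROJECT_DOMAIN_NAME. A minimal sketch of the
two layouts (values are illustrative placeholders, not taken from a
real deployment):

    # overcloudrc (keystone v2): scoped by tenant name only
    export OS_USERNAME=admin
    export OS_PASSWORD=<secret>
    export OS_AUTH_URL=http://192.0.2.10:5000/v2.0
    export OS_TENANT_NAME=admin

    # overcloudrc.v3 (keystone v3): adds the domain-related variables
    export OS_USERNAME=admin
    export OS_PASSWORD=<secret>
    export OS_AUTH_URL=http://192.0.2.10:5000/v3
    export OS_PROJECT_NAME=admin
    export OS_USER_DOMAIN_NAME=Default
    export OS_PROJECT_DOMAIN_NAME=Default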

Note: this commit also solves what was proposed in [1] for dealing
with overcloudrc files that contain both the OS_TENANT_NAME and the
OS_PROJECT_NAME variables, by taking the latter.
The stonith creation Python script has been modified to also support
Ocata, for which we still have OS_TENANT_NAME but positional arguments
are no longer supported.
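
Taking the latter works because the extraction pipeline ends with
tail -1, which keeps only the last matching line. A quick sketch,
assuming a file that exports OS_TENANT_NAME before OS_PROJECT_NAME as
the generated ones do:

    $ grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' overcloudrc
    export OS_TENANT_NAME=admin
    export OS_PROJECT_NAME=admin
    $ grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' overcloudrc | tail -1 | sed 's/export OS_.*_NAME=//g'
    admin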

Change-Id: I1a143b67850c67d6d3207c06abcf60d32f2456ff
commit 00f82288b1 (parent 8fc618c3d3)
Author: Raoul Scarazzini
Date:   2018-03-22 12:11:57 -04:00
3 changed files with 80 additions and 100 deletions

[File 1 of 3]

@@ -111,56 +111,79 @@
     source {{ working_dir }}/stackrc
     openstack stack list -f value -c 'Stack Name'
   register: stack_name
   delegate_to: "{{ groups['undercloud'][0] }}"
+- name: Check if a v3 overcloud's rc file exists
+  stat:
+    path: "{{ working_dir }}/{{ stack_name.stdout }}rc.v3"
+  register: v3_rc_file_stat
+- name: Get the contents of the overcloud's rc file v3
+  set_fact:
+    overcloudrc: "{{ stack_name.stdout }}rc.v3"
+  when: v3_rc_file_stat.stat.exists
 - name: Get the contents of the overcloud's rc file
-  shell: cat {{ working_dir }}/{{ stack_name.stdout }}rc
-  register: overcloudrc
+  set_fact:
+    overcloudrc: "{{ stack_name.stdout }}rc"
+  when: not v3_rc_file_stat.stat.exists
-- block:
-  - name: Remove overcloudrc file on first controller (if exists)
-    file:
-      path: "{{ overcloud_working_dir }}/overcloudrc"
-      state: absent
-  - name: Copy overcloudrc file on first controller
-    lineinfile:
-      destfile: "{{ overcloud_working_dir }}/overcloudrc"
-      line: "{{ overcloudrc.stdout }}"
-      create: yes
-      mode: 0644
-  - name: Get environment vars from overcloudrc
+- name: Get OS_USERNAME from overcloudrc
   shell: |
-    grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
+    grep OS_USERNAME {{ working_dir }}/{{ overcloudrc }} | sed 's/export OS_USERNAME=//g'
   register: "OS_USERNAME"
-- name: Get environment vars from overcloudrc
+- name: Get OS_PASSWORD from overcloudrc
   shell: |
-    grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
+    grep OS_PASSWORD {{ working_dir }}/{{ overcloudrc }} | sed 's/export OS_PASSWORD=//g'
   register: "OS_PASSWORD"
-- name: Get environment vars from overcloudrc
+- name: Get OS_AUTH_URL from overcloudrc
   shell: |
-    grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
+    grep OS_AUTH_URL {{ working_dir }}/{{ overcloudrc }} | sed 's/export OS_AUTH_URL=//g'
   register: "OS_AUTH_URL"
-- name: Get environment vars from overcloudrc
+- name: Get OS_PROJECT_NAME or OS_TENANT_NAME from overcloudrc
   shell: |
-    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
+    grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ working_dir }}/{{ overcloudrc }} | tail -1 | sed 's/export OS_.*_NAME=//g'
   register: "OS_TENANT_NAME"
-  delegate_to: "{{ groups.controller[0] }}"
+- name: Get OS_USER_DOMAIN_NAME from overcloudrc
+  shell: |
+    grep OS_USER_DOMAIN_NAME {{ working_dir }}/{{ overcloudrc }} | sed 's/export OS_USER_DOMAIN_NAME=//g'
+  register: "OS_USER_DOMAIN_NAME"
+  when: v3_rc_file_stat.stat.exists
+- name: Get OS_PROJECT_DOMAIN_NAME from overcloudrc
+  shell: |
+    grep OS_PROJECT_DOMAIN_NAME {{ working_dir }}/{{ overcloudrc }} | sed 's/export OS_PROJECT_DOMAIN_NAME=//g'
+  register: "OS_PROJECT_DOMAIN_NAME"
+  when: v3_rc_file_stat.stat.exists
+- name: Define variable for pcs additional options for overcloud's rc file v3
+  set_fact:
+    pcs_v3_rc_file_opts: ""
+- name: Define variable for pcs additional options for no_shared_storage
+  set_fact:
+    pcs_NovaEvacuate_no_shared_storage_opts: ""
+    pcs_fence_compute_no_shared_storage_opts: ""
+- name: Set pcs additional options for overcloud's rc file v3
+  set_fact:
+    pcs_v3_rc_file_opts: "project_domain=$OS_PROJECT_DOMAIN_NAME user_domain=$OS_USER_DOMAIN_NAME"
+  when: v3_rc_file_stat.stat.exists
+- name: Set pcs additional options for no_shared_storage
+  set_fact:
+    pcs_NovaEvacuate_no_shared_storage_opts: "no_shared_storage=1"
+    pcs_fence_compute_no_shared_storage_opts: "no-shared-storage=True"
+  when: not instance_ha_shared_storage|bool
 - block:
   - name: Create resource nova-evacuate
     shell: |
-      pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=$OS_AUTH_URL username=$OS_USERNAME password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME
-    when: instance_ha_shared_storage|bool
-  - name: Create resource nova-evacuate (no_shared_storage)
-    shell: |
-      pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=$OS_AUTH_URL username=$OS_USERNAME password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME no_shared_storage=1
-    when: not instance_ha_shared_storage|bool
+      pcs resource create nova-evacuate ocf:openstack:NovaEvacuate auth_url=$OS_AUTH_URL username=$OS_USERNAME password=$OS_PASSWORD tenant_name=$OS_TENANT_NAME {{ pcs_v3_rc_file_opts }} {{ pcs_NovaEvacuate_no_shared_storage_opts }} --force
   - name: Create pacemaker constraint to start nova-evacuate only on non compute nodes
     shell: |
@@ -256,21 +279,13 @@
   shell: "pcs resource defaults requires=fencing"
   when: release in [ 'pike', 'rhos-12' ]
 - name: Create fence-nova pacemaker resource
-  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 --force"
-  when: instance_ha_shared_storage|bool and release not in [ 'pike', 'rhos-12' ]
-- name: Create fence-nova pacemaker resource (no shared storage)
-  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 no-shared-storage=True --force"
-  when: not instance_ha_shared_storage|bool and release not in [ 'pike', 'rhos-12' ]
+  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 {{ pcs_fence_compute_no_shared_storage_opts }} --force"
+  when: release not in [ 'pike', 'rhos-12' ]
 - name: Create fence-nova pacemaker resource (Pike/RHOS-12)
-  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 meta provides=unfencing --force"
-  when: instance_ha_shared_storage|bool and release in [ 'pike', 'rhos-12' ]
-- name: Create fence-nova pacemaker resource (no shared storage, Pike/RHOS-12)
-  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 no-shared-storage=True meta provides=unfencing --force"
-  when: not instance_ha_shared_storage|bool and release in [ 'pike', 'rhos-12' ]
+  shell: "pcs stonith create fence-nova fence_compute auth-url=$OS_AUTH_URL login=$OS_USERNAME passwd=$OS_PASSWORD tenant-name=$OS_TENANT_NAME domain=localdomain record-only=1 {{ pcs_fence_compute_no_shared_storage_opts }} meta provides=unfencing --force"
+  when: release in [ 'pike', 'rhos-12' ]
 - name: Create pacemaker constraint for fence-nova to fix it on controller node and set resource-discovery never
   shell: "pcs constraint location fence-nova rule resource-discovery=never score=0 osprole eq controller"
@@ -340,6 +355,8 @@
     OS_PASSWORD: "{{ OS_PASSWORD.stdout }}"
     OS_AUTH_URL: "{{ OS_AUTH_URL.stdout }}"
     OS_TENANT_NAME: "{{ OS_TENANT_NAME.stdout }}"
+    OS_USER_DOMAIN_NAME: "{{ OS_USER_DOMAIN_NAME.stdout }}"
+    OS_PROJECT_DOMAIN_NAME: "{{ OS_PROJECT_DOMAIN_NAME.stdout }}"
   become: yes
   delegate_to: "{{ groups.controller[0] }}"

[File 2 of 3]

@@ -1,49 +1,4 @@
 ---
-- name: Get the name of the stack
-  shell: |
-    source {{ working_dir }}/stackrc
-    openstack stack list -f value -c 'Stack Name'
-  register: stack_name
-  delegate_to: "{{ groups['undercloud'][0] }}"
-- name: Get the contents of the overcloud's rc file
-  shell: cat {{ working_dir }}/{{ stack_name.stdout }}rc
-  register: overcloudrc
-- block:
-  - name: Remove overcloudrc file on first controller (if exists)
-    file:
-      path: "{{ overcloud_working_dir }}/overcloudrc"
-      state: absent
-  - name: Copy overcloudrc file on first controller
-    lineinfile:
-      destfile: "{{ overcloud_working_dir }}/overcloudrc"
-      line: "{{ overcloudrc.stdout }}"
-      create: yes
-      mode: 0644
-  - name: Get environment vars from overcloudrc
-    shell: >
-      grep OS_USERNAME {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_USERNAME=//g'
-    register: "OS_USERNAME"
-  - name: Get environment vars from overcloudrc
-    shell: >
-      grep OS_PASSWORD {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_PASSWORD=//g'
-    register: "OS_PASSWORD"
-  - name: Get environment vars from overcloudrc
-    shell: >
-      grep OS_AUTH_URL {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_AUTH_URL=//g'
-    register: "OS_AUTH_URL"
-  - name: Get environment vars from overcloudrc
-    shell: >
-      grep -E 'OS_PROJECT_NAME|OS_TENANT_NAME' {{ overcloud_working_dir }}/overcloudrc | sed 's/export OS_.*_NAME=//g'
-    register: "OS_TENANT_NAME"
-  delegate_to: "{{ groups.controller[0] }}"
 - block:
   - name: Remove fence-nova STONITH device
     shell: |
@@ -94,11 +49,6 @@
   - name: Unset cluster recheck interval to 1 minute
     shell: "pcs property unset cluster-recheck-interval"
-  environment:
-    OS_USERNAME: "{{ OS_USERNAME.stdout }}"
-    OS_PASSWORD: "{{ OS_PASSWORD.stdout }}"
-    OS_AUTH_URL: "{{ OS_AUTH_URL.stdout }}"
-    OS_TENANT_NAME: "{{ OS_TENANT_NAME.stdout }}"
   become: yes
   delegate_to: "{{ groups.controller[0] }}"

[File 3 of 3]

@@ -40,19 +40,32 @@ if (fence_devices in ['controllers','all']):
     # Connect to nova
     try:
         # Liberty/OSP-8,Mitaka/OSP-9,Newton/OSP-10
        nt = client.Client(2,
                           os_username,
                           os_password,
                           os_tenant_name,
                           os_auth_url)
        nt.hypervisors.list()
     except:
-        nt = client.Client(2,
-                           auth_url=os_auth_url,
-                           username=os_username,
-                           password=os_password,
-                           project_name=os_project_name,
-                           project_domain_name=os_project_domain_name,
-                           user_domain_name=os_user_domain_name)
+        try:
+            # Ocata/OSP-11
+            nt = client.Client(2,
+                               username=os_username,
+                               password=os_password,
+                               project_name=os_tenant_name,
+                               auth_url=os_auth_url)
+            nt.hypervisors.list()
+        except:
+            # Pike/OSP-12
+            nt = client.Client(2,
+                               auth_url=os_auth_url,
+                               username=os_username,
+                               password=os_password,
+                               project_name=os_project_name,
+                               project_domain_name=os_project_domain_name,
+                               user_domain_name=os_user_domain_name)
+            nt.hypervisors.list()
     # Parse instances
     for instance in nt.servers.list():