Create a single playbook to deploy stonith and IHA

This commit creates a new playbook that deploys both STONITH for the
controllers and the instance HA steps, so that a user can drive
everything by specifying just the two variables "stonith_action" and
"instance_ha_action", each supporting the two values "install" and
"uninstall".
It also adds support for downstream releases to all roles, so that one
can pass either "liberty" or "rhos-8" interchangeably as the release
parameter.

Change-Id: Ia422e5e79255c0c0c1af178fc637366dc348500b
Raoul Scarazzini 2017-08-03 06:36:44 -04:00
parent 6e52979141
commit a31e4aef38
16 changed files with 59 additions and 37 deletions
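
With this in place a single run drives both pieces end to end. A minimal invocation sketch follows; the inventory file and playbook path here are hypothetical illustrations, not taken from this commit:

    # Install STONITH on the controllers and set up instance HA in one run
    # (inventory and playbook path are assumed names)
    ansible-playbook -i hosts \
        -e release=rhos-8 \
        -e stonith_action=install \
        -e instance_ha_action=install \
        playbooks/overcloud-instance-ha.yml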

View File

@@ -3,5 +3,5 @@
 overcloud_working_dir: "/home/heat-admin"
 working_dir: "/home/stack"
-# apply or undo
-instance_ha_config: apply
+# Can be install or uninstall
+instance_ha_action: "install"

View File

@@ -24,7 +24,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Disable neutron-openvswitch-agent on compute
   service:
@@ -35,7 +35,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Disable libvirtd on compute
   become: yes
@@ -46,7 +46,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Generate authkey for remote pacemaker
   shell: |
@@ -163,16 +163,16 @@
     - openstack-glance-api-clone
     - neutron-metadata-agent-clone
     - openstack-nova-conductor-clone
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Disable keystone resource
   shell: "pcs resource disable openstack-keystone --wait=900"
-  when: release == 'liberty'
+  when: release in [ 'liberty', 'rhos-8' ]

 # Keystone resource was replaced by openstack-core resource in RHOS9
 - name: Disable openstack-core resource
   shell: "pcs resource disable openstack-core --wait=900"
-  when: release == 'mitaka'
+  when: release in [ 'mitaka', 'rhos-9' ]

 - name: Set controller pacemaker property on controllers
   shell: "pcs property set --node {{ item }} osprole=controller"
@@ -221,14 +221,14 @@
     pcs constraint colocation add ceilometer-compute-clone with libvirtd-compute-clone
     pcs constraint order start libvirtd-compute-clone then nova-compute-clone
     pcs constraint colocation add nova-compute-clone with libvirtd-compute-clone
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Create pacemaker constraint for neutron-server, nova-conductor and ceilometer-notification
   shell: |
     pcs constraint order start neutron-server-clone then neutron-openvswitch-agent-compute-clone require-all=false
     pcs constraint order start openstack-ceilometer-notification-clone then ceilometer-compute-clone require-all=false
     pcs constraint order start openstack-nova-conductor-clone then nova-compute-checkevacuate-clone require-all=false
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Check if ipmi exists for all compute nodes
   shell: |
@@ -264,11 +264,11 @@
 - name: Enable keystone resource
   shell: "pcs resource enable openstack-keystone"
-  when: release == 'liberty'
+  when: release in [ 'liberty', 'rhos-8' ]

 - name: Enable openstack-core resource
   shell: "pcs resource enable openstack-core"
-  when: release == 'mitaka'
+  when: release in [ 'mitaka', 'rhos-9' ]

 - name: Wait for httpd service to be started
   shell: "systemctl show httpd --property=ActiveState"
@@ -276,7 +276,7 @@
   until: httpd_status_result.stdout.find('inactive') == -1 and httpd_status_result.stdout.find('activating') == -1
   retries: 30
   delay: 10
-  when: release != 'liberty' and release != 'mitaka'
+  when: release not in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Enable compute nodes resources (nova)
   shell: "pcs resource enable {{ item }}"
@@ -290,7 +290,7 @@
     - neutron-openvswitch-agent-compute
     - libvirtd-compute
     - ceilometer-compute
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]
   environment:
     OS_USERNAME: "{{ OS_USERNAME.stdout }}"
     OS_PASSWORD: "{{ OS_PASSWORD.stdout }}"

View File

@@ -1,7 +1,16 @@
 ---
+- name: Apply STONITH for controller nodes (if selected)
+  include_role:
+    name: stonith-config
+  when: stonith_action == "install"
+
 - include: apply.yml
-  when: instance_ha_config == 'apply'
+  when: instance_ha_action == 'install'

 - include: undo.yml
-  when: instance_ha_config == 'undo'
+  when: instance_ha_action == 'uninstall'
+
+- name: Remove STONITH for controller nodes (if selected)
+  include_role:
+    name: stonith-config
+  when: stonith_action == "uninstall"
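
Note the ordering in the new tasks file: on install, STONITH for the controllers is applied before the instance HA steps; on uninstall, the instance HA steps are undone first and STONITH is removed last. A teardown run would therefore look roughly like this (same hypothetical inventory and playbook path as above):

    ansible-playbook -i hosts \
        -e stonith_action=uninstall \
        -e instance_ha_action=uninstall \
        playbooks/overcloud-instance-ha.yml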

View File

@@ -131,7 +131,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Enable neutron-openvswitch-agent on compute
   service:
@@ -142,7 +142,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Enable libvirtd on compute
   become: yes
@@ -153,7 +153,7 @@
   delegate_to: "{{ item }}"
   with_items:
     - "{{ groups['compute'] }}"
-  when: release == 'liberty' or release == 'mitaka'
+  when: release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ]

 - name: Stop pacemaker remote service on compute nodes
   become: yes
@@ -179,5 +179,5 @@
   include_role:
     name: stonith-config
   vars:
-    stonith_config: "undo"
+    stonith_action: "uninstall"
     stonith_devices: "computes"

View File

@@ -6,8 +6,8 @@ instack_env_file: "{{ working_dir }}/instackenv.json"
 config_stonith_python_script: config-stonith-from-instackenv.py.j2
-# Can be apply or undo
-stonith_config: "apply"
+# Can be install, uninstall or none
+stonith_action: "install"
-# Can be none, all, controllers or computes
-stonith_devices: all
+# Can be all, controllers or computes
+stonith_devices: controllers
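
With these defaults the role fences only the controllers, which matches how the new combined playbook includes it; the instance-ha undo flow above instead overrides stonith_devices to "computes". The same override works from the command line — a sketch, with a hypothetical playbook path:

    ansible-playbook -i hosts \
        -e stonith_action=install \
        -e stonith_devices=computes \
        playbooks/overcloud-stonith-config.yml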

View File

@@ -8,7 +8,7 @@
 - name: Generate STONITH script
   shell: |
     source {{ working_dir }}/stackrc
-    {{ working_dir }}/config_stonith_from_instackenv.py {{ instack_env_file }} {{ stonith_config }} {{ stonith_devices }}
+    {{ working_dir }}/config_stonith_from_instackenv.py {{ instack_env_file }} {{ stonith_action }} {{ stonith_devices }}
   register: stonith_script

 - name: Delete the STONITH script on the overcloud (if exists)
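
With the role defaults above (and assuming working_dir is /home/stack, as in the instance-ha defaults earlier), the shell this task renders comes out roughly as:

    source /home/stack/stackrc
    /home/stack/config_stonith_from_instackenv.py /home/stack/instackenv.json install controllers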

View File

@@ -12,7 +12,7 @@ from novaclient import client
 jdata = open(sys.argv[1])
 data = json.load(jdata)
-# apply, undo
+# install, uninstall, none
 fence_config = sys.argv[2]
 # controllers, computes, all or none
 fence_devices = sys.argv[3]
@@ -29,11 +29,11 @@ os_compute_api_version = os.environ['COMPUTE_API_VERSION']
 # If fence_devices includes controllers then we act on the overall stonith-enabled property of the cluster
 if (fence_devices in ['controllers','all']):
-    # If we're undoying then we disable stonith
-    if (fence_config == 'undo'):
+    # If we're uninstalling then we disable stonith
+    if (fence_config == 'uninstall'):
         print('pcs property set stonith-enabled=false')
-    # If we're applying then we enable it
-    elif (fence_config == 'apply'):
+    # If we're installing then we enable it
+    elif (fence_config == 'install'):
         print('pcs property set stonith-enabled=true')

 # Connect to nova
@@ -45,9 +45,9 @@ nt = client.Client("2.1", session=sess)
 for instance in nt.servers.list():
     for node in data["nodes"]:
         if (node["mac"][0] == instance.addresses['ctlplane'][0]['OS-EXT-IPS-MAC:mac_addr'] and (('controller' in instance.name and fence_devices in ['controllers','all']) or ('compute' in instance.name and fence_devices in ['computes','all']))):
-            if (fence_config == 'undo'):
+            if (fence_config == 'uninstall'):
                 print('pcs stonith delete ipmilan-{} || /bin/true'.format(instance.name))
-            elif (fence_config == 'apply'):
+            elif (fence_config == 'install'):
                 print('pcs stonith create ipmilan-{} fence_ipmilan pcmk_host_list="{}" ipaddr="{}" login="{}" passwd="{}" lanplus="true" delay=20 op monitor interval=60s'.format(instance.name,instance.name,node["pm_addr"],node["pm_user"],node["pm_password"]))
                 print('pcs constraint location ipmilan-{} avoids {}'.format(instance.name,instance.name))
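
For reference, an install run against a matching controller entry now makes the script emit pcs commands of this shape (the node name and IPMI details below are placeholders, not real values):

    pcs property set stonith-enabled=true
    pcs stonith create ipmilan-overcloud-controller-0 fence_ipmilan pcmk_host_list="overcloud-controller-0" ipaddr="10.0.0.11" login="admin" passwd="secret" lanplus="true" delay=20 op monitor interval=60s
    pcs constraint location ipmilan-overcloud-controller-0 avoids overcloud-controller-0

An uninstall run emits the matching "pcs stonith delete ipmilan-<node> || /bin/true" per device and "pcs property set stonith-enabled=false" for the cluster.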

View File

@@ -51,7 +51,7 @@ export OS_NO_CACHE=True
 auth = v3.Password(auth_url=os_auth_url,
                    username=os_username,
                    password=os_password,
-{% if release in [ 'liberty', 'mitaka' ] %}
+{% if release in [ 'liberty', 'rhos-8', 'mitaka', 'rhos-9' ] %}
                    tenant_name=os_tenant_name,
 {% else %}
                    project_name=os_tenant_name,

View File

@@ -8,20 +8,20 @@ source {{ working_dir }}/stackrc
 CONTROLLERS=$(nova list | grep controller | awk '{print $12}' | cut -f2 -d=)
 CONTROLLER0=$(nova list | grep controller-0 | awk '{print $12}' | cut -f2 -d=)

-{% if release == 'newton' or release == 'mitaka' %}
+{% if release in [ 'mitaka', 'rhos-9', 'newton', 'rhos-10' ] %}
 # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1348222
 for CONTROLLER in $CONTROLLERS; do
     $SSH heat-admin@$CONTROLLER sudo pip install redis;
 done
 {% endif %}

-{% if release == 'mitaka' %}
+{% if release in [ 'mitaka', 'rhos-9' ] %}
 # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1357229
 for CONTROLLER in $CONTROLLERS; do
     $SSH heat-admin@$CONTROLLER "sudo sed -i -e 's/^After=.*/After=syslog.target network.target/g' /usr/lib/systemd/system/openstack-heat-engine.service";
 done
 {% endif %}

-{% if release == 'newton' or release == 'mitaka' %}
+{% if release in [ 'mitaka', 'rhos-9', 'newton', 'rhos-10' ] %}
 $SSH heat-admin@$CONTROLLER0 sudo pcs resource cleanup
 {% endif %}

View File

@@ -1 +1 @@
-test_list_ocata.yml
+test_list_pike.yml

View File

@@ -0,0 +1,8 @@
+test_ha_failed_actions: true
+test_ha_master_slave: true
+test_ha_keystone_stop: false
+test_ha_keystone_constraint_removal: false
+test_ha_ng_a: true
+test_ha_ng_b: true
+test_ha_ng_c: true
+test_ha_instance: true

View File

@@ -0,0 +1 @@
+test_list_newton.yml

View File

@@ -0,0 +1 @@
+test_list_ocata.yml

View File

@@ -0,0 +1 @@
+test_list_pike.yml

View File

@@ -0,0 +1 @@
+test_list_liberty.yml

View File

@@ -0,0 +1 @@
+test_list_mitaka.yml