Jesse Pretorius ef12bf04ed Implement rolling upgrades for cinder
Based on [1], this patch restructures the playbook so that it
executes the cinder deployment in one play per host group,
serialised to ensure that:

1. The services are changed in the right order.
2. The services remain available at all times during
   an upgrade.
3. Online data migrations are actioned once all versions
   of software are at the same levels.
4. If services are sharing a host/container then the
   role execution will not execute twice on the same
   host.

[1] https://docs.openstack.org/developer/cinder/upgrade.html#minimal-downtime-upgrade-procedure

Depends-On: Id95cae40f736ea2c84200955fccdb44ea3bc1dd8
Depends-On: If5729671cb69f928df660ec2d9ba83fe3f567946
Depends-On: I9aacda78f92355374af3f4ab24d2d9a9b47491ed
Change-Id: I0bdb51ce0d8b3b9a145d29ef6808e1fe595924e2
2017-06-20 09:40:43 +00:00

131 lines
4.3 KiB
YAML

---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Rolling-upgrade aware play: the target host group and the serial batch
# size are injected by the calling playbook so each cinder service group
# is upgraded in order and in controlled batches.
- name: Install cinder services
  hosts: "{{ cinder_hosts }}"
  serial: "{{ cinder_serial }}"
  gather_facts: "{{ gather_facts | default(True) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - cinder
  pre_tasks:
    # In order to ensure that any container, software or
    # config file changes which causes a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    # Only meaningful when more than one API backend exists.
    - include: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: disabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"

    # Resolve the host's storage-network address into the
    # "storage_address" fact consumed by the os_cinder role below.
    - name: Determine storage bridge IP address
      include: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "storage_address"

    # cinder-volume containers using LVM need relaxed confinement and
    # device access so LVM block devices can be passed through.
    - name: Configure container (cinder-volume)
      include: ../common-tasks/os-lxc-container-setup.yml
      static: no
      vars:
        aa_profile: "unconfined"
        extra_container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
        extra_container_config_no_restart:
          - "lxc.start.order=79"
      when:
        - "'cinder_volume' in group_names"
        - "cinder_backend_lvm_inuse | bool"

    # All other cinder services get the standard container setup.
    - name: Configure container (other services)
      include: ../common-tasks/os-lxc-container-setup.yml
      static: no
      when:
        - "'cinder_volume' not in group_names"

    - name: Configure log directories (on metal)
      include: ../common-tasks/os-log-dir-setup.yml
      vars:
        log_dirs:
          - src: "/openstack/log/{{ inventory_hostname }}-cinder"
            dest: "/var/log/cinder"

    - name: Configure package proxy cache
      include: ../common-tasks/package-cache-proxy.yml

    # Pass each backing physical volume of the configured volume groups
    # through to the cinder-volume container; runs on the physical host.
    - name: Add volume group block device to cinder
      shell: |
        {% if item.value.volume_group is defined %}
        if [ "$(pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}')" ];then
          for device in `pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}'`
            do lxc-device -n {{ container_name }} add $device
          done
        fi
        {% else %}
        echo "{{ item.key }} volume_group not defined"
        {% endif %}
      with_dict: "{{ cinder_backends | default({}) }}"
      when:
        - physical_host != container_name
        - cinder_backend_lvm_inuse | bool
      delegate_to: "{{ physical_host }}"

    # Re-trigger udev on the host so the newly passed-through devices
    # are picked up.
    - name: udevadm trigger
      command: udevadm trigger
      delegate_to: "{{ physical_host }}"
      when: cinder_backend_lvm_inuse | bool

  roles:
    - role: "os_cinder"
      cinder_storage_address: "{{ storage_address }}"

    # Ceph client bits are only needed on volume hosts backed by RBD.
    - role: "ceph_client"
      openstack_service_system_user: "{{ cinder_system_user_name }}"
      openstack_service_venv_bin: "{{ cinder_bin }}"
      when:
        - "'cinder_volume' in group_names"
        - "cinder_backend_rbd_inuse | default(false) | bool"
      tags:
        - ceph

    - role: "rsyslog_client"
      rsyslog_client_log_rotate_file: cinder_log_rotate
      rsyslog_client_log_dir: "/var/log/cinder"
      rsyslog_client_config_name: "99-cinder-rsyslog-client.conf"
      tags:
        - rsyslog

    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: enabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"