Damian Dabrowski 60750a2796 Gather facts before including common-playbooks
For nova, neutron and cinder, the haproxy service is configured before
the playbooks from common-playbooks/ are included (which is where facts
for these hosts are gathered). As a result, the haproxy config tasks
are executed without common facts.

This may lead to several corner cases. For example, after the facts
cache expires and a user runs os-nova-install.yml, the haproxy config
(the 'Create haproxy service config files' task) will fail because
`haproxy_service_configs` uses the `nova_console_type` variable, which
needs `ansible_facts['architecture']`. This can easily be reproduced with:
```
rm -rf /etc/openstack_deploy/ansible_facts/ && \
openstack-ansible /opt/openstack-ansible/playbooks/os-nova-install.yml
```

As a solution, this change gathers facts at the beginning of the
os-<service>-install.yml playbooks instead of inside
common-playbooks/<service>.yml.
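
For illustration, such a fact-gathering play at the top of one of
these playbooks could look roughly like the sketch below; the play
layout, the gather_subset choice and the osa_gather_facts toggle are
illustrative assumptions, not the literal diff:
```
# Sketch only: an explicit fact-gathering play that runs before any
# haproxy configuration. Details are illustrative, not the actual diff.
- name: Gather cinder facts
  hosts: cinder_all
  gather_facts: false
  tags:
    - always
  tasks:
    - name: Gather minimal facts
      setup:
        gather_subset: "!all,min"
      when: osa_gather_facts | default(True)
```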

This bug was not detected by CI because, during the deployment
process, hardware facts are gathered for all hosts (at least) by
security-hardening.yml. As long as they exist in the cache, everything
works fine.

Change-Id: I27073a1bf85294ff65bde24cae939f28f5c69bd7
2023-07-11 21:00:19 +02:00


---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- name: Install cinder services
  hosts: "{{ cinder_hosts }}"
  serial: "{{ cinder_serial }}"
  gather_facts: false
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "../defaults/{{ install_method }}_install.yml"
  tags:
    - cinder
  pre_tasks:
    # In order to ensure that any container, software or
    # config file changes that cause a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: disabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"
    - name: Determine storage bridge IP address
      include_tasks: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "storage_address"
      tags:
        - always
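
    # cinder-volume with an LVM backend needs direct access to block
    # devices on the host, so the container is granted udev and broad
    # device cgroup permissions through extra LXC config.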
    - name: Configure container (cinder-volume) when lvm is in-use
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      vars:
        extra_container_config:
          - "lxc.autodev=0"
          - "lxc.cgroup.devices.allow=a *:* rmw"
          - "lxc.mount.entry=udev dev devtmpfs defaults 0 0"
        extra_container_config_no_restart:
          - "lxc.start.order=39"
      when:
        - "not is_metal"
        - "'cinder_volume' in group_names"
        - "cinder_backend_lvm_inuse | bool"
    - name: Configure container (other services)
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when:
        - "not is_metal"
        - "not ( 'cinder_volume' in group_names and cinder_backend_lvm_inuse | bool )"
    - include_tasks: ../common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool
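
    # Pass the block devices backing each configured LVM volume group
    # through to the cinder-volume container so LVM inside it can see
    # the physical volumes.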
    - name: Add volume group block device to cinder
      shell: |
        set -o pipefail
        {% if item.value.volume_group is defined %}
          if [ "$(pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}')" ];then
            for device in `pvdisplay | grep -B1 {{ item.value.volume_group }} | awk '/PV/ {print $3}'`
              do lxc-device -n {{ container_name }} add $device
            done
          fi
        {% else %}
          echo "{{ item.key }} volume_group not defined"
        {% endif %}
      args:
        executable: /bin/bash
      with_dict: "{{ cinder_backends | default({}) }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - physical_host != container_name
        - cinder_backend_lvm_inuse | bool
      delegate_to: "{{ physical_host }}"
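
    # Re-trigger udev so the device nodes that were just passed in
    # appear inside the container.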
    - name: udevadm trigger
      command: udevadm trigger
      delegate_to: "{{ physical_host }}"
      when: cinder_backend_lvm_inuse | bool

  roles:
    - role: "os_cinder"
      cinder_storage_address: "{{ storage_address }}"

    - role: "openstack.osa.system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: cinder_api-back
        haproxy_state: enabled
      when:
        - "'cinder_api' in group_names"
        - "groups['cinder_api'] | length > 1"