
With minimal fact gathering we hit failures when trying to evaluate the number of threads for the APIs or the mounts for hardening. To avoid gathering the full hardware subset, we add a common task which gathers only the specific subset needed and allows it to be filtered further. Gathering just the processor or mounts subset simply does not work with Ansible and results in the full hardware subset being collected as well.

Change-Id: Ia5802b4ec0b18271b8c5fbcc5574b484c5233a01
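The referenced common task itself is not shown in this view; below is a minimal, illustrative sketch of what such a task file could contain, assuming it simply calls the setup module with a hardware-only gather_subset and an optional fact filter. The deployment_extra_facts_filter variable is hypothetical and used only to show the filtering hook; only osa_gather_facts appears in the committed playbook, which follows after the sketch.

# Illustrative sketch only (assumed content, not the committed
# common-tasks/gather-hardware-facts.yml): gather just the hardware
# subset and optionally narrow the returned facts, e.g. to processor
# counts and mounts, via the setup module's filter option.
- name: Gather hardware facts
  setup:
    # Skip everything, including the minimal subset, except hardware facts.
    gather_subset:
      - "!all"
      - "!min"
      - "hardware"
    # deployment_extra_facts_filter is a hypothetical variable shown here to
    # illustrate additional filtering; the setup module's filter option
    # accepts a shell-style glob over fact names.
    filter: "{{ deployment_extra_facts_filter | default('ansible_*') }}"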
---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

- name: Gather glance facts
  hosts: "glance_all"
  gather_facts: "{{ osa_gather_facts | default(True) }}"
  tags:
    - always

- name: Install glance services
  hosts: "glance_all"
  serial: "{{ glance_api_serial | default(['1', '100%']) }}"
  gather_facts: false
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "defaults/repo_packages/openstack_services.yml"
    - "defaults/{{ install_method }}_install.yml"
  tags:
    - glance
  pre_tasks:
    - name: Gather additional facts
      include_tasks: "common-tasks/gather-hardware-facts.yml"
      tags:
        - always

    # In order to ensure that any container, software or
    # config file changes which causes a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    - include_tasks: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: glance_api-back
        haproxy_state: disabled
      when:
        - "'glance_api' in group_names"
        - "groups['glance_api'] | length > 1"

    - name: Configure container (non-nfs)
      include_tasks: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      vars:
        list_of_bind_mounts: "{{ glance_container_bind_mounts }}"
      when:
        - not is_metal
        - glance_default_store == "file"
        - (glance_nfs_client is not defined) or (glance_nfs_client | length == 0)

    - name: Configure container (nfs)
      include_tasks: "common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when:
        - not is_metal
        - (glance_default_store != "file") or (glance_nfs_client is defined)

    - include_tasks: common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool

  roles:
    - role: "os_glance"

    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include_tasks: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: glance_api-back
        haproxy_state: enabled
      when:
        - "'glance_api' in group_names"
        - "groups['glance_api'] | length > 1"


# These facts are set against the deployment host to ensure that
# they are fast to access. This is done in preference to setting
# them against each target as the hostvars extraction will take
# a long time if executed against a large inventory.
- name: Refresh local facts after all software changes are made
  hosts: glance_all
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "defaults/{{ install_method }}_install.yml"
  tags:
    - glance
  tasks:
    - name: refresh local facts
      setup:
        filter: ansible_local
        gather_subset: "!all"

    # This variable contains the values of the local fact set for the glance
    # venv tag for all hosts in the 'glance_all' host group.
    - name: Gather software version list
      set_fact:
        glance_all_software_versions: "{{ (groups['glance_all'] |
                                           map('extract', hostvars, ['ansible_local', 'openstack_ansible', 'glance', 'venv_tag'])) |
                                           list }}"
      delegate_to: localhost
      run_once: yes

    # This variable outputs a boolean value which is True when
    # glance_all_software_versions contains a list of defined
    # values. If they are not defined, it means that not all
    # hosts have their software deployed yet.
    - name: Set software deployed fact
      set_fact:
        glance_all_software_deployed: "{{ (glance_all_software_versions | select('defined')) | list == glance_all_software_versions }}"
      delegate_to: localhost
      run_once: yes

    # This variable outputs a boolean when all the values in
    # glance_all_software_versions are the same and the software
    # has been deployed to all hosts in the group.
    - name: Set software updated fact
      set_fact:
        glance_all_software_updated: "{{ ((glance_all_software_versions | unique) | length == 1) and (glance_all_software_deployed | bool) }}"
      delegate_to: localhost
      run_once: yes


- name: Restart glance API to ensure new RPC object version is used
  hosts: glance_all
  gather_facts: no
  serial: "{{ glance_api_serial | default(['1','100%']) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "defaults/{{ install_method }}_install.yml"
  tags:
    - glance
  tasks:
    # In order to ensure that the service restart does not
    # cause an unexpected outage, we drain the load balancer
    # back end for this container.
    - include: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: glance_api-back
        haproxy_state: disabled
      when:
        - "glance_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['glance']['need_service_restart'] | bool"
        - "groups['glance_api'] | length > 1"

    - name: Execute glance service restart
      include: common-tasks/restart-service.yml
      vars:
        service_name: "glance-api"
        service_action: "restarted"
        service_fact: "glance"
      when:
        - "glance_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['glance']['need_service_restart'] | bool"

    # Now that service restart is done, we can set
    # the load balancer back end for this container
    # to available again.
    - include: common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: glance_api-back
        haproxy_state: enabled
      when: "groups['glance_api'] | length > 1"