openstack-ansible/playbooks/os-nova-install.yml
Jesse Pretorius 332a0be8ef Move database creation into role (nova)
There is no record for why we implement the database creation outside
of the role in the playbook, when we could do it inside the role.

Implementing it inside the role allows us to reduce the quantity of
group_vars duplicated from the role, and allows us to better document
the required variables in the role. The delegation can still be done
as it is done in the playbook too.

In this patch we remove the group_vars which were duplicated from the
role, and remove the DB setup tasks as they are no longer required.

Change-Id: Id43563435631789d7837a7f28cdc971e44b04344
Depends-On: https://review.openstack.org/571785
Depends-On: https://review.openstack.org/574834
2018-06-20 12:19:50 +00:00
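
For reference, the database creation that this change moves into the os_nova
role can still be delegated from within the role, much as the playbook used to
do it. The sketch below is only an illustration of that pattern, not the exact
task list added by the role: it uses Ansible's mysql_db and mysql_user modules,
delegates to the first host in the galera_all group, and the nova_galera_* and
nova_container_mysql_password variables stand in for whatever defaults the role
actually documents.

- name: Create the nova database
  mysql_db:
    login_user: "{{ galera_root_user }}"
    login_password: "{{ galera_root_password }}"
    login_host: "127.0.0.1"
    name: "{{ nova_galera_database }}"
    state: "present"
  delegate_to: "{{ groups['galera_all'][0] }}"
  run_once: yes

- name: Grant access to the nova database
  mysql_user:
    login_user: "{{ galera_root_user }}"
    login_password: "{{ galera_root_password }}"
    login_host: "127.0.0.1"
    name: "{{ nova_galera_user }}"
    password: "{{ nova_container_mysql_password }}"
    host: "%"
    priv: "{{ nova_galera_database }}.*:ALL"
    state: "present"
  delegate_to: "{{ groups['galera_all'][0] }}"
  run_once: yes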


---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
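
# Create the oslo.messaging RPC and notification vhosts/users for nova before
# any of the nova services are deployed. This only needs to happen once per
# deployment, so it is run from the nova_conductor group with run_once.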
- name: Prepare MQ services
  hosts: nova_conductor
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - nova
  tasks:
    - name: Configure oslo messaging rpc vhost/user
      include: common-tasks/oslomsg-rpc-vhost-user.yml
      static: no
      vars:
        rpc_user: "{{ nova_oslomsg_rpc_userid }}"
        rpc_password: "{{ nova_oslomsg_rpc_password }}"
        rpc_vhost: "{{ nova_oslomsg_rpc_vhost }}"
      when:
        - groups[nova_oslomsg_rpc_host_group] | length > 0
      run_once: yes

    - name: Configure oslo.messaging notify vhost/user
      include: common-tasks/oslomsg-notify-vhost-user.yml
      static: no
      vars:
        notify_user: "{{ nova_oslomsg_notify_userid }}"
        notify_password: "{{ nova_oslomsg_notify_password }}"
        notify_vhost: "{{ nova_oslomsg_notify_vhost }}"
      when:
        - nova_ceilometer_enabled | bool
        - groups[nova_oslomsg_notify_host_group] | length > 0
      run_once: yes
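
# The nova services are deployed in tiers: conductor first, then scheduler,
# API, console/metadata and finally compute. Each host pattern excludes the
# groups already covered by an earlier play, so a host which is a member of
# several nova groups is only processed once. A serial value such as
# ['1', '100%'] runs a single canary host first and then the rest of the
# group in one batch.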
- name: Install nova-conductor services
  include: common-playbooks/nova.yml
  vars:
    nova_hosts: "nova_conductor"
    nova_serial: "{{ nova_conductor_serial | default(['1', '100%']) }}"

- name: Install nova-scheduler services
  include: common-playbooks/nova.yml
  vars:
    nova_hosts: "nova_scheduler:!nova_conductor"
    nova_serial: "{{ nova_scheduler_serial | default(['1', '100%']) }}"

- name: Install nova API services
  include: common-playbooks/nova.yml
  vars:
    nova_hosts: "nova_api_os_compute:nova_api_placement:!nova_conductor:!nova_scheduler:!nova_consoleauth"
    nova_serial: "{{ nova_api_serial | default(['1', '100%']) }}"

- name: Install nova console/metadata services
  include: common-playbooks/nova.yml
  vars:
    nova_hosts: "nova_api_metadata:nova_console:!nova_conductor:!nova_scheduler:!nova_consoleauth:!nova_api_os_compute:!nova_api_placement"
    nova_serial: "{{ nova_console_serial | default(['1', '100%']) }}"

- name: Install nova compute
  include: common-playbooks/nova.yml
  vars:
    nova_hosts: "nova_compute:!nova_conductor:!nova_scheduler:!nova_consoleauth:!nova_api_os_compute:!nova_api_placement:!nova_api_metadata:!nova_console"
    nova_serial: "{{ nova_compute_serial | default('100%') }}"

# These facts are set against the deployment host to ensure that
# they are fast to access. This is done in preference to setting
# them against each target as the hostvars extraction will take
# a long time if executed against a large inventory.
- name: Refresh local facts after all software changes are made
  hosts: nova_all
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - nova
  tasks:
    - name: refresh local facts
      setup:
        filter: ansible_local
        gather_subset: "!all"

    # This variable contains the values of the local fact set for the nova
    # venv tag for all hosts in the 'nova_all' host group.
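    # For example, with three hosts that have all been deployed from the same
    # venv, this could look like ['17.0.5', '17.0.5', '17.0.5'] (illustrative
    # values only); a host which has not yet been deployed contributes an
    # undefined entry instead.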
    - name: Gather software version list
      set_fact:
        nova_all_software_versions: "{{ (groups['nova_all'] | map('extract', hostvars, ['ansible_local', 'openstack_ansible', 'nova', 'venv_tag'])) | list }}"
      delegate_to: localhost
      run_once: yes

    # This variable outputs a boolean value which is True when
    # nova_all_software_versions contains a list of defined
    # values. If they are not defined, it means that not all
    # hosts have their software deployed yet.
    - name: Set software deployed fact
      set_fact:
        nova_all_software_deployed: "{{ (nova_all_software_versions | select('defined')) | list == nova_all_software_versions }}"
      delegate_to: localhost
      run_once: yes

    # This variable outputs a boolean when all the values in
    # nova_all_software_versions are the same and the software
    # has been deployed to all hosts in the group.
    - name: Set software updated fact
      set_fact:
        nova_all_software_updated: "{{ ((nova_all_software_versions | unique) | length == 1) and (nova_all_software_deployed | bool) }}"
      delegate_to: localhost
      run_once: yes

# Note that the placement API service and the console services do not
# understand how to reload, so they fail when you try to make them do
# so. We therefore restart them instead.
- name: Reload all nova services which support a reload to ensure new RPC object version is used
  hosts: "nova_all:!nova_api_placement:!nova_console"
  gather_facts: no
  serial: "{{ nova_serial | default('100%') }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - nova
  tasks:
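    # The units listed in service_negate are excluded from the reload here;
    # the placement and console units are handled by the restart play below.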
    - name: Execute nova service reload
      include: common-tasks/restart-service.yml
      vars:
        service_name: "nova"
        service_action: "reloaded"
        service_negate: "{{ ['nova-placement-api.service', 'nova-novncproxy.service', 'nova-spicehtml5proxy.service'] + nova_service_negate | default([]) }}"
      when:
        - "nova_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['nova']['need_service_restart'] | bool"

# Note that the placement API service and the console services do not
# understand how to reload, so they fail when you try to make them do
# so. We therefore restart them instead.
- name: Restart the remaining nova services to ensure new RPC object version is used
  hosts: "nova_api_placement:nova_console"
  gather_facts: no
  serial: "{{ nova_api_serial | default(['1', '100%']) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - nova
  tasks:
    - name: Execute nova service restart
      include: common-tasks/restart-service.yml
      vars:
        service_name: "nova"
        service_action: "restarted"
        service_fact: "nova"
      when:
        - "nova_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['nova']['need_service_restart'] | bool"

- name: Perform online database migrations
  hosts: nova_conductor
  gather_facts: no
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  tags:
    - nova
  tasks:
    - name: Perform online data migrations
      command: "{{ nova_bin }}/nova-manage db online_data_migrations"
      become: yes
      become_user: "{{ nova_system_user_name }}"
      when:
        - "nova_all_software_updated | bool"
        - "ansible_local['openstack_ansible']['nova']['need_online_data_migrations'] | bool"
      changed_when: false
      run_once: yes
      register: data_migrations
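
    # Flip the local fact once the migrations succeed so that subsequent runs
    # of this playbook skip the step until an upgrade marks it as needed again.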
    - name: Disable the online migrations requirement
      ini_file:
        dest: "/etc/ansible/facts.d/openstack_ansible.fact"
        section: nova
        option: need_online_data_migrations
        value: False
      when:
        - data_migrations | succeeded