---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

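# This play installs and configures the nova services on the nova host
# groups, draining and re-enabling the haproxy back ends around any
# container or service changes.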
- name: Install nova services
  hosts: "{{ nova_hosts }}"
  serial: "{{ nova_serial }}"
  gather_facts: "{{ osa_gather_facts | default(True) }}"
  user: root
  environment: "{{ deployment_environment_variables | default({}) }}"
  vars_files:
    - "../defaults/repo_packages/openstack_services.yml"
    - "../defaults/repo_packages/nova_consoles.yml"
    - "../defaults/{{ install_method }}_install.yml"
  tags:
    - nova
  pre_tasks:
    # Enable execution of ceph_client on the nova compute hosts if cinder RBD
    # backends are used. This is necessary to ensure that volume-backed Nova
    # instances can function when RBD is the volume backend.
    - name: Set cinder RBD inuse fact
      set_fact:
        nova_cinder_rbd_inuse: "{{ True in groups['cinder_volume'] | map('extract', hostvars, 'cinder_backend_rbd_inuse') }}"
      delegate_to: localhost
      delegate_facts: True
      when:
        - "'nova_compute' in group_names"
        - "inventory_hostname == ((groups['nova_compute'] | intersect(ansible_play_hosts)) | list)[0]"
        - "hostvars['localhost']['nova_cinder_rbd_inuse'] is not defined"
      tags:
        - always

    # In order to ensure that any container, software or
    # config file changes which cause a container/service
    # restart do not cause an unexpected outage, we drain
    # the load balancer back end for this container.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: disabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_api_placement"
        - "nova_console"

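    # Resolve the address used on the management network; the resulting
    # management_address fact is consumed by the os_nova role below.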
    - name: Determine management bridge IP address
      include_tasks: ../common-tasks/dynamic-address-fact.yml
      vars:
        network_address: "management_address"

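    # Apply the common container setup for the configured container
    # technology (LXC by default); the extra LXC config sets the container's
    # relative start order for lxc-autostart.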
    - name: Configure container
      include_tasks: "../common-tasks/os-{{ container_tech | default('lxc') }}-container-setup.yml"
      when: not is_metal
      vars:
        extra_container_config_no_restart:
          - "lxc.start.order=39"

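    # Configure the hosts as clients of the unbound DNS resolvers when
    # resolvconf management is enabled for the deployment.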
    - include_tasks: ../common-tasks/unbound-clients.yml
      when:
        - hostvars['localhost']['resolvconf_enabled'] | bool

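    # Pass the host's /dev/nbd* devices through to the LXC compute container
    # (assumption: these are used for qemu-nbd based image operations).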
    - name: Add nbd devices to the compute
      shell: |
        for i in /dev/nbd*;do
          lxc-device -n {{ container_name }} add $i $i
        done
      failed_when: false
      register: device_add
      changed_when: >
        'added' in device_add.stdout.lower()
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

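    # Expose /dev/net/tun inside the compute container so tap devices can be
    # created for instance networking.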
    - name: Add net/tun device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/net/tun /dev/net/tun
      delegate_to: "{{ physical_host }}"
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

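    # Check for /dev/kvm on the physical host and, if it is a character
    # device, pass it through so the container can use hardware-accelerated
    # virtualization.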
    - name: Check if kvm device exists
      stat:
        path: /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: kvm_device
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
      tags:
        - always

    - name: Add kvm device to the compute
      command: |
        lxc-device -n {{ container_name }} add /dev/kvm /dev/kvm
      delegate_to: "{{ physical_host }}"
      register: device_add
      failed_when: false
      changed_when: >
        'added' in device_add.stdout.lower()
      when:
        - container_tech | default('lxc') == 'lxc'
        - "'nova_compute' in group_names"
        - "not is_metal | bool"
        - "'ischr' in kvm_device.stat and kvm_device.stat.ischr | bool"
      tags:
        - always

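  # Hand the cinder RBD fact gathered in pre_tasks to the os_nova role,
  # defaulting to False when it was never set.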
  roles:
    - role: "os_nova"
      nova_management_address: "{{ management_address }}"
      nova_cinder_rbd_inuse: "{{ hostvars['localhost']['nova_cinder_rbd_inuse'] | default(False) }}"

    - role: "system_crontab_coordination"
      tags:
        - crontab

  post_tasks:
    # Now that container changes are done, we can set
    # the load balancer back end for this container
    # to available again.
    - include_tasks: ../common-tasks/haproxy-endpoint-manage.yml
      vars:
        haproxy_backend: "{{ backend_name }}-back"
        haproxy_state: enabled
      loop_control:
        loop_var: backend_name
      when:
        - "backend_name in group_names"
        - "groups[backend_name] | length > 1"
      with_items:
        - "nova_api_metadata"
        - "nova_api_os_compute"
        - "nova_api_placement"
        - "nova_console"