Copy Nova role as a basis for the Nova cell role

The idea is to factor out a role for deploying Nova-related services
to cells. Since all deployments use cells, this role can be used
both in regular deployments, which have just cell0 and cell1,
and in deployments with many cells.

Partially Implements: blueprint support-nova-cells
Change-Id: Ib1f36ec0a773c384f2c1eac1843782a3e766045a
Doug Szumski 2019-07-23 13:07:42 +00:00 committed by Mark Goddard
parent 039cc2be50
commit 952b53089b
46 changed files with 2549 additions and 0 deletions


@@ -0,0 +1,461 @@
---
project_name: "nova"
nova_services:
nova-libvirt:
container_name: nova_libvirt
group: compute
enabled: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}"
image: "{{ nova_libvirt_image_full }}"
pid_mode: "host"
privileged: True
volumes: "{{ nova_libvirt_default_volumes + nova_libvirt_extra_volumes }}"
dimensions: "{{ nova_libvirt_dimensions }}"
nova-ssh:
container_name: "nova_ssh"
group: "compute"
image: "{{ nova_ssh_image_full }}"
enabled: "{{ enable_nova_ssh | bool }}"
volumes: "{{ nova_ssh_default_volumes + nova_ssh_extra_volumes }}"
dimensions: "{{ nova_ssh_dimensions }}"
nova-api:
container_name: "nova_api"
group: "nova-api"
image: "{{ nova_api_image_full }}"
enabled: True
privileged: True
volumes: "{{ nova_api_default_volumes + nova_api_extra_volumes }}"
dimensions: "{{ nova_api_dimensions }}"
haproxy:
nova_api:
enabled: "{{ enable_nova }}"
mode: "http"
external: false
port: "{{ nova_api_port }}"
listen_port: "{{ nova_api_listen_port }}"
nova_api_external:
enabled: "{{ enable_nova }}"
mode: "http"
external: true
port: "{{ nova_api_port }}"
listen_port: "{{ nova_api_listen_port }}"
nova_metadata:
enabled: "{{ enable_nova }}"
mode: "http"
external: false
port: "{{ nova_metadata_port }}"
listen_port: "{{ nova_metadata_listen_port }}"
nova_metadata_external:
enabled: "{{ enable_nova }}"
mode: "http"
external: true
port: "{{ nova_metadata_port }}"
listen_port: "{{ nova_metadata_listen_port }}"
nova_rdp:
enabled: "{{ enable_nova|bool and nova_console == 'rdp' }}"
mode: "http"
external: false
port: "{{ rdp_port }}"
host_group: "hyperv"
nova-novncproxy:
container_name: "nova_novncproxy"
group: "nova-novncproxy"
image: "{{ nova_novncproxy_image_full }}"
enabled: "{{ nova_console == 'novnc' }}"
volumes: "{{ nova_novncproxy_default_volumes + nova_novncproxy_extra_volumes }}"
dimensions: "{{ nova_novncproxy_dimensions }}"
haproxy:
nova_novncproxy:
enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
mode: "http"
external: false
port: "{{ nova_novncproxy_port }}"
listen_port: "{{ nova_novncproxy_listen_port }}"
backend_http_extra:
- "timeout tunnel 1h"
nova_novncproxy_external:
enabled: "{{ enable_nova|bool and nova_console == 'novnc' }}"
mode: "http"
external: true
port: "{{ nova_novncproxy_port }}"
listen_port: "{{ nova_novncproxy_listen_port }}"
nova-scheduler:
container_name: "nova_scheduler"
group: "nova-scheduler"
image: "{{ nova_scheduler_image_full }}"
enabled: True
volumes: "{{ nova_scheduler_default_volumes + nova_scheduler_extra_volumes }}"
dimensions: "{{ nova_scheduler_dimensions }}"
nova-spicehtml5proxy:
container_name: "nova_spicehtml5proxy"
group: "nova-spicehtml5proxy"
image: "{{ nova_spicehtml5proxy_image_full }}"
enabled: "{{ nova_console == 'spice' }}"
volumes: "{{ nova_spicehtml5proxy_default_volumes + nova_spicehtml5proxy_extra_volumes }}"
dimensions: "{{ nova_spicehtml5proxy_dimensions }}"
haproxy:
nova_spicehtml5proxy:
enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
mode: "http"
external: false
port: "{{ nova_spicehtml5proxy_port }}"
listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
nova_spicehtml5proxy_external:
enabled: "{{ enable_nova|bool and nova_console == 'spice' }}"
mode: "http"
external: true
port: "{{ nova_spicehtml5proxy_port }}"
listen_port: "{{ nova_spicehtml5proxy_listen_port }}"
nova-serialproxy:
container_name: "nova_serialproxy"
group: "nova-serialproxy"
image: "{{ nova_serialproxy_image_full }}"
enabled: "{{ enable_nova_serialconsole_proxy | bool }}"
volumes: "{{ nova_serialproxy_default_volumes + nova_serialproxy_extra_volumes }}"
dimensions: "{{ nova_serialproxy_dimensions }}"
haproxy:
nova_serialconsole_proxy:
enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
mode: "http"
external: false
port: "{{ nova_serialproxy_port }}"
listen_port: "{{ nova_serialproxy_listen_port }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
nova_serialconsole_proxy_external:
enabled: "{{ enable_nova|bool and enable_nova_serialconsole_proxy|bool }}"
mode: "http"
external: true
port: "{{ nova_serialproxy_port }}"
listen_port: "{{ nova_serialproxy_listen_port }}"
backend_http_extra:
- "timeout tunnel {{ haproxy_nova_serialconsole_proxy_tunnel_timeout }}"
nova-conductor:
container_name: "nova_conductor"
group: "nova-conductor"
enabled: True
image: "{{ nova_conductor_image_full }}"
volumes: "{{ nova_conductor_default_volumes + nova_conductor_extra_volumes }}"
dimensions: "{{ nova_conductor_dimensions }}"
nova-compute:
container_name: "nova_compute"
group: "compute"
image: "{{ nova_compute_image_full }}"
environment:
LIBGUESTFS_BACKEND: "direct"
privileged: True
enabled: "{{ not enable_nova_fake | bool }}"
ipc_mode: "host"
volumes: "{{ nova_compute_default_volumes + nova_compute_extra_volumes }}"
dimensions: "{{ nova_compute_dimensions }}"
nova-compute-ironic:
container_name: "nova_compute_ironic"
group: "nova-compute-ironic"
image: "{{ nova_compute_ironic_image_full }}"
enabled: "{{ enable_ironic | bool }}"
volumes: "{{ nova_compute_ironic_default_volumes + nova_compute_ironic_extra_volumes }}"
dimensions: "{{ nova_compute_ironic_dimensions }}"
####################
# Ceph
####################
ceph_nova_pool_type: "{{ ceph_pool_type }}"
ceph_nova_cache_mode: "{{ ceph_cache_mode }}"
# Due to Ansible issues on include, you cannot override these variables. Please
# override the variables they reference instead.
nova_pool_name: "{{ ceph_nova_pool_name }}"
nova_pool_type: "{{ ceph_nova_pool_type }}"
nova_cache_mode: "{{ ceph_nova_cache_mode }}"
nova_pool_pg_num: "{{ ceph_pool_pg_num }}"
nova_pool_pgp_num: "{{ ceph_pool_pgp_num }}"
# Discard option for nova-managed disks. Requires libvirt 1.0.6 or later and
# qemu 1.6.0 or later. Set to "" to disable.
nova_hw_disk_discard: "unmap"
ceph_client_nova_keyring_caps:
mon: 'allow r, allow command "osd blacklist"'
osd: >-
allow class-read object_prefix rbd_children,
allow rwx pool={{ ceph_cinder_pool_name }},
allow rwx pool={{ ceph_cinder_pool_name }}-cache,
allow rwx pool={{ ceph_nova_pool_name }},
allow rwx pool={{ ceph_nova_pool_name }}-cache,
allow rwx pool={{ ceph_glance_pool_name }},
allow rwx pool={{ ceph_glance_pool_name }}-cache
####################
# Database
####################
nova_database_name: "nova"
nova_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova{% endif %}"
nova_database_address: "{{ database_address }}:{{ database_port }}"
nova_api_database_name: "nova_api"
nova_api_database_user: "{% if use_preconfigured_databases | bool and use_common_mariadb_user | bool %}{{ database_user }}{% else %}nova_api{% endif %}"
nova_api_database_address: "{{ database_address }}:{{ database_port }}"
####################
# Docker
####################
nova_install_type: "{{ kolla_install_type }}"
nova_tag: "{{ openstack_release }}"
nova_libvirt_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-libvirt"
nova_libvirt_tag: "{{ nova_tag }}"
nova_libvirt_image_full: "{{ nova_libvirt_image }}:{{ nova_libvirt_tag }}"
nova_libvirt_cpu_mode: "{{ 'host-passthrough' if ansible_architecture == 'aarch64' else '' }}"
nova_ssh_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-ssh"
nova_ssh_tag: "{{ nova_tag }}"
nova_ssh_image_full: "{{ nova_ssh_image }}:{{ nova_ssh_tag }}"
nova_conductor_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-conductor"
nova_conductor_tag: "{{ nova_tag }}"
nova_conductor_image_full: "{{ nova_conductor_image }}:{{ nova_conductor_tag }}"
nova_novncproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-novncproxy"
nova_novncproxy_tag: "{{ nova_tag }}"
nova_novncproxy_image_full: "{{ nova_novncproxy_image }}:{{ nova_novncproxy_tag }}"
nova_spicehtml5proxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-spicehtml5proxy"
nova_spicehtml5proxy_tag: "{{ nova_tag }}"
nova_spicehtml5proxy_image_full: "{{ nova_spicehtml5proxy_image }}:{{ nova_spicehtml5proxy_tag }}"
nova_scheduler_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-scheduler"
nova_scheduler_tag: "{{ nova_tag }}"
nova_scheduler_image_full: "{{ nova_scheduler_image }}:{{ nova_scheduler_tag }}"
nova_compute_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute"
nova_compute_tag: "{{ nova_tag }}"
nova_compute_image_full: "{{ nova_compute_image }}:{{ nova_compute_tag }}"
nova_api_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-api"
nova_api_tag: "{{ nova_tag }}"
nova_api_image_full: "{{ nova_api_image }}:{{ nova_api_tag }}"
nova_compute_ironic_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-compute-ironic"
nova_compute_ironic_tag: "{{ nova_tag }}"
nova_compute_ironic_image_full: "{{ nova_compute_ironic_image }}:{{ nova_compute_ironic_tag }}"
nova_serialproxy_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ nova_install_type }}-nova-serialproxy"
nova_serialproxy_tag: "{{ nova_tag }}"
nova_serialproxy_image_full: "{{ nova_serialproxy_image }}:{{ nova_serialproxy_tag }}"
nova_libvirt_dimensions: "{{ default_container_dimensions }}"
nova_ssh_dimensions: "{{ default_container_dimensions }}"
nova_api_dimensions: "{{ default_container_dimensions }}"
nova_novncproxy_dimensions: "{{ default_container_dimensions }}"
nova_scheduler_dimensions: "{{ default_container_dimensions }}"
nova_spicehtml5proxy_dimensions: "{{ default_container_dimensions }}"
nova_serialproxy_dimensions: "{{ default_container_dimensions }}"
nova_conductor_dimensions: "{{ default_container_dimensions }}"
nova_compute_dimensions: "{{ default_container_dimensions }}"
nova_compute_ironic_dimensions: "{{ default_container_dimensions }}"
nova_libvirt_default_volumes:
- "{{ node_config_directory }}/nova-libvirt/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run/:/run/:shared"
- "/dev:/dev"
- "/sys/fs/cgroup:/sys/fs/cgroup"
- "kolla_logs:/var/log/kolla/"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "nova_libvirt_qemu:/etc/libvirt/qemu"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_ssh_default_volumes:
- "{{ node_config_directory }}/nova-ssh/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_api_default_volumes:
- "{{ node_config_directory }}/nova-api/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_novncproxy_default_volumes:
- "{{ node_config_directory }}/nova-novncproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_scheduler_default_volumes:
- "{{ node_config_directory }}/nova-scheduler/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_spicehtml5proxy_default_volumes:
- "{{ node_config_directory }}/nova-spicehtml5proxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_serialproxy_default_volumes:
- "{{ node_config_directory }}/nova-serialproxy/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_conductor_default_volumes:
- "{{ node_config_directory }}/nova-conductor/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_compute_default_volumes:
- "{{ node_config_directory }}/nova-compute/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "/dev:/dev"
- "kolla_logs:/var/log/kolla/"
- "{% if enable_iscsid | bool %}iscsi_info:/etc/iscsi{% endif %}"
- "libvirtd:/var/lib/libvirt"
- "{{ nova_instance_datadir_volume }}:/var/lib/nova/"
- "{% if enable_shared_var_lib_nova_mnt | bool %}/var/lib/nova/mnt:/var/lib/nova/mnt:shared{% endif %}"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_compute_ironic_default_volumes:
- "{{ node_config_directory }}/nova-compute-ironic/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "kolla_logs:/var/log/kolla/"
- "{{ kolla_dev_repos_directory ~ '/nova/nova:/var/lib/kolla/venv/lib/python2.7/site-packages/nova' if nova_dev_mode | bool else '' }}"
nova_extra_volumes: "{{ default_extra_volumes }}"
nova_libvirt_extra_volumes: "{{ nova_extra_volumes }}"
nova_ssh_extra_volumes: "{{ nova_extra_volumes }}"
nova_api_extra_volumes: "{{ nova_extra_volumes }}"
nova_novncproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_scheduler_extra_volumes: "{{ nova_extra_volumes }}"
nova_spicehtml5proxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_serialproxy_extra_volumes: "{{ nova_extra_volumes }}"
nova_conductor_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_extra_volumes: "{{ nova_extra_volumes }}"
nova_compute_ironic_extra_volumes: "{{ nova_extra_volumes }}"
####################
# HAProxy
####################
haproxy_nova_serialconsole_proxy_tunnel_timeout: "10m"
####################
# OpenStack
####################
nova_legacy_admin_endpoint: "{{ admin_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_legacy_internal_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_legacy_public_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn }}:{{ nova_api_port }}/v2/%(tenant_id)s"
nova_admin_endpoint: "{{ admin_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2.1"
nova_internal_endpoint: "{{ internal_protocol }}://{{ nova_internal_fqdn }}:{{ nova_api_port }}/v2.1"
nova_public_endpoint: "{{ public_protocol }}://{{ nova_external_fqdn }}:{{ nova_api_port }}/v2.1"
nova_logging_debug: "{{ openstack_logging_debug }}"
openstack_nova_auth: "{{ openstack_auth }}"
nova_compute_host_rp_filter_mode: 0
nova_safety_upgrade: "no"
nova_libvirt_port: "{{'16514' if libvirt_tls | bool else '16509'}}"
nova_ssh_port: "8022"
nova_services_require_nova_conf:
- nova-api
- nova-compute
- nova-compute-ironic
- nova-conductor
- nova-novncproxy
- nova-serialproxy
- nova-scheduler
- nova-spicehtml5proxy
# After upgrading nova-compute, services will have an RPC version cap in place.
# We need to restart all services that communicate with nova-compute in order
# to allow them to use the latest RPC version. Ideally, there would be a way to
# check whether all nova services are using the latest version, but currently
# there is not. Instead, wait a short time for all nova compute services to
# update the version of their service in the database. This seems to take
# around 10 seconds, but the default is 30 to allow room for slowness.
nova_compute_startup_delay: 30
####################
# Keystone
####################
nova_ks_services:
- name: "nova_legacy"
type: "compute_legacy"
description: "OpenStack Compute Service (Legacy 2.0)"
endpoints:
- {'interface': 'admin', 'url': '{{ nova_legacy_admin_endpoint }}'}
- {'interface': 'internal', 'url': '{{ nova_legacy_internal_endpoint }}'}
- {'interface': 'public', 'url': '{{ nova_legacy_public_endpoint }}'}
- name: "nova"
type: "compute"
description: "OpenStack Compute Service"
endpoints:
- {'interface': 'admin', 'url': '{{ nova_admin_endpoint }}'}
- {'interface': 'internal', 'url': '{{ nova_internal_endpoint }}'}
- {'interface': 'public', 'url': '{{ nova_public_endpoint }}'}
nova_ks_users:
- project: "service"
user: "{{ nova_keystone_user }}"
password: "{{ nova_keystone_password }}"
role: "admin"
####################
# Notification
####################
nova_notification_topics:
- name: notifications
enabled: "{{ enable_ceilometer | bool or enable_searchlight | bool or enable_neutron_infoblox_ipam_agent | bool }}"
- name: notifications_designate
enabled: "{{ enable_designate | bool }}"
- name: vitrage_notifications
enabled: "{{ enable_vitrage | bool }}"
nova_enabled_notification_topics: "{{ nova_notification_topics | selectattr('enabled', 'equalto', true) | list }}"
####################
# VMware
####################
vmware_vcenter_datastore_regex: ".*"
ovs_bridge: "nsx-managed"
####################
# Libvirt/qemu
####################
# The number of max files qemu can open
qemu_max_files: 32768
# The number of max processes qemu can open
qemu_max_processes: 131072
# Use TLS for libvirt connections and live migration
libvirt_tls: false
# Should kolla-ansible manage/copy the certs. When false, the deployer is
# responsible for making the TLS certs show up in the config directories,
# and also for restarting the nova_compute and nova_libvirt containers when
# the key changes, as we can't know when to do that.
libvirt_tls_manage_certs: true
# When using TLS we verify that the hostname we are connected to matches the
# hostname in the libvirt cert we are presented with. As such we can't use IPs
# here, but we keep the ability for people to override the hostname to use.
migration_hostname: "{{ ansible_nodename }}"
####################
# Kolla
####################
nova_git_repository: "{{ kolla_dev_repos_git }}/{{ project_name }}"
nova_dev_repos_pull: "{{ kolla_dev_repos_pull }}"
nova_dev_mode: "{{ kolla_dev_mode }}"
nova_source_version: "{{ kolla_source_version }}"
###################################
# Enable Shared Bind Propagation
###################################
enable_shared_var_lib_nova_mnt: "{{ enable_cinder_backend_nfs | bool or enable_cinder_backend_quobyte | bool }}"
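
The comment in the Ceph section above notes that the nova_pool_* variables
cannot be overridden directly due to Ansible include behaviour; deployers
override the variables they reference instead. A minimal illustrative
globals.yml snippet (values are examples only, not part of this commit):

ceph_nova_pool_name: "vms"
ceph_nova_pool_type: "replicated"
ceph_nova_cache_mode: "writeback"
# Extra bind mounts applied to all nova containers via nova_extra_volumes:
nova_extra_volumes:
  - "/opt/certs:/opt/certs:ro"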


@@ -0,0 +1,240 @@
---
- name: Restart nova-conductor container
vars:
service_name: "nova-conductor"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-ssh container
vars:
service_name: "nova-ssh"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
pid_mode: "{{ service.pid_mode | default('') }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-libvirt container
vars:
service_name: "nova-libvirt"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
pid_mode: "{{ service.pid_mode | default('') }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
register: restart_nova_libvirt
  # NOTE(Jeffrey4l): retry up to 5 times to remove the nova_libvirt container,
  # because when guests are running, nova_libvirt raises an error even though
  # it is removed.
retries: 5
until: restart_nova_libvirt is success
when:
- kolla_action != "config"
- name: Restart nova-scheduler container
vars:
service_name: "nova-scheduler"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-novncproxy container
vars:
service_name: "nova-novncproxy"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-spicehtml5proxy container
vars:
service_name: "nova-spicehtml5proxy"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-serialproxy container
vars:
service_name: "nova-serialproxy"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-api container
vars:
service_name: "nova-api"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-compute container
vars:
service_name: "nova-compute"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
environment: "{{ service.environment | default(omit) }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
ipc_mode: "{{ service.ipc_mode | default(omit) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
- name: Restart nova-compute-ironic container
vars:
service_name: "nova-compute-ironic"
service: "{{ nova_services[service_name] }}"
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "{{ service.container_name }}"
image: "{{ service.image }}"
privileged: "{{ service.privileged | default(False) }}"
volumes: "{{ service.volumes|reject('equalto', '')|list }}"
dimensions: "{{ service.dimensions }}"
when:
- kolla_action != "config"
# nova-compute-fake is special. It starts multiple containers, so put all
# variables here rather than in the defaults/main.yml file.
- name: Restart nova-compute-fake containers
become: true
kolla_docker:
action: "recreate_or_restart_container"
common_options: "{{ docker_common_options }}"
name: "nova_compute_fake_{{ item }}"
image: "{{ nova_compute_image_full }}"
privileged: True
volumes:
- "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
- kolla_action != "config"
# NOTE(mgoddard): After upgrading nova-compute, services will have an RPC
# version cap in place. We need to restart all services that communicate with
# nova-compute in order to allow them to use the latest RPC version. Ideally,
# there would be a way to check whether all nova services are using the latest
# version, but currently there is not. Instead, wait a short time for all nova
# compute services to update the version of their service in the database.
# This seems to take around 10 seconds, but the default is 30 to allow room
# for slowness.
- name: Wait for nova-compute services to update service versions
pause:
seconds: "{{ nova_compute_startup_delay }}"
run_once: true
when:
- kolla_action == 'upgrade'
listen:
- Restart nova-compute container
- Restart nova-compute-ironic container
- Restart nova-compute-fake containers
# NOTE(mgoddard): Currently (just prior to Stein release), sending SIGHUP to
# nova compute services leaves them in a broken state in which they cannot
# start new instances. The following error is seen in the logs:
# "In shutdown, no new events can be scheduled"
# To work around this we restart the nova-compute services.
# Speaking to the nova team, this seems to be an issue in oslo.service,
# with a fix proposed here: https://review.openstack.org/#/c/641907.
# This issue also seems to affect the proxy services, which exit non-zero in
# response to a SIGHUP, so restart those too.
# The issue actually affects all nova services, since they remain with RPC
# version pinned to the previous release:
# https://bugs.launchpad.net/kolla-ansible/+bug/1833069.
# TODO(mgoddard): Use SIGHUP when this bug has been fixed.
- name: Restart nova services to remove RPC version cap
become: true
kolla_docker:
action: restart_container
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
when:
- kolla_action == 'upgrade'
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
- item.key in nova_services_require_nova_conf
with_dict: "{{ nova_services }}"
listen:
- Restart nova-compute container
- Restart nova-compute-ironic container
- Restart nova-compute-fake containers
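
The last two handlers above subscribe to the compute restart notifications via
Ansible's listen keyword, so a single notification fans out to several handlers
in definition order. A minimal self-contained sketch of the mechanism
(hypothetical playbook, not part of this commit):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Change something that requires a restart
      command: /bin/true
      changed_when: true
      notify: Restart nova-compute container
  handlers:
    - name: Restart nova-compute container
      debug:
        msg: "restart the container here"
    - name: Wait for services to settle
      debug:
        msg: "also runs, subscribed via listen"
      listen: Restart nova-compute container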


@@ -0,0 +1,3 @@
---
dependencies:
- { role: common }


@@ -0,0 +1,53 @@
---
- name: Creating Nova databases
become: true
kolla_toolbox:
module_name: mysql_db
module_args:
login_host: "{{ database_address }}"
login_port: "{{ database_port }}"
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ item }}"
run_once: True
delegate_to: "{{ groups['nova-api'][0] }}"
with_items:
- "{{ nova_database_name }}"
- "{{ nova_database_name }}_cell0"
- "{{ nova_api_database_name }}"
when:
- not use_preconfigured_databases | bool
- name: Creating Nova databases user and setting permissions
become: true
kolla_toolbox:
module_name: mysql_user
module_args:
login_host: "{{ database_address }}"
login_port: "{{ database_port }}"
login_user: "{{ database_user }}"
login_password: "{{ database_password }}"
name: "{{ item.database_username }}"
password: "{{ item.database_password }}"
host: "%"
priv: "{{ item.database_name }}.*:ALL"
append_privs: "yes"
with_items:
- database_name: "{{ nova_database_name }}"
database_username: "{{ nova_database_user }}"
database_password: "{{ nova_database_password }}"
- database_name: "{{ nova_database_name }}_cell0"
database_username: "{{ nova_database_user }}"
database_password: "{{ nova_database_password }}"
- database_name: "{{ nova_api_database_name }}"
database_username: "{{ nova_api_database_user }}"
database_password: "{{ nova_api_database_password }}"
loop_control:
label: "{{ item.database_name }}"
run_once: True
delegate_to: "{{ groups['nova-api'][0] }}"
when:
- not use_preconfigured_databases | bool
no_log: true
- include_tasks: bootstrap_service.yml


@@ -0,0 +1,20 @@
---
- name: Running Nova bootstrap container
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
environment:
KOLLA_UPGRADE:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "bootstrap_nova"
restart_policy: no
volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"


@@ -0,0 +1,26 @@
---
- name: Install package python-os-xenapi
package:
name: python-os-xenapi
state: present
become: True
- name: Ensure XenAPI root path
file:
path: "{{ xenapi_facts_root }}"
state: directory
mode: "0770"
become: True
- name: Bootstrap XenAPI compute node
vars:
xenapi_facts_path: "{{ xenapi_facts_root + '/' + xenapi_facts_file }}"
command: xenapi_bootstrap -i {{ xenserver_himn_ip }} -u {{ xenserver_username }} -p {{ xenserver_password }} -f {{ xenapi_facts_path }}
become: True
- name: Fetching XenAPI facts file
fetch:
src: "{{ xenapi_facts_root + '/' + xenapi_facts_file }}"
dest: "{{ xenapi_facts_root + '/' + inventory_hostname + '/' }}"
flat: yes
become: True


@@ -0,0 +1,119 @@
---
- name: Ensuring config directory exists
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
mode: "0770"
become: true
with_items:
- "nova-libvirt/secrets"
when: inventory_hostname in groups['compute']
- name: Copying over ceph.conf(s)
vars:
service_name: "{{ item }}"
merge_configs:
sources:
- "{{ role_path }}/../ceph/templates/ceph.conf.j2"
- "{{ node_custom_config }}/ceph.conf"
- "{{ node_custom_config }}/ceph/{{ inventory_hostname }}/ceph.conf"
dest: "{{ node_config_directory }}/{{ item }}/ceph.conf"
mode: "0660"
become: true
with_items:
- "nova-compute"
- "nova-libvirt"
when: inventory_hostname in groups['compute']
notify:
- Restart {{ item }} container
- include_tasks: ../../ceph_pools.yml
vars:
pool_name: "{{ nova_pool_name }}"
pool_type: "{{ nova_pool_type }}"
cache_mode: "{{ nova_cache_mode }}"
pool_pg_num: "{{ nova_pool_pg_num }}"
pool_pgp_num: "{{ nova_pool_pgp_num }}"
pool_application: "rbd"
- name: Pulling cephx keyring for nova
become: true
kolla_ceph_keyring:
name: client.nova
caps: "{{ ceph_client_nova_keyring_caps }}"
register: nova_cephx_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
run_once: True
- name: Pulling cinder cephx keyring for libvirt
become: true
command: docker exec ceph_mon ceph auth get-key client.cinder
register: cinder_cephx_raw_key
delegate_to: "{{ groups['ceph-mon'][0] }}"
when:
- enable_cinder | bool
- cinder_backend_ceph | bool
changed_when: False
run_once: True
- name: Pushing cephx keyring for nova
copy:
content: |
[client.nova]
key = {{ nova_cephx_key.keyring.key }}
dest: "{{ node_config_directory }}/nova-compute/ceph.client.nova.keyring"
mode: "0600"
become: true
when: inventory_hostname in groups['compute']
notify:
- Restart nova-compute container
- name: Pushing secrets xml for libvirt
template:
src: "secret.xml.j2"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.xml"
mode: "0600"
become: true
when:
- inventory_hostname in groups['compute']
- item.enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
name: client.nova secret
enabled: true
- uuid: "{{ cinder_rbd_secret_uuid }}"
name: client.cinder secret
enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool}}"
notify:
- Restart nova-libvirt container
- name: Pushing secrets key for libvirt
copy:
content: "{{ item.content }}"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.base64"
mode: "0600"
become: true
when:
- inventory_hostname in groups['compute']
- item.enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
content: "{{ nova_cephx_key.keyring.key }}"
enabled: true
- uuid: "{{ cinder_rbd_secret_uuid }}"
content: "{{ cinder_cephx_raw_key.stdout|default('') }}"
enabled: "{{ enable_cinder | bool and cinder_backend_ceph | bool}}"
notify:
- Restart nova-libvirt container
- name: Ensuring config directory has correct owner and permission
become: true
file:
path: "{{ node_config_directory }}/{{ item }}"
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
with_items:
- "nova-compute"
- "nova-libvirt/secrets"
when: inventory_hostname in groups['compute']
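
The "Pushing secrets xml for libvirt" task renders secret.xml.j2, which is not
shown in this excerpt. Libvirt Ceph secret definitions have a fixed XML shape,
so the template plausibly looks like the following sketch, with item.uuid and
item.name supplied by the with_items loop above:

<secret ephemeral='no' private='no'>
  <uuid>{{ item.uuid }}</uuid>
  <usage type='ceph'>
    <name>{{ item.name }}</name>
  </usage>
</secret>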


@@ -0,0 +1,20 @@
---
- name: Check nova containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
image: "{{ item.value.image }}"
environment: "{{ item.value.environment|default(omit) }}"
pid_mode: "{{ item.value.pid_mode|default('') }}"
ipc_mode: "{{ item.value.ipc_mode|default(omit) }}"
privileged: "{{ item.value.privileged|default(False) }}"
volumes: "{{ item.value.volumes|reject('equalto', '')|list }}"
dimensions: "{{ item.value.dimensions }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ nova_services }}"
notify:
- "Restart {{ item.key }} container"


@@ -0,0 +1 @@
---


@@ -0,0 +1,7 @@
---
- name: Cloning nova source repository for development
git:
repo: "{{ nova_git_repository }}"
dest: "{{ kolla_dev_repos_directory }}/{{ project_name }}"
update: "{{ nova_dev_repos_pull }}"
version: "{{ nova_source_version }}"


@@ -0,0 +1,14 @@
---
- name: Copying over libvirt TLS keys {{ file }}
become: true
copy:
src: "{{ first_found }}"
dest: "{{ node_config_directory }}/{{ service_name }}/{{ file }}"
mode: "0600"
with_first_found:
- "{{ node_custom_config }}/nova/nova-libvirt/{{ inventory_hostname }}/{{ file }}"
- "{{ node_custom_config }}/nova/nova-libvirt/{{ file }}"
loop_control:
loop_var: first_found
notify:
- Restart {{ service_name }} container
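
The with_first_found lookup above searches a per-host directory before the
shared one, so host-specific certificates take precedence. Assuming the usual
/etc/kolla/config value for node_custom_config, the expected layout is roughly
as follows (compute01 is a hypothetical host name):

/etc/kolla/config/nova/nova-libvirt/cacert.pem
/etc/kolla/config/nova/nova-libvirt/servercert.pem
/etc/kolla/config/nova/nova-libvirt/serverkey.pem
/etc/kolla/config/nova/nova-libvirt/clientcert.pem
/etc/kolla/config/nova/nova-libvirt/clientkey.pem
/etc/kolla/config/nova/nova-libvirt/compute01/cacert.pem   # per-host override wins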


@@ -0,0 +1,68 @@
---
- name: Ensuring config directories exist
become: true
file:
path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}"
state: "directory"
mode: "0770"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
notify:
- Restart nova-compute-fake containers
- name: Copying over config.json files for services
become: true
template:
src: "nova-compute.json.j2"
dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/config.json"
mode: "0660"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
notify:
- Restart nova-compute-fake containers
- name: Copying over nova.conf
become: true
vars:
service_name: "{{ item }}"
merge_configs:
sources:
- "{{ role_path }}/templates/nova.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/nova.conf"
- "{{ node_custom_config }}/nova/{{ item }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
dest: "{{ node_config_directory }}/nova-compute-fake-{{ item }}/nova.conf"
mode: "0660"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
- name: Ensuring config directory has correct owner and permission
become: true
file:
path: "{{ node_config_directory }}/nova-compute-fake-{{ item }}"
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
notify:
- Restart nova-compute-fake containers
- name: Check nova-compute-fake containers
become: true
kolla_docker:
action: "compare_container"
common_options: "{{ docker_common_options }}"
name: "nova_compute_fake_{{ item }}"
image: "{{ nova_compute_image_full }}"
privileged: True
volumes:
- "{{ node_config_directory }}/nova-compute-fake-{{ item }}/:{{ container_config_directory }}/:ro"
- "/etc/localtime:/etc/localtime:ro"
- "/lib/modules:/lib/modules:ro"
- "/run:/run:shared"
- "kolla_logs:/var/log/kolla/"
with_sequence: start=1 end={{ num_nova_fake_per_node }}
when:
- kolla_action != "config"
- inventory_hostname in groups['compute']
- enable_nova_fake | bool
notify:
- Restart nova-compute-fake containers


@@ -0,0 +1,230 @@
---
- name: Setting sysctl values
become: true
sysctl: name={{ item.name }} value={{ item.value }} sysctl_set=yes
with_items:
- { name: "net.bridge.bridge-nf-call-iptables", value: 1}
- { name: "net.bridge.bridge-nf-call-ip6tables", value: 1}
- { name: "net.ipv4.conf.all.rp_filter", value: "{{ nova_compute_host_rp_filter_mode }}"}
- { name: "net.ipv4.conf.default.rp_filter", value: "{{ nova_compute_host_rp_filter_mode }}"}
when:
- set_sysctl | bool
- inventory_hostname in groups['compute']
- name: Ensuring config directories exist
become: true
file:
path: "{{ node_config_directory }}/{{ item.key }}"
state: "directory"
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
mode: "0770"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ nova_services }}"
- include_tasks: ceph.yml
when:
- enable_ceph | bool and nova_backend == "rbd"
- inventory_hostname in groups['ceph-mon'] or
inventory_hostname in groups['compute'] or
inventory_hostname in groups['nova-api'] or
inventory_hostname in groups['nova-conductor'] or
inventory_hostname in groups['nova-novncproxy'] or
inventory_hostname in groups['nova-scheduler']
- include_tasks: external_ceph.yml
when:
- not enable_ceph | bool and (nova_backend == "rbd" or cinder_backend_ceph | bool)
- inventory_hostname in groups['compute']
- name: Check if policies shall be overwritten
local_action: stat path="{{ item }}"
run_once: True
register: nova_policy
with_first_found:
- files: "{{ supported_policy_format_list }}"
paths:
- "{{ node_custom_config }}/nova/"
skip: true
- name: Set nova policy file
set_fact:
nova_policy_file: "{{ nova_policy.results.0.stat.path | basename }}"
nova_policy_file_path: "{{ nova_policy.results.0.stat.path }}"
when:
- nova_policy.results
- name: Copying over config.json files for services
become: true
template:
src: "{{ item.key }}.json.j2"
dest: "{{ node_config_directory }}/{{ item.key }}/config.json"
mode: "0660"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ nova_services }}"
notify:
- "Restart {{ item.key }} container"
- name: Set XenAPI facts
set_fact:
xenapi_facts: "{{ lookup('file', xenapi_facts_root + '/' + inventory_hostname + '/' + xenapi_facts_file) | from_json }}"
when:
- nova_compute_virt_type == 'xenapi'
- inventory_hostname in groups['compute']
- name: Copying over nova.conf
become: true
vars:
service_name: "{{ item.key }}"
merge_configs:
sources:
- "{{ role_path }}/templates/nova.conf.j2"
- "{{ node_custom_config }}/global.conf"
- "{{ node_custom_config }}/nova.conf"
- "{{ node_custom_config }}/nova/{{ item.key }}.conf"
- "{{ node_custom_config }}/nova/{{ inventory_hostname }}/nova.conf"
dest: "{{ node_config_directory }}/{{ item.key }}/nova.conf"
mode: "0660"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
- item.key in nova_services_require_nova_conf
with_dict: "{{ nova_services }}"
notify:
- "Restart {{ item.key }} container"
- name: Copying over libvirt configuration
become: true
vars:
service: "{{ nova_services['nova-libvirt'] }}"
template:
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/nova-libvirt/{{ item.dest }}"
mode: "0660"
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_items:
- { src: "qemu.conf.j2", dest: "qemu.conf" }
- { src: "libvirtd.conf.j2", dest: "libvirtd.conf" }
notify:
- Restart nova-libvirt container
- name: Copying over libvirt TLS keys (nova-libvirt)
include_tasks: "config-libvirt-tls.yml"
vars:
service: "{{ nova_services['nova-libvirt'] }}"
service_name: nova-libvirt
file: "{{ item }}"
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- libvirt_tls | bool
- libvirt_tls_manage_certs | bool
with_items:
- cacert.pem
- servercert.pem
- serverkey.pem
- clientcert.pem
- clientkey.pem
- name: Copying over libvirt TLS keys (nova-compute)
include_tasks: "config-libvirt-tls.yml"
vars:
service: "{{ nova_services['nova-compute'] }}"
service_name: nova-compute
file: "{{ item }}"
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
- libvirt_tls | bool
- libvirt_tls_manage_certs | bool
with_items:
- cacert.pem
- clientcert.pem
- clientkey.pem
- name: Copying files for nova-ssh
become: true
vars:
service: "{{ nova_services['nova-ssh'] }}"
template:
src: "{{ item.src }}"
dest: "{{ node_config_directory }}/nova-ssh/{{ item.dest }}"
mode: "0660"
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
with_items:
- { src: "sshd_config.j2", dest: "sshd_config" }
- { src: "id_rsa", dest: "id_rsa" }
- { src: "id_rsa.pub", dest: "id_rsa.pub" }
- { src: "ssh_config.j2", dest: "ssh_config" }
notify:
- Restart nova-ssh container
- name: Copying VMware vCenter CA file
vars:
service: "{{ nova_services['nova-compute'] }}"
copy:
src: "{{ node_custom_config }}/vmware_ca"
dest: "{{ node_config_directory }}/nova-compute/vmware_ca"
mode: "0660"
when:
- nova_compute_virt_type == "vmware"
- not vmware_vcenter_insecure | bool
- inventory_hostname in groups[service.group]
- service.enabled | bool
notify:
- Restart nova-compute container
- name: Copying 'release' file for nova_compute
vars:
service: "{{ nova_services['nova-compute'] }}"
copy:
src: "{{ item }}"
dest: "{{ node_config_directory }}/nova-compute/release"
mode: "0660"
with_first_found:
- files:
- "{{ node_custom_config }}/nova_compute/{{ inventory_hostname }}/release"
- "{{ node_custom_config }}/nova_compute/release"
- "{{ node_custom_config }}/nova/release"
skip: true
when:
- inventory_hostname in groups[service.group]
- service.enabled | bool
notify:
- Restart nova-compute container
- name: Copying over existing policy file
become: true
vars:
services_require_policy_json:
- nova-api
- nova-compute
- nova-compute-ironic
- nova-conductor
- nova-novncproxy
- nova-serialproxy
- nova-scheduler
- nova-spicehtml5proxy
template:
src: "{{ nova_policy_file_path }}"
dest: "{{ node_config_directory }}/{{ item.key }}/{{ nova_policy_file }}"
mode: "0660"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
- nova_policy_file is defined
- item.key in services_require_policy_json
with_dict: "{{ nova_services }}"
notify:
- "Restart {{ item.key }} container"
- include_tasks: check-containers.yml
when: kolla_action != "config"
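
The merge_configs sources in "Copying over nova.conf" define override
precedence: later sources override earlier ones, so a per-host file wins over
the service-wide file, which in turn wins over the role template. A
hypothetical deployer override showing the last two levels (option values are
illustrative; compute01 is a placeholder host name):

# {{ node_custom_config }}/nova/nova-compute.conf -- applies to every nova-compute
[libvirt]
cpu_mode = host-model

# {{ node_custom_config }}/nova/compute01/nova.conf -- per-host, highest precedence
[DEFAULT]
debug = True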


@@ -0,0 +1,128 @@
---
- name: Create cell0 mappings
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 map_cell0'
common_options: "{{ docker_common_options }}"
detach: False
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "create_cell0_nova"
restart_policy: no
volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
register: map_cell0
changed_when:
- map_cell0 is success
- '"Cell0 is already setup" not in map_cell0.stdout'
failed_when:
- map_cell0.rc != 0
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
- include_tasks: bootstrap_service.yml
when: map_cell0.changed
- name: Get list of existing cells
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 list_cells --verbose'
common_options: "{{ docker_common_options }}"
detach: False
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "list_cells_nova"
restart_policy: no
volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
register: existing_cells_list
changed_when: false
failed_when:
- existing_cells_list.rc != 0
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
- name: Check if a base cell already exists
vars:
nova_api: "{{ nova_services['nova-api'] }}"
# We match lines containing a UUID in a column (except the one used by
# cell0), followed by two other columns, the first containing the transport
# URL and the second the database connection. For example:
#
# | None | 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b | rabbit://openstack:password@1.2.3.4:5672 | mysql+pymysql://nova:password@1.2.3.4:3306/nova | False |
#
# NOTE(priteau): regexp doesn't support passwords containing spaces
regexp: '\| +(?!00000000-0000-0000-0000-000000000000)([0-9a-f\-]+) +\| +([^ ]+) +\| +([^ ]+) +\| +([^ ]+) +\|$'
set_fact:
existing_cells: "{{ existing_cells_list.stdout | regex_findall(regexp, multiline=True) }}"
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
- name: Create base cell for legacy instances
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
command: bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 create_cell'
common_options: "{{ docker_common_options }}"
detach: False
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "create_cell_nova"
restart_policy: no
volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
register: base_cell
changed_when:
- base_cell is success
failed_when:
- base_cell.rc != 0
- '"already exists" not in base_cell.stdout'
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
when: existing_cells | length == 0
- name: Update base cell for legacy instances
vars:
connection_url: "mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}"
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
command: "bash -c 'sudo -E kolla_set_configs && nova-manage cell_v2 update_cell --cell_uuid {{ existing_cells[0][0] }}'"
common_options: "{{ docker_common_options }}"
detach: False
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "create_cell_nova"
restart_policy: no
volumes: "{{ nova_api.volumes|reject('equalto', '')|list }}"
register: base_cell
changed_when:
- base_cell is success
failed_when:
- base_cell.rc != 0
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
when:
- existing_cells | length == 1
- existing_cells[0][1] != rpc_transport_url or existing_cells[0][2] != connection_url
- name: Print warning if a duplicate cell is detected
vars:
nova_api: "{{ nova_services['nova-api'] }}"
fail:
msg: Multiple base cells detected, manual cleanup with `nova-manage cell_v2` may be required.
ignore_errors: yes
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"
when:
- existing_cells | length > 1
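
When the duplicate-cell warning above fires, cleanup is left to the operator.
A sketch of the kind of commands involved, run against the nova_api container
(the UUID is a placeholder taken from the example output earlier in this file):

docker exec nova_api nova-manage cell_v2 list_cells --verbose
docker exec nova_api nova-manage cell_v2 delete_cell --cell_uuid 68a3f49e-27ec-422f-9e2e-2a4e5dc8291b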


@@ -0,0 +1,2 @@
---
- import_tasks: check-containers.yml


@@ -0,0 +1,31 @@
---
- include_tasks: register.yml
when: inventory_hostname in groups['nova-api']
- include_tasks: bootstrap_xenapi.yml
when:
- inventory_hostname in groups['compute']
- nova_compute_virt_type == "xenapi"
- include_tasks: clone.yml
when: nova_dev_mode | bool
- include_tasks: config.yml
- include_tasks: config-nova-fake.yml
when:
- enable_nova_fake | bool
- inventory_hostname in groups['compute']
- include_tasks: bootstrap.yml
when: inventory_hostname in groups['nova-api'] or
inventory_hostname in groups['compute']
- include_tasks: create_cells.yml
when: inventory_hostname in groups['nova-api']
- name: Flush handlers
meta: flush_handlers
- include_tasks: discover_computes.yml
when: inventory_hostname in groups['nova-api']


@@ -0,0 +1,82 @@
---
# We need to wait for all expected compute services to register before running
# cells v2 host discovery. This includes virtualised compute services and
# ironic compute services.
# Work with --limit by including only hosts in ansible_play_batch.
- name: Build a list of expected compute service hosts
vars:
# For virt, use ansible_nodename rather than inventory_hostname, since this
# is similar to what nova uses internally as its default for the
# [DEFAULT] host config option.
virt_compute_service_hosts: >-
{{ groups['compute'] |
intersect(ansible_play_batch) |
map('extract', hostvars, 'ansible_nodename') |
list }}
# For ironic, use {{ansible_hostname}}-ironic since this is what we
# configure for [DEFAULT] host in nova.conf.
ironic_compute_service_hosts: >-
{{ (groups['nova-compute-ironic'] |
intersect(ansible_play_batch) |
map('extract', hostvars, 'ansible_hostname') |
map('regex_replace', '^(.*)$', '\1-ironic') |
list)
if enable_ironic | bool else [] }}
set_fact:
expected_compute_service_hosts: "{{ virt_compute_service_hosts + ironic_compute_service_hosts }}"
run_once: True
delegate_to: "{{ groups['nova-api'][0] }}"
- name: Waiting for nova-compute services to register themselves
become: true
command: >
docker exec kolla_toolbox openstack
--os-interface internal
--os-auth-url {{ keystone_admin_url }}
--os-identity-api-version 3
--os-project-domain-name {{ openstack_auth.domain_name }}
--os-tenant-name {{ openstack_auth.project_name }}
--os-username {{ openstack_auth.username }}
--os-password {{ keystone_admin_password }}
--os-user-domain-name {{ openstack_auth.domain_name }}
--os-region-name {{ openstack_region_name }}
{% if openstack_cacert != '' %}--os-cacert {{ openstack_cacert }}{% endif %}
compute service list --format json --column Host --service nova-compute
register: nova_compute_services
changed_when: false
run_once: True
delegate_to: "{{ groups['nova-api'][0] }}"
retries: 20
delay: 10
until:
- nova_compute_services is success
# A list containing the 'Host' field of compute services that have
# registered themselves. Don't exclude compute services that are disabled
# since these could have been explicitly disabled by the operator. While we
# could exclude services that are down, the nova-manage cell_v2
# discover_hosts does not do this so let's not block on it here.
# NOTE(mgoddard): Cannot factor this out into an intermediary variable
# before ansible 2.8, due to
# https://bugs.launchpad.net/kolla-ansible/+bug/1835817.
- (nova_compute_services.stdout |
from_json |
map(attribute='Host') |
list)
is superset(expected_compute_service_hosts)
# TODO(yoctozepto): no need to do --by-service if ironic not used
- name: Discover nova hosts
become: true
command: >
docker exec nova_api nova-manage cell_v2 discover_hosts --by-service
changed_when: False
run_once: True
delegate_to: "{{ groups['nova-api'][0] }}"
# NOTE(yoctozepto): SIGHUP is probably unnecessary
- name: Refresh cell cache in nova scheduler
become: true
command: docker kill --signal HUP nova_scheduler
changed_when: False
when:
- inventory_hostname in groups['nova-scheduler']
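
The until condition above parses the JSON produced by "openstack compute
service list --format json --column Host" and checks that it is a superset of
the expected hosts. The parsed output is a list of single-key objects, roughly
of this shape (host names are hypothetical):

[
  {"Host": "compute01"},
  {"Host": "compute02"},
  {"Host": "controller01-ironic"}
]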


@@ -0,0 +1,129 @@
---
- name: Ensuring config directory exists
file:
path: "{{ node_config_directory }}/{{ item }}"
state: "directory"
mode: "0770"
become: true
with_items:
- "nova-libvirt/secrets"
when: inventory_hostname in groups['compute']
- name: Check nova keyring file
local_action: stat path="{{ node_custom_config }}/nova/ceph.client.nova.keyring"
run_once: True
register: nova_cephx_keyring_file
failed_when: not nova_cephx_keyring_file.stat.exists
when:
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
- name: Check cinder keyring file
local_action: stat path="{{ node_custom_config }}/nova/ceph.client.cinder.keyring"
run_once: True
register: cinder_cephx_keyring_file
failed_when: not cinder_cephx_keyring_file.stat.exists
when:
- cinder_backend_ceph | bool
- external_ceph_cephx_enabled | bool
# NOTE: nova-compute and nova-libvirt only need ceph.client.nova.keyring.
- name: Copy over ceph nova keyring file
copy:
src: "{{ nova_cephx_keyring_file.stat.path }}"
dest: "{{ node_config_directory }}/{{ item }}/"
mode: "0660"
become: true
with_items:
- nova-compute
- nova-libvirt
when:
- inventory_hostname in groups['compute']
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
notify:
- Restart {{ item }} container
- name: Copy over ceph.conf
template:
src: "{{ node_custom_config }}/nova/ceph.conf"
dest: "{{ node_config_directory }}/{{ item }}/"
mode: "0660"
become: true
with_items:
- nova-compute
- nova-libvirt
when:
- inventory_hostname in groups['compute']
- nova_backend == "rbd"
notify:
- Restart {{ item }} container
- name: Pushing nova secret xml for libvirt
template:
src: "secret.xml.j2"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.xml"
mode: "0600"
become: true
when:
- inventory_hostname in groups['compute']
- item.enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
name: "client.nova secret"
enabled: "{{ nova_backend == 'rbd' }}"
- uuid: "{{ cinder_rbd_secret_uuid }}"
name: "client.cinder secret"
enabled: "{{ cinder_backend_ceph }}"
notify:
- Restart nova-libvirt container
- name: Extract nova key from file
local_action: shell cat "{{ nova_cephx_keyring_file.stat.path }}" | grep -E 'key\s*=' | awk '{ print $3 }'
changed_when: false
run_once: True
register: nova_cephx_raw_key
when:
- nova_backend == "rbd"
- external_ceph_cephx_enabled | bool
- name: Extract cinder key from file
local_action: shell cat "{{ cinder_cephx_keyring_file.stat.path }}" | grep -E 'key\s*=' | awk '{ print $3 }'
changed_when: false
run_once: True
register: cinder_cephx_raw_key
when:
- cinder_backend_ceph | bool
- external_ceph_cephx_enabled | bool
- name: Pushing secrets key for libvirt
copy:
content: "{{ item.result.stdout }}"
dest: "{{ node_config_directory }}/nova-libvirt/secrets/{{ item.uuid }}.base64"
mode: "0600"
become: true
when:
- inventory_hostname in groups['compute']
- item.enabled | bool
- external_ceph_cephx_enabled | bool
with_items:
- uuid: "{{ rbd_secret_uuid }}"
result: "{{ nova_cephx_raw_key }}"
enabled: "{{ nova_backend == 'rbd' }}"
- uuid: "{{ cinder_rbd_secret_uuid }}"
result: "{{ cinder_cephx_raw_key }}"
enabled: "{{ cinder_backend_ceph }}"
notify:
- Restart nova-libvirt container
- name: Ensuring config directory has correct owner and permission
become: true
file:
path: "{{ node_config_directory }}/{{ item }}"
recurse: yes
owner: "{{ config_owner_user }}"
group: "{{ config_owner_group }}"
with_items:
- "nova-compute"
- "nova-libvirt/secrets"
when: inventory_hostname in groups['compute']
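
The keyring files validated above use the standard Ceph keyring format; the
"Extract ... key from file" shell pipelines simply pull the base64 value from
the "key =" line. An illustrative ceph.client.nova.keyring (the key is a dummy
value):

[client.nova]
    key = AQBSdP9cAAAAABAA6yQPRmXckfnVXN2S3ISpmw==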


@@ -0,0 +1,7 @@
---
- name: "Configure haproxy for {{ project_name }}"
import_role:
role: haproxy-config
vars:
project_services: "{{ nova_services }}"
tags: always


@@ -0,0 +1,2 @@
---
- include_tasks: "{{ kolla_action }}.yml"


@@ -0,0 +1,139 @@
---
- name: Get container facts
become: true
kolla_container_facts:
name:
- nova_api
- nova_novncproxy
- nova_serialproxy
- nova_spicehtml5proxy
- nova_ssh
- nova_libvirt
register: container_facts
- name: Checking available compute nodes in inventory
vars:
nova_compute_ironic: "{{ nova_services['nova-compute-ironic'] }}"
fail:
msg: >
      At least one compute node is required in the inventory when ironic is disabled.
when:
- groups['compute'] | length < 1
- not nova_compute_ironic.enabled | bool
- name: Checking free port for Nova API
vars:
nova_api: "{{ nova_services['nova-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_api_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_api'] is not defined
- inventory_hostname in groups[nova_api.group]
- nova_api.enabled | bool
- name: Checking free port for Nova Metadata
vars:
nova_api: "{{ nova_services['nova-api'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_metadata_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_api'] is not defined
- inventory_hostname in groups[nova_api.group]
- nova_api.enabled | bool
- name: Checking free port for Nova NoVNC Proxy
vars:
nova_novncproxy: "{{ nova_services['nova-novncproxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_novncproxy_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_novncproxy'] is not defined
- nova_novncproxy.enabled | bool
- inventory_hostname in groups[nova_novncproxy.group]
- name: Checking free port for Nova Serial Proxy
vars:
nova_serialproxy: "{{ nova_services['nova-serialproxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_serialproxy_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_serialproxy'] is not defined
- nova_serialproxy.enabled | bool
- inventory_hostname in groups[nova_serialproxy.group]
- name: Checking free port for Nova Spice HTML5 Proxy
vars:
nova_spicehtml5proxy: "{{ nova_services['nova-spicehtml5proxy'] }}"
wait_for:
host: "{{ api_interface_address }}"
port: "{{ nova_spicehtml5proxy_listen_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_spicehtml5proxy'] is not defined
- nova_spicehtml5proxy.enabled | bool
- inventory_hostname in groups[nova_spicehtml5proxy.group]
- name: Checking free port for Nova SSH
vars:
nova_ssh: "{{ nova_services['nova-ssh'] }}"
wait_for:
host: "{{ migration_interface_address }}"
port: "{{ nova_ssh_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_ssh'] is not defined
- nova_ssh.enabled | bool
- inventory_hostname in groups[nova_ssh.group]
- name: Checking free port for Nova Libvirt
vars:
nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
wait_for:
host: "{{ migration_interface_address }}"
port: "{{ nova_libvirt_port }}"
connect_timeout: 1
timeout: 1
state: stopped
when:
- container_facts['nova_libvirt'] is not defined
- nova_libvirt.enabled | bool
- inventory_hostname in groups[nova_libvirt.group]
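# A host-level libvirt daemon would hold /var/run/libvirt/libvirt-sock and
# conflict with the nova_libvirt container, so fail early if the socket
# exists.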
- name: Checking that libvirt is not running
vars:
nova_libvirt: "{{ nova_services['nova-libvirt'] }}"
stat:
path: /var/run/libvirt/libvirt-sock
register: result
failed_when: result.stat.exists
when:
- nova_compute_virt_type in ['kvm', 'qemu']
- container_facts['nova_libvirt'] is not defined
- inventory_hostname in groups[nova_libvirt.group]
# TODO(mgoddard): Remove this task in the Ussuri cycle.
- name: Check that legacy upgrade is not enabled
fail:
msg: >
Legacy upgrade support has been removed. 'nova_enable_rolling_upgrade'
should no longer be set.
when: not nova_enable_rolling_upgrade | default(true) | bool

@ -0,0 +1,11 @@
---
- name: Pulling nova images
become: true
kolla_docker:
action: "pull_image"
common_options: "{{ docker_common_options }}"
image: "{{ item.value.image }}"
when:
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
with_dict: "{{ nova_services }}"

@ -0,0 +1,2 @@
---
- include_tasks: deploy.yml

@ -0,0 +1,8 @@
---
- import_role:
name: service-ks-register
vars:
service_ks_register_auth: "{{ openstack_nova_auth }}"
service_ks_register_services: "{{ nova_ks_services }}"
service_ks_register_users: "{{ nova_ks_users }}"
tags: always

@ -0,0 +1,46 @@
---
# Create a new set of configs on the nodes
- include_tasks: config.yml
- include_tasks: bootstrap_service.yml
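# When nova_safety_upgrade is enabled, every nova service except
# nova-compute is stopped for the duration of the upgrade, trading API
# downtime for a simpler, safer migration.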
- name: Stopping all nova services except nova-compute
become: true
kolla_docker:
action: "stop_container"
common_options: "{{ docker_common_options }}"
name: "{{ item.value.container_name }}"
with_dict: "{{ nova_services }}"
when:
- "'nova-compute' not in item.key"
- inventory_hostname in groups[item.value.group]
- item.value.enabled | bool
- nova_safety_upgrade | bool
# TODO(donghm): The flush_handlers that restarts the nova services should
# run against nodes in serial to decrease downtime if the previous task
# did not run. Update when the Ansible strategy module for rolling
# upgrade is finished.
- name: Flush handlers
meta: flush_handlers
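# The migration runs once, in a short-lived bootstrap container on the
# first nova-api host; the KOLLA_OSM flag tells the image's start script
# to run the database migrations instead of starting the service.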
- name: Migrate Nova database
vars:
nova_api: "{{ nova_services['nova-api'] }}"
become: true
kolla_docker:
action: "start_container"
common_options: "{{ docker_common_options }}"
detach: False
environment:
KOLLA_OSM:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
image: "{{ nova_api.image }}"
labels:
BOOTSTRAP:
name: "bootstrap_nova"
restart_policy: no
volumes: "{{ nova_api.volumes }}"
run_once: True
delegate_to: "{{ groups[nova_api.group][0] }}"

@ -0,0 +1,6 @@
---
- import_role:
name: service-stop
vars:
project_services: "{{ nova_services }}"
service_name: "{{ project_name }}"

@ -0,0 +1,25 @@
---
- name: Check nova upgrade status
become: true
command: docker exec -t nova_api nova-status upgrade check
register: nova_upgrade_check_stdout
when: inventory_hostname == groups['nova-api'][0]
failed_when: false
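# nova-status exits 0 on success and 1 for warnings; exit codes of 2 or
# more indicate failures, which abort the upgrade in the next task.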
- name: Upgrade status check result
fail:
msg:
- "There was an upgrade status check failure!"
- "See the detail at https://docs.openstack.org/nova/latest/cli/nova-status.html#nova-status-checks"
vars:
first_nova_api_host: "{{ groups['nova-api'][0] }}"
when: hostvars[first_nova_api_host]['nova_upgrade_check_stdout']['rc'] not in [0, 1]
- include_tasks: rolling_upgrade.yml
# NOTE(jeffrey4l): Remove this task in the U cycle.
- name: Remove nova-consoleauth container
become: true
kolla_docker:
action: "remove_container"
name: "nova_consoleauth"

@ -0,0 +1 @@
{{ nova_ssh_key.private_key }}

@ -0,0 +1 @@
{{ nova_ssh_key.public_key }}

@ -0,0 +1,17 @@
{% if libvirt_tls | bool %}
listen_tls = 1
listen_tcp = 0
tls_port = "{{ nova_libvirt_port }}"
key_file = "/etc/pki/libvirt/private/serverkey.pem"
cert_file = "/etc/pki/libvirt/servercert.pem"
ca_file = "/etc/pki/CA/cacert.pem"
{% else %}
listen_tcp = 1
listen_tls = 0
auth_tcp = "none"
tcp_port = "{{ nova_libvirt_port }}"
ca_file = ""
{% endif %}
log_level = 3
log_outputs = "3:file:/var/log/kolla/libvirt/libvirtd.log"
listen_addr = "{{ migration_interface_address }}"

@ -0,0 +1,24 @@
{
"command": "nova-api",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,24 @@
{
"command": "nova-compute",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,66 @@
{
"command": "nova-compute",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" %},
{
"source": "{{ container_config_directory }}/ceph.*",
"dest": "/etc/ceph/",
"owner": "nova",
"perm": "0700"
}{% endif %}{% if nova_compute_virt_type == "vmware" and not vmware_vcenter_insecure | bool %},
{
"source": "{{ container_config_directory }}/vmware_ca",
"dest": "/etc/nova/vmware_ca",
"owner": "nova",
"perm": "0600"
}{% endif %}{% if libvirt_tls | bool %},
{
"source": "{{ container_config_directory }}/clientkey.pem",
"dest": "/etc/pki/libvirt/private/clientkey.pem",
"owner": "root:nova",
"perm": "0640"
},
{
"source": "{{ container_config_directory }}/clientcert.pem",
"dest": "/etc/pki/libvirt/clientcert.pem",
"owner": "root:nova",
"perm": "0640"
},
{
"source": "{{ container_config_directory }}/cacert.pem",
"dest": "/etc/pki/CA/cacert.pem",
"owner": "root:nova",
"perm": "0640"
}{% endif %},
{
"source": "{{ container_config_directory }}/release",
"dest": "/etc/nova/release",
"owner": "nova",
"perm": "0600",
"optional": true
}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
},
{
"path": "/var/lib/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,24 @@
{
"command": "nova-conductor",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,59 @@
{
"command": "/usr/sbin/libvirtd --listen",
"config_files": [
{
"source": "{{ container_config_directory }}/libvirtd.conf",
"dest": "/etc/libvirt/libvirtd.conf",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/qemu.conf",
"dest": "/etc/libvirt/qemu.conf",
"owner": "root",
"perm": "0600"
}{% if libvirt_tls | bool %},
{
"source": "{{ container_config_directory }}/serverkey.pem",
"dest": "/etc/pki/libvirt/private/serverkey.pem",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/servercert.pem",
"dest": "/etc/pki/libvirt/servercert.pem",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/clientkey.pem",
"dest": "/etc/pki/libvirt/private/clientkey.pem",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/clientcert.pem",
"dest": "/etc/pki/libvirt/clientcert.pem",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/cacert.pem",
"dest": "/etc/pki/CA/cacert.pem",
"owner": "root",
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" or cinder_backend_ceph | bool %},
{
"source": "{{ container_config_directory }}/secrets",
"dest": "/etc/libvirt/secrets",
"owner": "root",
"perm": "0600"
}{% endif %}{% if nova_backend == "rbd" %},
{
"source": "{{ container_config_directory }}/ceph.conf",
"dest": "/etc/ceph/ceph.conf",
"owner": "root",
"perm": "0600"
}{% endif %}
]
}

@ -0,0 +1,24 @@
{
"command": "nova-novncproxy",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,24 @@
{
"command": "nova-scheduler",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,18 @@
{
"command": "nova-serialproxy",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,24 @@
{
"command": "nova-spicehtml5proxy",
"config_files": [
{
"source": "{{ container_config_directory }}/nova.conf",
"dest": "/etc/nova/nova.conf",
"owner": "nova",
"perm": "0600"
}{% if nova_policy_file is defined %},
{
"source": "{{ container_config_directory }}/{{ nova_policy_file }}",
"dest": "/etc/nova/{{ nova_policy_file }}",
"owner": "nova",
"perm": "0600"
}{% endif %}
],
"permissions": [
{
"path": "/var/log/kolla/nova",
"owner": "nova:nova",
"recurse": true
}
]
}

@ -0,0 +1,29 @@
{
"command": "/usr/sbin/sshd -D",
"config_files": [
{
"source": "{{ container_config_directory }}/sshd_config",
"dest": "/etc/ssh/sshd_config",
"owner": "root",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/ssh_config",
"dest": "/var/lib/nova/.ssh/config",
"owner": "nova",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/id_rsa",
"dest": "/var/lib/nova/.ssh/id_rsa",
"owner": "nova",
"perm": "0600"
},
{
"source": "{{ container_config_directory }}/id_rsa.pub",
"dest": "/var/lib/nova/.ssh/authorized_keys",
"owner": "nova",
"perm": "0600"
}
]
}

@ -0,0 +1,24 @@
[libvirt]
{% if libvirt_tls | bool %}
connection_uri = "qemu+tls://{{ migration_hostname }}/system"
live_migration_uri = "qemu+tls://%s/system"
{% else %}
connection_uri = "qemu+tcp://{{ migration_interface_address }}/system"
{% endif %}
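# Without TLS, nova connects to libvirtd over unauthenticated TCP
# (auth_tcp = "none" in libvirtd.conf), relying on isolation of the
# migration network.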
{% if enable_ceph | bool and nova_backend == "rbd" %}
images_type = rbd
images_rbd_pool = {{ ceph_nova_pool_name }}
images_rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_user = nova
disk_cachemodes="network=writeback"
{% if nova_hw_disk_discard != '' %}
hw_disk_discard = {{ nova_hw_disk_discard }}
{% endif %}
{% endif %}
{% if nova_backend == "rbd" and external_ceph_cephx_enabled | bool %}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif %}
virt_type = {{ nova_compute_virt_type }}
{% if nova_libvirt_cpu_mode %}
cpu_mode = {{ nova_libvirt_cpu_mode }}
{% endif %}

@ -0,0 +1,288 @@
# nova.conf
[DEFAULT]
debug = {{ nova_logging_debug }}
log_dir = /var/log/kolla/nova
state_path = /var/lib/nova
osapi_compute_listen = {{ api_interface_address }}
osapi_compute_listen_port = {{ nova_api_listen_port }}
osapi_compute_workers = {{ openstack_service_workers }}
metadata_workers = {{ openstack_service_workers }}
metadata_listen = {{ api_interface_address }}
metadata_listen_port = {{ nova_metadata_listen_port }}
allow_resize_to_same_host = true
{% if service_name == "nova-compute-ironic" %}
host = {{ ansible_hostname }}-ironic
log_file = /var/log/kolla/nova/nova-compute-ironic.log
compute_driver = ironic.IronicDriver
ram_allocation_ratio = 1.0
reserved_host_memory_mb = 0
{% elif enable_nova_fake | bool %}
host = {{ ansible_hostname }}_{{ service_name }}
compute_driver = fake.FakeDriver
{% elif nova_compute_virt_type == 'vmware' %}
compute_driver = vmwareapi.VMwareVCDriver
{% elif nova_compute_virt_type == 'xenapi' %}
compute_driver = xenapi.XenAPIDriver
{% if service_name == 'nova-compute' %}
host = xenapi_facts['dom0_hostname']
{% endif %}
{% else %}
compute_driver = libvirt.LibvirtDriver
{% endif %}
# Though my_ip is not used directly, lots of other variables use $my_ip
my_ip = {{ api_interface_address }}
{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool %}
instance_usage_audit = True
instance_usage_audit_period = hour
{% if enable_watcher | bool %}
compute_monitors = nova.compute.monitors.cpu.virt_driver
{% endif %}
{% endif %}
transport_url = {{ rpc_transport_url }}
{% if enable_blazar | bool %}
[filter_scheduler]
available_filters = nova.scheduler.filters.all_filters
available_filters = blazarnova.scheduler.filters.blazar_filter.BlazarFilter
enabled_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter,BlazarFilter
{% endif %}
[api]
use_forwarded_for = true
[conductor]
workers = {{ openstack_service_workers }}
{% if nova_console == 'novnc' %}
[vnc]
{% if service_name == "nova-compute-ironic" %}
enabled = false
{% else %}
novncproxy_host = {{ api_interface_address }}
novncproxy_port = {{ nova_novncproxy_listen_port }}
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
novncproxy_base_url = {{ public_protocol }}://{{ nova_novncproxy_fqdn }}:{{ nova_novncproxy_port }}/vnc_auto.html
{% endif %}
{% endif %}
{% elif nova_console == 'spice' %}
[vnc]
# We have to turn off VNC to use SPICE
enabled = false
[spice]
enabled = true
server_listen = {{ api_interface_address }}
server_proxyclient_address = {{ api_interface_address }}
{% if inventory_hostname in groups['compute'] %}
html5proxy_base_url = {{ public_protocol }}://{{ nova_spicehtml5proxy_fqdn }}:{{ nova_spicehtml5proxy_port }}/spice_auto.html
{% endif %}
html5proxy_host = {{ api_interface_address }}
html5proxy_port = {{ nova_spicehtml5proxy_listen_port }}
{% elif nova_console == 'none' %}
[vnc]
enabled = false
[spice]
enabled = false
{% endif %}
{% if enable_nova_serialconsole_proxy | bool %}
[serial_console]
enabled = true
base_url = {{ nova_serialproxy_protocol }}://{{ nova_serialproxy_fqdn }}:{{ nova_serialproxy_port }}/
serialproxy_host = {{ api_interface_address }}
serialproxy_port = {{ nova_serialproxy_listen_port }}
proxyclient_address = {{ api_interface_address }}
{% endif %}
{% if service_name == "nova-compute-ironic" %}
[ironic]
username = {{ ironic_keystone_user }}
password = {{ ironic_keystone_password }}
auth_url = {{ openstack_auth.auth_url }}/v3
auth_type = password
project_name = service
user_domain_name = {{ default_user_domain_name }}
project_domain_name = {{ default_project_domain_name }}
endpoint_override = {{ internal_protocol }}://{{ ironic_internal_fqdn }}:{{ ironic_api_port }}/v1
{% endif %}
[oslo_middleware]
enable_proxy_headers_parsing = True
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[glance]
api_servers = {{ internal_protocol }}://{{ glance_internal_fqdn }}:{{ glance_api_port }}
num_retries = {{ groups['glance-api'] | length }}
{% if enable_cinder | bool %}
[cinder]
catalog_info = volumev3:cinderv3:internalURL
os_region_name = {{ openstack_region_name }}
{% endif %}
[neutron]
metadata_proxy_shared_secret = {{ metadata_secret }}
service_metadata_proxy = true
{% if neutron_plugin_agent == 'vmware_nsxv3' %}
ovs_bridge = {{ ovs_bridge }}
{% endif %}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_name = {{ default_project_domain_name }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ neutron_keystone_user }}
password = {{ neutron_keystone_password }}
region_name = {{ openstack_region_name }}
valid_interfaces = internal
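# Compute services do not talk to the database directly; all DB access is
# mediated by nova-conductor, so connection strings are rendered only for
# non-compute services.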
{% if not service_name.startswith('nova-compute') %}
[database]
connection = mysql+pymysql://{{ nova_database_user }}:{{ nova_database_password }}@{{ nova_database_address }}/{{ nova_database_name }}
max_pool_size = 50
max_overflow = 1000
max_retries = -1
[api_database]
connection = mysql+pymysql://{{ nova_api_database_user }}:{{ nova_api_database_password }}@{{ nova_api_database_address }}/{{ nova_api_database_name }}
max_retries = -1
{% endif %}
[cache]
backend = oslo_cache.memcache_pool
enabled = True
memcache_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
{% if service_name == 'nova-compute' %}
{% if nova_compute_virt_type in ['kvm', 'qemu'] %}
{# This must be an include because Ansible 2.8 (and earlier) does not allow defined variables to reference undefined variables: migration_interface_address here. #}
{# see https://github.com/ansible/ansible/issues/58835 #}
{% include 'nova.conf.d/libvirt.conf.j2' %}
{% endif %}
{% endif %}
{% if nova_compute_virt_type == "vmware" %}
[vmware]
host_ip = {{ vmware_vcenter_host_ip }}
host_username = {{ vmware_vcenter_host_username }}
host_password = {{ vmware_vcenter_host_password }}
cluster_name = {{ vmware_vcenter_cluster_name }}
datastore_regex = {{ vmware_vcenter_datastore_regex }}
insecure = {{ vmware_vcenter_insecure }}
{% if not vmware_vcenter_insecure | bool %}
ca_file = /etc/nova/vmware_ca
{% endif %}
{% endif %}
[upgrade_levels]
compute = auto
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if nova_enabled_notification_topics %}
driver = messagingv2
topics = {{ nova_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}
{% if nova_policy_file is defined %}
[oslo_policy]
policy_file = {{ nova_policy_file }}
{% endif %}
[privsep_entrypoint]
helper_command = sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file /etc/nova/nova.conf
[glance]
debug = {{ nova_logging_debug }}
[guestfs]
debug = {{ nova_logging_debug }}
[wsgi]
api_paste_config = /etc/nova/api-paste.ini
{% if kolla_enable_tls_external | bool or kolla_enable_tls_internal | bool %}
secure_proxy_ssl_header = HTTP_X_FORWARDED_PROTO
{% endif %}
[scheduler]
max_attempts = 10
# NOTE(yoctozepto): kolla-ansible handles cell mapping by itself on each
# deploy, so the periodic run must be disabled to avoid random failures
# (where both try to map). -1 is the default and disables periodic discovery.
discover_hosts_in_cells_interval = -1
{% if enable_nova_fake | bool %}
default_filters = RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter
{% endif %}
[placement]
auth_type = password
auth_url = {{ keystone_admin_url }}
username = {{ placement_keystone_user }}
password = {{ placement_keystone_password }}
user_domain_name = {{ default_user_domain_name }}
project_name = service
project_domain_name = {{ default_project_domain_name }}
region_name = {{ openstack_region_name }}
os_interface = internal
[notifications]
{% if enable_ceilometer | bool or enable_searchlight | bool or enable_designate | bool or enable_neutron_infoblox_ipam_agent | bool %}
notify_on_state_change = vm_and_task_state
{% endif %}
{% if not enable_searchlight | bool %}
notification_format = unversioned
{% else %}
notification_format = both
{% endif %}
{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
{% if enable_barbican | bool %}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
{% endif %}
{% if nova_compute_virt_type == "xenapi" %}
[xenserver]
ovs_integration_bridge = br-int
connection_password = {{ xenserver_password }}
connection_username = {{ xenserver_username }}
connection_url = {{ xenserver_connect_protocol }}://{{ xenserver_himn_ip }}
{% endif %}

@ -0,0 +1,7 @@
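# stdio_handler "file" has QEMU write guest console output directly to log
# files, avoiding any dependency on virtlogd, which this container does
# not run.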
stdio_handler = "file"
user = "nova"
group = "nova"
max_files = {{ qemu_max_files }}
max_processes = {{ qemu_max_processes }}

@ -0,0 +1,6 @@
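<!-- Rendered once per Ceph secret (nova and, optionally, cinder).
     ephemeral='no' persists the definition across libvirtd restarts; the
     key value itself comes from the matching .base64 file. -->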
<secret ephemeral='no' private='no'>
<uuid>{{ item.uuid }}</uuid>
<usage type='ceph'>
<name>{{ item.name }}</name>
</usage>
</secret>

@ -0,0 +1,4 @@
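# Client settings for nova's resize/migration SSH: host key checking is
# disabled so compute hosts can be added or replaced without curating
# known_hosts entries.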
Host *
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
Port {{ nova_ssh_port }}

@ -0,0 +1,5 @@
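# The containerised sshd binds only to the migration interface on a
# dedicated port, so it does not clash with the host's sshd on port 22.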
Port {{ nova_ssh_port }}
ListenAddress {{ migration_interface_address }}
SyslogFacility AUTHPRIV
UsePAM yes