---
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## OpenStack Source Code Release
openstack_release: master

## Verbosity Options
debug: False

## SSH connection wait time
ssh_delay: 5

# Set the package install state for distribution packages
# Options are 'present' and 'latest'
package_state: "latest"

# Set "/var/log" to be a bind mount to the physical host.
default_bind_mount_logs: true

# Ensure that the package state matches the global setting
ceph_client_package_state: "{{ package_state }}"
galera_client_package_state: "{{ package_state }}"
pip_install_package_state: "{{ package_state }}"
rsyslog_client_package_state: "{{ package_state }}"

# These are pinned to ensure exactly the same behaviour forever!
# These pins are updated through the sources-branch-updater script
pip_packages:
  - pip==9.0.1
  - setuptools==28.8.0
  - wheel==0.29.0

pip_links:
  - { name: "openstack_release", link: "{{ openstack_repo_url }}/os-releases/{{ openstack_release }}/" }
pip_lock_to_internal_repo: "{{ (pip_links | length) >= 1 }}"

# The upper constraints to apply to all pip installations
pip_install_upper_constraints: "{{ openstack_repo_url }}/os-releases/{{ openstack_release }}/requirements_absolute_requirements.txt"
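# Illustrative only (not part of the defaults): because pip_lock_to_internal_repo
# evaluates to true whenever pip_links has at least one entry, adding a
# supplemental index in an override keeps pip locked to internal mirrors.
# The "local_mirror" name and URL below are hypothetical examples.
# pip_links:
#   - { name: "openstack_release", link: "{{ openstack_repo_url }}/os-releases/{{ openstack_release }}/" }
#   - { name: "local_mirror", link: "http://mirror.example.lan/pip/simple/" }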

## OpenStack source options
# URL for the frozen internal openstack repo.
repo_server_port: 8181
repo_pkg_cache_enabled: true
repo_pkg_cache_port: 3142
repo_pkg_cache_url: "http://{{ internal_lb_vip_address }}:{{ repo_pkg_cache_port }}"
openstack_repo_url: "http://{{ internal_lb_vip_address }}:{{ repo_server_port }}"
openstack_repo_git_url: "git://{{ internal_lb_vip_address }}"

## kernel modules for specific group hosts
# :param name: name of the kernel module
# :param pattern: pattern to search for in /boot/config-$kernel_version to check how module is configured inside kernel
# :param group: group of hosts where the module will be loaded
openstack_host_specific_kernel_modules:
  - { name: "ebtables", pattern: "CONFIG_BRIDGE_NF_EBTABLES", group: "network_hosts" }

## DNS resolution (resolvconf) options
# Group containing resolvers to configure
resolvconf_resolver_group: unbound_all

## Memcached options
memcached_port: 11211
memcached_servers: "{% for host in groups['memcached'] %}{{ hostvars[host]['ansible_host'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}"
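# Illustrative only: for two memcached hosts whose ansible_host values are
# 172.29.236.11 and 172.29.236.12 (example addresses), the template above
# renders as "172.29.236.11:11211,172.29.236.12:11211".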

## Galera
galera_address: "{{ internal_lb_vip_address }}"
galera_root_user: "root"

## RabbitMQ
rabbitmq_host_group: "rabbitmq_all"
rabbitmq_port: "{{ (rabbitmq_use_ssl | bool) | ternary(5671, 5672) }}"

rabbitmq_use_ssl: True
rabbitmq_servers: "{% for host in groups[rabbitmq_host_group] %}{{ hostvars[host]['ansible_host'] }}{% if not loop.last %},{% endif %}{% endfor %}"
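# Illustrative only: with rabbitmq_use_ssl set to True the ternary above
# resolves rabbitmq_port to 5671 (AMQP over TLS); with False it resolves to
# 5672. rabbitmq_servers renders as a comma-separated list of the
# ansible_host addresses of every member of the rabbitmq_all group.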

## Enable external SSL handling for general OpenStack services
openstack_external_ssl: true

## OpenStack global Endpoint Protos
openstack_service_publicuri_proto: https
#openstack_service_adminuri_proto: http
#openstack_service_internaluri_proto: http

## SSL
# These do not need to be configured unless you're creating certificates for
# services running behind Apache (currently, Horizon and Keystone).
ssl_protocol: "ALL -SSLv2 -SSLv3"
# Cipher suite string from https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
ssl_cipher_suite: "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS"

## Region Name
service_region: RegionOne

## OpenStack Domain
openstack_domain: openstack.local
lxc_container_domain: "{{ openstack_domain }}"

## DHCP Domain Name
dhcp_domain: openstacklocal

## LDAP enabled toggle
service_ldap_backend_enabled: "{{ keystone_ldap is defined and keystone_ldap.Default is defined }}"

## Aodh
aodh_service_region: "{{ service_region }}"
aodh_galera_user: aodh
aodh_galera_database: aodh
aodh_galera_address: "{{ internal_lb_vip_address }}"
aodh_connection_string: "mysql+pymysql://{{ aodh_galera_user }}:{{ aodh_container_db_password }}@{{ aodh_galera_address }}/{{ aodh_galera_database }}?charset=utf8"
aodh_rabbitmq_host_group: "{{ rabbitmq_host_group }}"


## Ceilometer
ceilometer_service_user_name: ceilometer
ceilometer_service_tenant_name: service

# These are here rather than in ceilometer_all because
# both the os_ceilometer and os_swift roles require them
ceilometer_rabbitmq_userid: ceilometer
ceilometer_rabbitmq_vhost: /ceilometer
ceilometer_rabbitmq_port: "{{ rabbitmq_port }}"
ceilometer_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
ceilometer_rabbitmq_servers: "{{ rabbitmq_servers }}"
ceilometer_rabbitmq_host_group: "{{ rabbitmq_host_group }}"


## Cinder
cinder_service_region: "{{ service_region }}"
cinder_service_port: 8776
# If there are Swift hosts in the environment, then enable cinder backups to it
cinder_service_backup_program_enabled: "{{ groups['swift_all'] is defined and groups['swift_all'] | length > 0 }}"
# cinder_backend_rbd_inuse: True if current host has an rbd backend
cinder_backend_rbd_inuse: '{{ (cinder_backends|default("")|to_json).find("cinder.volume.drivers.rbd.RBDDriver") != -1 }}'
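# Illustrative note: the expression above serialises the host's cinder_backends
# dict (if any) to a JSON string and searches it for the RBD driver path, so it
# is True only when a backend on this host uses cinder.volume.drivers.rbd.RBDDriver
# and False when cinder_backends is undefined.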
# cinder_backends_rbd_inuse: true if at least 1 cinder_backend on any
# cinder_volume host uses Ceph RBD
# http://stackoverflow.com/questions/9486393/jinja2-change-the-value-of-a-variable-inside-a-loop
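# The dict-update construct below (per the link above) works around Jinja2
# scoping: a plain {% set %} inside a for loop does not persist across
# iterations, whereas mutating a dict created outside the loop does.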
cinder_backends_rbd_inuse: >
  {% set _var = {'rbd_inuse': False} %}{%
  for host in groups.cinder_volume %}{%
  if hostvars[host].cinder_backend_rbd_inuse | bool %}{%
  if _var.update({'rbd_inuse': True }) %}{%
  endif %}{%
  endif %}{%
  endfor %}{{
  _var.rbd_inuse }}
cinder_ceph_client: cinder

# These are here rather than in cinder_all because
# both the os_ceilometer and os_cinder roles require them

# RPC
cinder_rabbitmq_userid: cinder
cinder_rabbitmq_vhost: /cinder
cinder_rabbitmq_port: "{{ rabbitmq_port }}"
cinder_rabbitmq_servers: "{{ rabbitmq_servers }}"
cinder_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
cinder_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
cinder_rabbitmq_telemetry_userid: "{{ cinder_rabbitmq_userid }}"
cinder_rabbitmq_telemetry_password: "{{ cinder_rabbitmq_password }}"
cinder_rabbitmq_telemetry_vhost: "{{ cinder_rabbitmq_vhost }}"
cinder_rabbitmq_telemetry_port: "{{ cinder_rabbitmq_port }}"
cinder_rabbitmq_telemetry_servers: "{{ cinder_rabbitmq_servers }}"
cinder_rabbitmq_telemetry_use_ssl: "{{ cinder_rabbitmq_use_ssl }}"
cinder_rabbitmq_telemetry_host_group: "{{ cinder_rabbitmq_host_group }}"

# If there are any Ceilometer hosts in the environment, then enable its usage
cinder_ceilometer_enabled: "{{ (groups['cinder_all'] is defined) and (groups['cinder_all'] | length > 0) and (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

## Glance
glance_service_port: 9292
glance_service_proto: http
glance_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(glance_service_proto) }}"
glance_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(glance_service_proto) }}"
glance_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(glance_service_proto) }}"
glance_service_publicuri: "{{ glance_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ glance_service_port }}"
glance_service_publicurl: "{{ glance_service_publicuri }}"
glance_service_internaluri: "{{ glance_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
glance_service_internalurl: "{{ glance_service_internaluri }}"
glance_service_adminuri: "{{ glance_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ glance_service_port }}"
glance_service_adminurl: "{{ glance_service_adminuri }}"
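# Illustrative only: with the default openstack_service_publicuri_proto of https
# and an external_lb_vip_address of 203.0.113.10 (example address), the public
# URL above renders as https://203.0.113.10:9292, while the admin and internal
# URLs stay on http against internal_lb_vip_address unless their protos are
# overridden.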
glance_api_servers: "{{ glance_service_internaluri }}"
glance_service_user_name: glance

# These are here rather than in glance_all because
# both the os_ceilometer and os_glance roles require them

## Gnocchi
# Used in both Gnocchi and Swift roles.
gnocchi_service_project_name: "{{ (gnocchi_storage_driver is defined and gnocchi_storage_driver == 'swift') | ternary('gnocchi_swift', 'service') }}"
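# Illustrative only: when gnocchi_storage_driver is set to 'swift' the ternary
# above resolves to the dedicated 'gnocchi_swift' project; for any other (or
# undefined) storage driver it falls back to the shared 'service' project.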

# RPC
glance_rabbitmq_userid: glance
glance_rabbitmq_vhost: /glance
glance_rabbitmq_port: "{{ rabbitmq_port }}"
glance_rabbitmq_servers: "{{ rabbitmq_servers }}"
glance_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
glance_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
glance_rabbitmq_telemetry_userid: "{{ glance_rabbitmq_userid }}"
glance_rabbitmq_telemetry_password: "{{ glance_rabbitmq_password }}"
glance_rabbitmq_telemetry_vhost: "{{ glance_rabbitmq_vhost }}"
glance_rabbitmq_telemetry_port: "{{ glance_rabbitmq_port }}"
glance_rabbitmq_telemetry_servers: "{{ glance_rabbitmq_servers }}"
glance_rabbitmq_telemetry_use_ssl: "{{ glance_rabbitmq_use_ssl }}"
glance_rabbitmq_telemetry_host_group: "{{ glance_rabbitmq_host_group }}"

# If there are any Ceilometer hosts in the environment, then enable its usage
glance_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

## Heat

# These are here rather than in heat_all because
# both the os_ceilometer and os_heat roles require them

# RPC
heat_rabbitmq_userid: heat
heat_rabbitmq_vhost: /heat
heat_rabbitmq_port: "{{ rabbitmq_port }}"
heat_rabbitmq_servers: "{{ rabbitmq_servers }}"
heat_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
heat_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
heat_rabbitmq_telemetry_userid: "{{ heat_rabbitmq_userid }}"
heat_rabbitmq_telemetry_password: "{{ heat_rabbitmq_password }}"
heat_rabbitmq_telemetry_vhost: "{{ heat_rabbitmq_vhost }}"
heat_rabbitmq_telemetry_port: "{{ heat_rabbitmq_port }}"
heat_rabbitmq_telemetry_servers: "{{ heat_rabbitmq_servers }}"
heat_rabbitmq_telemetry_use_ssl: "{{ heat_rabbitmq_use_ssl }}"
heat_rabbitmq_telemetry_host_group: "{{ heat_rabbitmq_host_group }}"

# If there are any Ceilometer hosts in the environment, then enable its usage
heat_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

## Ironic
ironic_keystone_auth_plugin: password
ironic_rabbitmq_userid: ironic
ironic_rabbitmq_vhost: /ironic
ironic_rabbitmq_port: "{{ rabbitmq_port }}"
ironic_rabbitmq_servers: "{{ rabbitmq_servers }}"
ironic_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
ironic_rabbitmq_host_group: "{{ rabbitmq_host_group }}"
ironic_service_name: ironic
ironic_service_proto: http
ironic_service_port: 6385
ironic_service_project_name: service
ironic_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(ironic_service_proto) }}"
ironic_service_adminurl: "{{ ironic_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ ironic_service_port }}"

## Keystone
keystone_admin_user_name: admin
keystone_admin_tenant_name: admin
keystone_admin_port: 35357
keystone_service_port: 5000
keystone_service_proto: http
keystone_service_region: "{{ service_region }}"

# These are here rather than in keystone_all because
# both the os_ceilometer and os_keystone roles require them

# RPC
keystone_rabbitmq_userid: keystone
keystone_rabbitmq_vhost: /keystone
keystone_rabbitmq_port: "{{ rabbitmq_port }}"
keystone_rabbitmq_servers: "{{ rabbitmq_servers }}"
keystone_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
keystone_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
keystone_rabbitmq_telemetry_userid: "{{ keystone_rabbitmq_userid }}"
keystone_rabbitmq_telemetry_password: "{{ keystone_rabbitmq_password }}"
keystone_rabbitmq_telemetry_vhost: "{{ keystone_rabbitmq_vhost }}"
keystone_rabbitmq_telemetry_port: "{{ keystone_rabbitmq_port }}"
keystone_rabbitmq_telemetry_servers: "{{ keystone_rabbitmq_servers }}"
keystone_rabbitmq_telemetry_use_ssl: "{{ keystone_rabbitmq_use_ssl }}"
keystone_rabbitmq_telemetry_host_group: "{{ keystone_rabbitmq_host_group }}"

# If there are any Ceilometer hosts in the environment, then enable its usage
keystone_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

keystone_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(keystone_service_proto) }}"
keystone_service_adminuri_insecure: "{% if keystone_service_adminuri_proto == 'https' and (keystone_user_ssl_cert is not defined or haproxy_user_ssl_cert is not defined) | bool %}true{% else %}false{% endif %}"
keystone_service_adminuri: "{{ keystone_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_admin_port }}"
keystone_service_adminurl: "{{ keystone_service_adminuri }}/v3"

keystone_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(keystone_service_proto) }}"
keystone_service_internaluri_insecure: "{% if keystone_service_internaluri_proto == 'https' and (keystone_user_ssl_cert is not defined or haproxy_user_ssl_cert is not defined) | bool %}true{% else %}false{% endif %}"
keystone_service_internaluri: "{{ keystone_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ keystone_service_port }}"
keystone_service_internalurl: "{{ keystone_service_internaluri }}/v3"

## Neutron
neutron_service_port: 9696
neutron_service_proto: http
neutron_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(neutron_service_proto) }}"
neutron_service_adminuri: "{{ neutron_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ neutron_service_port }}"
neutron_service_adminurl: "{{ neutron_service_adminuri }}"
neutron_service_user_name: neutron
neutron_service_project_name: service
neutron_service_region: "{{ service_region }}"

# These are here rather than in neutron_all because
# both the os_ceilometer and os_neutron roles require them

# RPC
neutron_rabbitmq_userid: neutron
neutron_rabbitmq_vhost: /neutron
neutron_rabbitmq_port: "{{ rabbitmq_port }}"
neutron_rabbitmq_servers: "{{ rabbitmq_servers }}"
neutron_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
neutron_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
neutron_rabbitmq_telemetry_userid: "{{ neutron_rabbitmq_userid }}"
neutron_rabbitmq_telemetry_password: "{{ neutron_rabbitmq_password }}"
neutron_rabbitmq_telemetry_vhost: "{{ neutron_rabbitmq_vhost }}"
neutron_rabbitmq_telemetry_port: "{{ neutron_rabbitmq_port }}"
neutron_rabbitmq_telemetry_servers: "{{ neutron_rabbitmq_servers }}"
neutron_rabbitmq_telemetry_use_ssl: "{{ neutron_rabbitmq_use_ssl }}"
neutron_rabbitmq_telemetry_host_group: "{{ neutron_rabbitmq_host_group }}"

# If there are any Designate hosts in the environment, then enable its usage
neutron_designate_enabled: "{{ (groups['designate_all'] is defined) and (groups['designate_all'] | length > 0) }}"
# If there are any Ceilometer hosts in the environment, then enable its usage
neutron_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

neutron_plugin_type: ml2.lxb

## Nova
nova_service_port: 8774
nova_metadata_port: 8775
nova_service_proto: http
nova_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(nova_service_proto) }}"
nova_service_adminuri: "{{ nova_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ nova_service_port }}"
nova_service_adminurl: "{{ nova_service_adminuri }}/v2.1/%(tenant_id)s"
nova_service_region: "{{ service_region }}"
nova_service_user_name: nova
nova_service_project_name: service
nova_service_project_domain_id: default
nova_service_user_domain_id: default
nova_keystone_auth_plugin: password
nova_console_type: spice
nova_novncproxy_port: 6080
nova_spice_html5proxy_base_port: 6082
nova_console_port: "{% if nova_console_type == 'spice' %}{{ nova_spice_html5proxy_base_port }}{% else %}{{ nova_novncproxy_port }}{% endif %}"
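# Illustrative only: with the default nova_console_type of 'spice' this
# resolves to 6082; any other console type (for example 'novnc') falls through
# to the noVNC proxy port, 6080.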

# These are here rather than in nova_all because
# both the os_ceilometer and os_nova roles require them

# RPC
nova_rabbitmq_userid: nova
nova_rabbitmq_vhost: /nova
nova_rabbitmq_port: "{{ rabbitmq_port }}"
nova_rabbitmq_servers: "{{ rabbitmq_servers }}"
nova_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
nova_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
nova_rabbitmq_telemetry_userid: "{{ nova_rabbitmq_userid }}"
nova_rabbitmq_telemetry_password: "{{ nova_rabbitmq_password }}"
nova_rabbitmq_telemetry_vhost: "{{ nova_rabbitmq_vhost }}"
nova_rabbitmq_telemetry_port: "{{ nova_rabbitmq_port }}"
nova_rabbitmq_telemetry_servers: "{{ nova_rabbitmq_servers }}"
nova_rabbitmq_telemetry_use_ssl: "{{ nova_rabbitmq_use_ssl }}"
nova_rabbitmq_telemetry_host_group: "{{ nova_rabbitmq_host_group }}"

# If there are any Designate hosts in the environment, then enable its usage
nova_designate_enabled: "{{ (groups['designate_all'] is defined) and (groups['designate_all'] | length > 0) }}"
# If there are any Ceilometer hosts in the environment, then enable its usage
nova_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['ceilometer_all'] | length > 0) }}"

## Sahara
# RPC
sahara_rabbitmq_userid: sahara
sahara_rabbitmq_vhost: /sahara
sahara_rabbitmq_port: "{{ rabbitmq_port }}"
sahara_rabbitmq_servers: "{{ rabbitmq_servers }}"
sahara_rabbitmq_use_ssl: "{{ rabbitmq_use_ssl }}"
sahara_rabbitmq_host_group: "{{ rabbitmq_host_group }}"

# Telemetry notifications
sahara_rabbitmq_telemetry_userid: "{{ sahara_rabbitmq_userid }}"
sahara_rabbitmq_telemetry_password: "{{ sahara_rabbitmq_password }}"
sahara_rabbitmq_telemetry_vhost: "{{ sahara_rabbitmq_vhost }}"
sahara_rabbitmq_telemetry_port: "{{ sahara_rabbitmq_port }}"
sahara_rabbitmq_telemetry_servers: "{{ sahara_rabbitmq_servers }}"
sahara_rabbitmq_telemetry_use_ssl: "{{ sahara_rabbitmq_use_ssl }}"
sahara_rabbitmq_telemetry_host_group: "{{ sahara_rabbitmq_host_group }}"

# If there are any Ceilometer and Sahara hosts in the environment, then enable its usage
sahara_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['sahara_all'] is defined) and (groups['ceilometer_all'] | length > 0) and (groups['sahara_all'] | length > 0) }}"

## Swift
swift_proxy_port: 8080
swift_system_user_name: swift
swift_system_shell: /bin/bash
swift_system_comment: swift system user
swift_system_home_folder: "/var/lib/{{ swift_system_user_name }}"

# Swift Telemetry notifications
swift_rabbitmq_telemetry_userid: "swift"
swift_rabbitmq_telemetry_vhost: "/swift"
swift_rabbitmq_telemetry_servers: "{{ rabbitmq_servers }}"
swift_rabbitmq_telemetry_host_group: "{{ rabbitmq_host_group }}"

# If there are any Ceilometer and Swift hosts in the environment, then enable its usage
swift_ceilometer_enabled: "{{ (groups['ceilometer_all'] is defined) and (groups['swift_proxy'] is defined) and (groups['ceilometer_all'] | length > 0) and (groups['swift_proxy'] | length > 0) }}"

## OpenStack Openrc
openrc_os_auth_url: "{{ keystone_service_internalurl }}"
openrc_os_password: "{{ keystone_auth_admin_password }}"
openrc_os_domain_name: "Default"
openrc_region_name: "{{ service_region }}"

## Host security hardening
# The openstack-ansible-security role provides security hardening for hosts
# by applying security configurations from the STIG. Hardening is enabled by
# default, but an option to opt out is available by setting the following
# variable to 'false'.
# Docs: http://docs.openstack.org/developer/openstack-ansible-security/
apply_security_hardening: true

## Ansible ssh configuration
ansible_ssh_extra_args: >
  -o UserKnownHostsFile=/dev/null
  -o StrictHostKeyChecking=no
  -o ServerAliveInterval=64
  -o ServerAliveCountMax=1024
  -o Compression=no
  -o TCPKeepAlive=yes
  -o VerifyHostKeyDNS=no
  -o ForwardX11=no
  -o ForwardAgent=yes
  -T