# Commit 8f0a4bcb0d (Michal Arbet, 2024-10-17):
#   Switch mariadb's loadbalancer from HAProxy to ProxySQL
#
#   It's been some time since ProxySQL has been with us in Kolla.
#   Let's switch the load balancer for MariaDB connections from
#   HAProxy to ProxySQL.
#
#   Depends-On: https://review.opendev.org/c/openstack/kolla/+/928956
#   Change-Id: I42ba4fb83b5bb31058e888f0d39d47c27b844de5

---
# The options in this file can be overridden in 'globals.yml'
# The "temp" files that are created before merge need to stay persistent due
# to the fact that ansible will register a "change" if it has to create them
# again. Persistent files allow for idempotency
container_config_directory: "/var/lib/kolla/config_files"
# The directory on the deploy host containing globals.yml.
node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}"
# The directory containing custom config files to merge with Kolla's config files
node_custom_config: "{{ node_config }}/config"
# The directory to store the config files on the destination node
node_config_directory: "/etc/kolla"
# The user and group which own node_config_directory; you can use a
# non-root user to deploy Kolla
config_owner_user: "root"
config_owner_group: "root"
###################
# Ansible options
###################
# This variable is used as the "filter" argument for the setup module. For
# instance, if one wants to remove/ignore all Neutron interface facts:
# kolla_ansible_setup_filter: "ansible_[!qt]*"
# By default, we do not provide a filter.
kolla_ansible_setup_filter: "{{ omit }}"
# This variable is used as the "gather_subset" argument for the setup module.
# For instance, if one wants to avoid collecting facts via facter:
# kolla_ansible_setup_gather_subset: "all,!facter"
# By default, we do not provide a gather subset.
kolla_ansible_setup_gather_subset: "{{ omit }}"
###################
# Kolla options
###################
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"
# Valid options are ['centos', 'debian', 'rocky', 'ubuntu']
kolla_base_distro: "rocky"
kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
kolla_same_external_internal_vip: "{{ kolla_external_vip_address | ansible.utils.ipaddr('address') == kolla_internal_vip_address | ansible.utils.ipaddr('address') }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}"
kolla_dev_repos_directory: "/opt/stack/"
kolla_dev_repos_git: "https://opendev.org/openstack"
kolla_dev_repos_pull: "no"
kolla_dev_mode: "no"
kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}"
# Proxy settings for containers such as magnum that need internet access
container_http_proxy: ""
container_https_proxy: ""
container_no_proxy: "localhost,127.0.0.1"
container_proxy_no_proxy_entries:
- "{{ container_no_proxy }}"
- "{{ api_interface_address }}"
- "{{ kolla_internal_vip_address | default('') }}"
container_proxy:
http_proxy: "{{ container_http_proxy }}"
https_proxy: "{{ container_https_proxy }}"
no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}"
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be an override.
api_interface_address: "{{ 'api' | kolla_address }}"
####################
# Database options
####################
database_address: "{{ kolla_internal_fqdn }}"
database_user: "root"
database_port: "3306"
database_connection_recycle_time: 10
database_max_pool_size: 1
####################
# Container engine options
####################
kolla_container_engine: "docker"
####################
# Docker options
####################
docker_registry_email:
docker_registry: "quay.io"
docker_namespace: "openstack.kolla"
docker_image_name_prefix: ""
docker_registry_username:
# Please read the docs carefully before applying docker_registry_insecure.
docker_registry_insecure: "no"
docker_runtime_directory: ""
# Docker client timeout in seconds.
docker_client_timeout: 120
# Docker networking options
docker_disable_default_iptables_rules: "yes"
docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}"
docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}"
# Retention settings for Docker logs
docker_log_max_file: "5"
docker_log_max_size: "50m"
# Valid options are [ no, on-failure, always, unless-stopped ]
docker_restart_policy: "unless-stopped"
# '0' means unlimited retries (applies only to 'on-failure' policy)
docker_restart_policy_retry: "10"
# Extra docker options for Zun
docker_configure_for_zun: "no"
docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
docker_zun_config: {}
# Extra containerd options for Zun
containerd_configure_for_zun: "no"
# Enable Ceph backed Cinder Volumes for zun
zun_configure_for_cinder_ceph: "no"
# 42463 is the static group id of the zun user in the Zun image.
# If users customize this value on building the Zun images,
# they need to change this config accordingly.
containerd_grpc_gid: 42463
# Timeout after Docker sends SIGTERM before sending SIGKILL.
docker_graceful_timeout: 60
# Common options used throughout Docker
docker_common_options:
auth_email: "{{ docker_registry_email }}"
auth_password: "{{ docker_registry_password }}"
auth_registry: "{{ docker_registry }}"
auth_username: "{{ docker_registry_username }}"
environment:
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
restart_policy: "{{ docker_restart_policy }}"
restart_retries: "{{ docker_restart_policy_retry }}"
graceful_timeout: "{{ docker_graceful_timeout }}"
client_timeout: "{{ docker_client_timeout }}"
container_engine: "{{ kolla_container_engine }}"
# Container engine specific volume paths
docker_volumes_path: "{{ docker_runtime_directory or '/var/lib/docker' }}/volumes"
podman_volumes_path: "{{ docker_runtime_directory or '/var/lib/containers' }}/storage/volumes"
container_engine_volumes_path: "{{ docker_volumes_path if kolla_container_engine == 'docker' else podman_volumes_path }}"
#####################
# Volumes under /run
#####################
# Podman has problem with mounting whole /run directory
# described here: https://github.com/containers/podman/issues/16305
run_default_volumes_podman:
- '/run/netns:/run/netns:shared'
- '/run/lock/nova:/run/lock/nova:shared'
- "/run/libvirt:/run/libvirt:shared"
- "/run/nova:/run/nova:shared"
- "/run/openvswitch:/run/openvswitch:shared"
run_default_volumes_docker: []
run_default_subdirectories:
- '/run/netns'
- '/run/lock/nova'
- "/run/libvirt"
- "/run/nova"
- "/run/openvswitch"
####################
# Dimensions options
####################
# Dimension options for Docker Containers
# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9)
# fixes at least rabbitmq and mariadb
default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}"
default_container_dimensions_el9: "{{ default_docker_dimensions_el9 if kolla_container_engine == 'docker' else default_podman_dimensions_el9 }}"
default_docker_dimensions_el9:
ulimits:
nofile:
soft: 1048576
hard: 1048576
default_podman_dimensions_el9:
ulimits:
RLIMIT_NOFILE:
soft: 1048576
hard: 1048576
RLIMIT_NPROC:
soft: 1048576
hard: 1048576
#####################
# Healthcheck options
#####################
enable_container_healthchecks: "yes"
# Healthcheck options for Docker containers
# interval/timeout/start_period are in seconds
default_container_healthcheck_interval: 30
default_container_healthcheck_timeout: 30
default_container_healthcheck_retries: 3
default_container_healthcheck_start_period: 5
#######################
# Extra volumes options
#######################
# Extra volumes for Docker Containers
default_extra_volumes: []
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
####################
# Opensearch options
####################
opensearch_datadir_volume: "opensearch"
opensearch_internal_endpoint: "{{ opensearch_address | kolla_url(internal_protocol, opensearch_port) }}"
opensearch_dashboards_internal_fqdn: "{{ kolla_internal_fqdn }}"
opensearch_dashboards_external_fqdn: "{{ kolla_external_fqdn }}"
opensearch_dashboards_internal_endpoint: "{{ opensearch_dashboards_internal_fqdn | kolla_url(internal_protocol, opensearch_dashboards_port) }}"
opensearch_dashboards_external_endpoint: "{{ opensearch_dashboards_external_fqdn | kolla_url(public_protocol, opensearch_dashboards_port_external) }}"
opensearch_dashboards_user: "opensearch"
opensearch_log_index_prefix: "{{ kibana_log_prefix if kibana_log_prefix is defined else 'flog' }}"
###################
# Messaging options
###################
# oslo.messaging rpc transport valid options are [ rabbit, amqp ]
om_rpc_transport: "rabbit"
om_rpc_user: "{{ rabbitmq_user }}"
om_rpc_password: "{{ rabbitmq_password }}"
om_rpc_port: "{{ rabbitmq_port }}"
om_rpc_group: "rabbitmq"
om_rpc_vhost: "/"
rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}"
# oslo.messaging notify transport valid options are [ rabbit ]
om_notify_transport: "rabbit"
om_notify_user: "{{ rabbitmq_user }}"
om_notify_password: "{{ rabbitmq_password }}"
om_notify_port: "{{ rabbitmq_port }}"
om_notify_group: "rabbitmq"
om_notify_vhost: "/"
notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}"
# Whether to enable TLS for oslo.messaging communication with RabbitMQ.
om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}"
# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS.
om_rabbitmq_cacert: "{{ rabbitmq_cacert }}"
om_enable_rabbitmq_high_availability: false
# Only enable quorum queues if you disable om_enable_rabbitmq_high_availability
om_enable_rabbitmq_quorum_queues: true
####################
# Networking options
####################
network_interface: "eth0"
neutron_external_interface: "eth1"
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
swift_storage_interface: "{{ network_interface }}"
swift_replication_interface: "{{ swift_storage_interface }}"
migration_interface: "{{ api_interface }}"
tunnel_interface: "{{ network_interface }}"
octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}"
bifrost_network_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
dpdk_tunnel_interface: "{{ neutron_external_interface }}"
ironic_http_interface: "{{ api_interface }}"
ironic_tftp_interface: "{{ api_interface }}"
# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
network_address_family: "ipv4"
api_address_family: "{{ network_address_family }}"
storage_address_family: "{{ network_address_family }}"
swift_storage_address_family: "{{ storage_address_family }}"
swift_replication_address_family: "{{ swift_storage_address_family }}"
migration_address_family: "{{ api_address_family }}"
tunnel_address_family: "{{ network_address_family }}"
octavia_network_address_family: "{{ api_address_family }}"
bifrost_network_address_family: "{{ network_address_family }}"
dns_address_family: "{{ network_address_family }}"
dpdk_tunnel_address_family: "{{ network_address_family }}"
ironic_http_address_family: "{{ api_address_family }}"
ironic_tftp_address_family: "{{ api_address_family }}"
migration_interface_address: "{{ 'migration' | kolla_address }}"
tunnel_interface_address: "{{ 'tunnel' | kolla_address }}"
octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}"
dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}"
ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}"
ironic_tftp_interface_address: "{{ 'ironic_tftp' | kolla_address }}"
# Valid options are [ openvswitch, ovn, linuxbridge, vmware_nsxv, vmware_nsxv3, vmware_nsxp, vmware_dvs ]
# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable.
# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html
neutron_plugin_agent: "openvswitch"
# Valid options are [ internal, infoblox ]
neutron_ipam_driver: "internal"
# The default ports used by each service.
# The list should be in alphabetical order
aodh_internal_fqdn: "{{ kolla_internal_fqdn }}"
aodh_external_fqdn: "{{ kolla_external_fqdn }}"
aodh_internal_endpoint: "{{ aodh_internal_fqdn | kolla_url(internal_protocol, aodh_api_port) }}"
aodh_public_endpoint: "{{ aodh_external_fqdn | kolla_url(public_protocol, aodh_api_public_port) }}"
aodh_api_port: "8042"
aodh_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else aodh_api_port }}"
aodh_api_listen_port: "{{ aodh_api_port }}"
barbican_internal_fqdn: "{{ kolla_internal_fqdn }}"
barbican_external_fqdn: "{{ kolla_external_fqdn }}"
barbican_internal_endpoint: "{{ barbican_internal_fqdn | kolla_url(internal_protocol, barbican_api_port) }}"
barbican_public_endpoint: "{{ barbican_external_fqdn | kolla_url(public_protocol, barbican_api_public_port) }}"
barbican_api_port: "9311"
barbican_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else barbican_api_port }}"
barbican_api_listen_port: "{{ barbican_api_port }}"
blazar_internal_fqdn: "{{ kolla_internal_fqdn }}"
blazar_external_fqdn: "{{ kolla_external_fqdn }}"
blazar_internal_base_endpoint: "{{ blazar_internal_fqdn | kolla_url(internal_protocol, blazar_api_port) }}"
blazar_public_base_endpoint: "{{ blazar_external_fqdn | kolla_url(public_protocol, blazar_api_public_port) }}"
blazar_api_port: "1234"
blazar_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else blazar_api_port }}"
blazar_api_listen_port: "{{ blazar_api_port }}"
ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
ceph_rgw_internal_base_endpoint: "{{ ceph_rgw_internal_fqdn | kolla_url(internal_protocol, ceph_rgw_port) }}"
ceph_rgw_public_base_endpoint: "{{ ceph_rgw_external_fqdn | kolla_url(public_protocol, ceph_rgw_public_port) }}"
ceph_rgw_port: "6780"
ceph_rgw_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ceph_rgw_port }}"
cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_internal_base_endpoint: "{{ cinder_internal_fqdn | kolla_url(internal_protocol, cinder_api_port) }}"
cinder_public_base_endpoint: "{{ cinder_external_fqdn | kolla_url(public_protocol, cinder_api_public_port) }}"
cinder_api_port: "8776"
cinder_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cinder_api_port }}"
cinder_api_listen_port: "{{ cinder_api_port }}"
cloudkitty_internal_fqdn: "{{ kolla_internal_fqdn }}"
cloudkitty_external_fqdn: "{{ kolla_external_fqdn }}"
cloudkitty_internal_endpoint: "{{ cloudkitty_internal_fqdn | kolla_url(internal_protocol, cloudkitty_api_port) }}"
cloudkitty_public_endpoint: "{{ cloudkitty_external_fqdn | kolla_url(public_protocol, cloudkitty_api_public_port) }}"
cloudkitty_api_port: "8889"
cloudkitty_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else cloudkitty_api_port }}"
cloudkitty_api_listen_port: "{{ cloudkitty_api_port }}"
collectd_udp_port: "25826"
cyborg_api_port: "6666"
designate_internal_fqdn: "{{ kolla_internal_fqdn }}"
designate_external_fqdn: "{{ kolla_external_fqdn }}"
designate_internal_endpoint: "{{ designate_internal_fqdn | kolla_url(internal_protocol, designate_api_port) }}"
designate_public_endpoint: "{{ designate_external_fqdn | kolla_url(public_protocol, designate_api_public_port) }}"
designate_api_port: "9001"
designate_api_listen_port: "{{ designate_api_port }}"
designate_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else designate_api_port }}"
designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953"
etcd_client_port: "2379"
etcd_peer_port: "2380"
etcd_enable_tls: "{{ kolla_enable_tls_backend }}"
etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}"
fluentd_syslog_port: "5140"
glance_internal_fqdn: "{{ kolla_internal_fqdn }}"
glance_external_fqdn: "{{ kolla_external_fqdn }}"
glance_internal_endpoint: "{{ glance_internal_fqdn | kolla_url(internal_protocol, glance_api_port) }}"
glance_public_endpoint: "{{ glance_external_fqdn | kolla_url(public_protocol, glance_api_public_port) }}"
glance_api_port: "9292"
glance_api_listen_port: "{{ glance_api_port }}"
glance_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else glance_api_port }}"
glance_tls_proxy_stats_port: "9293"
gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}"
gnocchi_external_fqdn: "{{ kolla_external_fqdn }}"
gnocchi_internal_endpoint: "{{ gnocchi_internal_fqdn | kolla_url(internal_protocol, gnocchi_api_port) }}"
gnocchi_public_endpoint: "{{ gnocchi_external_fqdn | kolla_url(public_protocol, gnocchi_api_public_port) }}"
gnocchi_api_port: "8041"
gnocchi_api_listen_port: "{{ gnocchi_api_port }}"
gnocchi_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else gnocchi_api_port }}"
grafana_internal_fqdn: "{{ kolla_internal_fqdn }}"
grafana_external_fqdn: "{{ kolla_external_fqdn }}"
grafana_internal_endpoint: "{{ grafana_internal_fqdn | kolla_url(internal_protocol, grafana_server_port) }}"
grafana_public_endpoint: "{{ grafana_external_fqdn | kolla_url(public_protocol, grafana_server_public_port) }}"
grafana_server_port: "3000"
grafana_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else grafana_server_port }}"
grafana_server_listen_port: "{{ grafana_server_port }}"
haproxy_stats_port: "1984"
haproxy_monitor_port: "61313"
haproxy_ssh_port: "2985"
# configure SSL/TLS settings for haproxy config, one of [modern, intermediate, legacy]:
kolla_haproxy_ssl_settings: "modern"
haproxy_ssl_settings: "{{ ssl_legacy_settings if kolla_haproxy_ssl_settings == 'legacy' else ssl_intermediate_settings if kolla_haproxy_ssl_settings == 'intermediate' else ssl_modern_settings }}"
ssl_legacy_settings: |
ssl-default-bind-ciphers DEFAULT:!MEDIUM:!3DES
ssl-default-bind-options no-sslv3 no-tlsv10 no-tlsv11
ssl_intermediate_settings: |
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
ssl-default-server-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305
ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tls-tickets
ssl_modern_settings: |
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options prefer-client-ciphers no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets
ssl-default-server-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-server-options no-sslv3 no-tlsv10 no-tlsv11 no-tlsv12 no-tls-tickets
heat_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_external_fqdn: "{{ kolla_external_fqdn }}"
heat_internal_base_endpoint: "{{ heat_internal_fqdn | kolla_url(internal_protocol, heat_api_port) }}"
heat_public_base_endpoint: "{{ heat_external_fqdn | kolla_url(public_protocol, heat_api_public_port) }}"
heat_api_port: "8004"
heat_api_listen_port: "{{ heat_api_port }}"
heat_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_port }}"
heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}"
heat_cfn_internal_base_endpoint: "{{ heat_cfn_internal_fqdn | kolla_url(internal_protocol, heat_api_cfn_port) }}"
heat_cfn_public_base_endpoint: "{{ heat_cfn_external_fqdn | kolla_url(public_protocol, heat_api_cfn_public_port) }}"
heat_api_cfn_port: "8000"
heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}"
heat_api_cfn_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else heat_api_cfn_port }}"
horizon_internal_fqdn: "{{ kolla_internal_fqdn }}"
horizon_external_fqdn: "{{ kolla_external_fqdn }}"
horizon_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port) }}"
horizon_public_endpoint: "{{ kolla_external_fqdn | kolla_url(public_protocol, horizon_tls_port if kolla_enable_tls_external | bool else horizon_port) }}"
horizon_port: "80"
horizon_tls_port: "443"
horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}"
influxdb_http_port: "8086"
ironic_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_internal_endpoint: "{{ ironic_internal_fqdn | kolla_url(internal_protocol, ironic_api_port) }}"
ironic_public_endpoint: "{{ ironic_external_fqdn | kolla_url(public_protocol, ironic_api_public_port) }}"
ironic_api_port: "6385"
ironic_api_listen_port: "{{ ironic_api_port }}"
ironic_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_api_port }}"
ironic_inspector_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_inspector_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_inspector_internal_endpoint: "{{ ironic_inspector_internal_fqdn | kolla_url(internal_protocol, ironic_inspector_port) }}"
ironic_inspector_public_endpoint: "{{ ironic_inspector_external_fqdn | kolla_url(public_protocol, ironic_inspector_public_port) }}"
ironic_inspector_port: "5050"
ironic_inspector_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else ironic_inspector_port }}"
ironic_inspector_listen_port: "{{ ironic_inspector_port }}"
ironic_http_port: "8089"
ironic_prometheus_exporter_port: "9608"
iscsi_port: "3260"
keystone_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else keystone_public_listen_port }}"
keystone_public_listen_port: "5000"
keystone_internal_port: "5000"
keystone_internal_listen_port: "{{ keystone_internal_port }}"
keystone_ssh_port: "8023"
kuryr_port: "23750"
letsencrypt_webserver_port: "8081"
magnum_internal_fqdn: "{{ kolla_internal_fqdn }}"
magnum_external_fqdn: "{{ kolla_external_fqdn }}"
magnum_internal_base_endpoint: "{{ magnum_internal_fqdn | kolla_url(internal_protocol, magnum_api_port) }}"
magnum_public_base_endpoint: "{{ magnum_external_fqdn | kolla_url(public_protocol, magnum_api_public_port) }}"
magnum_api_port: "9511"
magnum_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else magnum_api_port }}"
magnum_api_listen_port: "{{ magnum_api_port }}"
manila_internal_fqdn: "{{ kolla_internal_fqdn }}"
manila_external_fqdn: "{{ kolla_external_fqdn }}"
manila_internal_base_endpoint: "{{ manila_internal_fqdn | kolla_url(internal_protocol, manila_api_port) }}"
manila_public_base_endpoint: "{{ manila_external_fqdn | kolla_url(public_protocol, manila_api_public_port) }}"
manila_api_port: "8786"
manila_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else manila_api_port }}"
manila_api_listen_port: "{{ manila_api_port }}"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
mariadb_clustercheck_port: "4569"
mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}"
mariadb_monitor_connect_interval: "2000"
mariadb_monitor_galera_healthcheck_interval: "4000"
mariadb_monitor_galera_healthcheck_timeout: "1000"
mariadb_monitor_galera_healthcheck_max_timeout_count: "2"
mariadb_monitor_ping_interval: "3000"
mariadb_monitor_ping_timeout: "2000"
mariadb_monitor_ping_max_failures: "2"
#
# Defaults preserved for multinode setup
# Tweaked for single-node
#
# ProxySQL shuns servers on MySQL errors, which can cause failures
# during upgrades or restarts. In single-node setups, ProxySQL can't reroute
# traffic, leading to "Max connect timeout" errors. To avoid this in CI and
# single-node environments, delay error responses to clients by 10 seconds,
# giving the backend time to recover without immediate failures.
#
# See ProxySQL docs for more: https://proxysql.com/documentation/global-variables/mysql-variables/#mysql-shun_on_failures
mariadb_shun_on_failures: "{{ '10' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '5' }}"
mariadb_connect_retries_delay: "{{ '1000' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '1' }}"
mariadb_connect_retries_on_failure: "{{ '20' if mariadb_shards_info.shards.values() | map(attribute='hosts') | map('length') | select('<=', 1) | list | length > 0 else '10' }}"
mariadb_datadir_volume: "mariadb"
mariadb_default_database_shard_id: 0
mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}"
mariadb_shard_id: "{{ mariadb_default_database_shard_id }}"
mariadb_shard_name: "shard_{{ mariadb_shard_id }}"
mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}"
mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}"
mariadb_backup_target: "{{ 'active' if mariadb_loadbalancer == 'haproxy' else 'replica' }}"
mariadb_shard_root_user_prefix: "root_shard_"
mariadb_shard_backup_user_prefix: "backup_shard_"
mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}"
masakari_internal_fqdn: "{{ kolla_internal_fqdn }}"
masakari_external_fqdn: "{{ kolla_external_fqdn }}"
masakari_internal_endpoint: "{{ masakari_internal_fqdn | kolla_url(internal_protocol, masakari_api_port) }}"
masakari_public_endpoint: "{{ masakari_external_fqdn | kolla_url(public_protocol, masakari_api_public_port) }}"
masakari_api_port: "15868"
masakari_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else masakari_api_port }}"
masakari_api_listen_port: "{{ masakari_api_port }}"
masakari_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
memcached_port: "11211"
memcache_security_strategy: "ENCRYPT"
mistral_internal_fqdn: "{{ kolla_internal_fqdn }}"
mistral_external_fqdn: "{{ kolla_external_fqdn }}"
mistral_internal_base_endpoint: "{{ mistral_internal_fqdn | kolla_url(internal_protocol, mistral_api_port) }}"
mistral_public_base_endpoint: "{{ mistral_external_fqdn | kolla_url(public_protocol, mistral_api_public_port) }}"
mistral_api_port: "8989"
mistral_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else mistral_api_port }}"
mistral_api_listen_port: "{{ mistral_api_port }}"
neutron_internal_fqdn: "{{ kolla_internal_fqdn }}"
neutron_external_fqdn: "{{ kolla_external_fqdn }}"
neutron_internal_endpoint: "{{ neutron_internal_fqdn | kolla_url(internal_protocol, neutron_server_port) }}"
neutron_public_endpoint: "{{ neutron_external_fqdn | kolla_url(public_protocol, neutron_server_public_port) }}"
neutron_server_port: "9696"
neutron_server_listen_port: "{{ neutron_server_port }}"
neutron_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else neutron_server_port }}"
neutron_tls_proxy_stats_port: "9697"
nova_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_external_fqdn: "{{ kolla_external_fqdn }}"
nova_internal_base_endpoint: "{{ nova_internal_fqdn | kolla_url(internal_protocol, nova_api_port) }}"
nova_public_base_endpoint: "{{ nova_external_fqdn | kolla_url(public_protocol, nova_api_public_port) }}"
nova_api_port: "8774"
nova_api_listen_port: "{{ nova_api_port }}"
nova_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_api_port }}"
nova_metadata_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_metadata_external_fqdn: "{{ kolla_external_fqdn }}"
nova_metadata_port: "8775"
nova_metadata_listen_port: "{{ nova_metadata_port }}"
nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_novncproxy_port: "6080"
nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}"
nova_novncproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_novncproxy_port }}"
nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}"
nova_spicehtml5proxy_port: "6082"
nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}"
nova_spicehtml5proxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_spicehtml5proxy_port }}"
nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_serialproxy_port: "6083"
nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}"
nova_serialproxy_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else nova_serialproxy_port }}"
nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
octavia_internal_fqdn: "{{ kolla_internal_fqdn }}"
octavia_external_fqdn: "{{ kolla_external_fqdn }}"
octavia_internal_endpoint: "{{ octavia_internal_fqdn | kolla_url(internal_protocol, octavia_api_port) }}"
octavia_public_endpoint: "{{ octavia_external_fqdn | kolla_url(public_protocol, octavia_api_public_port) }}"
octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
octavia_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else octavia_api_port }}"
octavia_health_manager_port: "5555"
# NOTE: If an external ElasticSearch cluster port is specified,
# we default to using that port in services with ElasticSearch
# endpoints. This is for backwards compatibility.
opensearch_port: "{{ elasticsearch_port | default('9200') }}"
opensearch_dashboards_port: "5601"
opensearch_dashboards_port_external: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else opensearch_dashboards_port }}"
opensearch_dashboards_listen_port: "{{ opensearch_dashboards_port }}"
ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642"
ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
ovn_sb_connection: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
ovsdb_port: "6640"
placement_internal_fqdn: "{{ kolla_internal_fqdn }}"
placement_external_fqdn: "{{ kolla_external_fqdn }}"
placement_internal_endpoint: "{{ placement_internal_fqdn | kolla_url(internal_protocol, placement_api_port) }}"
placement_public_endpoint: "{{ placement_external_fqdn | kolla_url(public_protocol, placement_api_public_port) }}"
# Default Placement API port of 8778 already in use
placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"
placement_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else placement_api_port }}"
prometheus_internal_fqdn: "{{ kolla_internal_fqdn }}"
prometheus_external_fqdn: "{{ kolla_external_fqdn }}"
prometheus_internal_endpoint: "{{ prometheus_internal_fqdn | kolla_url(internal_protocol, prometheus_port) }}"
prometheus_public_endpoint: "{{ prometheus_external_fqdn | kolla_url(public_protocol, prometheus_public_port) }}"
prometheus_port: "9091"
prometheus_listen_port: "{{ prometheus_port }}"
prometheus_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_port }}"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
prometheus_memcached_exporter_port: "9150"
prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}"
# Default cadvisor port of 8080 already in use
prometheus_cadvisor_port: "18080"
prometheus_fluentd_integration_port: "24231"
prometheus_libvirt_exporter_port: "9177"
prometheus_etcd_integration_port: "{{ etcd_client_port }}"
proxysql_prometheus_exporter_port: "6070"
# Prometheus alertmanager ports
prometheus_alertmanager_internal_fqdn: "{{ kolla_internal_fqdn }}"
prometheus_alertmanager_external_fqdn: "{{ kolla_external_fqdn }}"
prometheus_alertmanager_internal_endpoint: "{{ prometheus_alertmanager_internal_fqdn | kolla_url(internal_protocol, prometheus_alertmanager_port) }}"
prometheus_alertmanager_public_endpoint: "{{ prometheus_alertmanager_external_fqdn | kolla_url(public_protocol, prometheus_alertmanager_public_port) }}"
prometheus_alertmanager_port: "9093"
prometheus_alertmanager_cluster_port: "9094"
prometheus_alertmanager_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else prometheus_alertmanager_port }}"
prometheus_alertmanager_listen_port: "{{ prometheus_alertmanager_port }}"
# Prometheus openstack-exporter ports
prometheus_openstack_exporter_port: "9198"
prometheus_elasticsearch_exporter_port: "9108"
# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"
# Prometheus instance label to use for metrics
prometheus_instance_label:
# ProxySQL admin interface port.
proxysql_admin_port: "6032"
rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
rabbitmq_epmd_port: "4369"
rabbitmq_prometheus_port: "15692"
redis_port: "6379"
redis_sentinel_port: "26379"
skyline_apiserver_internal_fqdn: "{{ kolla_internal_fqdn }}"
skyline_apiserver_external_fqdn: "{{ kolla_external_fqdn }}"
skyline_apiserver_internal_endpoint: "{{ skyline_apiserver_internal_fqdn | kolla_url(internal_protocol, skyline_apiserver_port) }}"
skyline_apiserver_public_endpoint: "{{ skyline_apiserver_external_fqdn | kolla_url(public_protocol, skyline_apiserver_public_port) }}"
skyline_console_internal_fqdn: "{{ kolla_internal_fqdn }}"
skyline_console_external_fqdn: "{{ kolla_external_fqdn }}"
skyline_console_internal_endpoint: "{{ skyline_console_internal_fqdn | kolla_url(internal_protocol, skyline_console_port) }}"
skyline_console_public_endpoint: "{{ skyline_console_external_fqdn | kolla_url(public_protocol, skyline_console_public_port) }}"
skyline_apiserver_port: "9998"
skyline_apiserver_listen_port: "{{ skyline_apiserver_port }}"
skyline_apiserver_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_apiserver_port }}"
skyline_console_port: "9999"
skyline_console_listen_port: "{{ skyline_console_port }}"
skyline_console_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else skyline_console_port }}"
skyline_enable_sso: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}"
swift_internal_fqdn: "{{ kolla_internal_fqdn }}"
swift_external_fqdn: "{{ kolla_external_fqdn }}"
swift_internal_base_endpoint: "{{ swift_internal_fqdn | kolla_url(internal_protocol, swift_proxy_server_port) }}"
swift_public_base_endpoint: "{{ swift_external_fqdn | kolla_url(public_protocol, swift_proxy_server_port) }}"
swift_proxy_server_port: "8080"
swift_proxy_server_listen_port: "{{ swift_proxy_server_port }}"
swift_object_server_port: "6000"
swift_account_server_port: "6001"
swift_container_server_port: "6002"
swift_rsync_port: "10873"
syslog_udp_port: "{{ fluentd_syslog_port }}"
tacker_internal_fqdn: "{{ kolla_internal_fqdn }}"
tacker_external_fqdn: "{{ kolla_external_fqdn }}"
tacker_internal_endpoint: "{{ tacker_internal_fqdn | kolla_url(internal_protocol, tacker_server_port) }}"
tacker_public_endpoint: "{{ tacker_external_fqdn | kolla_url(public_protocol, tacker_server_public_port) }}"
tacker_server_port: "9890"
tacker_server_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else tacker_server_port }}"
tacker_server_listen_port: "{{ tacker_server_port }}"
trove_internal_fqdn: "{{ kolla_internal_fqdn }}"
trove_external_fqdn: "{{ kolla_external_fqdn }}"
trove_internal_base_endpoint: "{{ trove_internal_fqdn | kolla_url(internal_protocol, trove_api_port) }}"
trove_public_base_endpoint: "{{ trove_external_fqdn | kolla_url(public_protocol, trove_api_public_port) }}"
trove_api_port: "8779"
trove_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else trove_api_port }}"
trove_api_listen_port: "{{ trove_api_port }}"
venus_internal_fqdn: "{{ kolla_internal_fqdn }}"
venus_external_fqdn: "{{ kolla_external_fqdn }}"
venus_internal_endpoint: "{{ venus_internal_fqdn | kolla_url(internal_protocol, venus_api_port) }}"
venus_public_endpoint: "{{ venus_external_fqdn | kolla_url(public_protocol, venus_api_public_port) }}"
venus_api_port: "10010"
venus_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else venus_api_port }}"
venus_api_listen_port: "{{ venus_api_port }}"
watcher_internal_fqdn: "{{ kolla_internal_fqdn }}"
watcher_external_fqdn: "{{ kolla_external_fqdn }}"
watcher_internal_endpoint: "{{ watcher_internal_fqdn | kolla_url(internal_protocol, watcher_api_port) }}"
watcher_public_endpoint: "{{ watcher_external_fqdn | kolla_url(public_protocol, watcher_api_public_port) }}"
watcher_api_port: "9322"
watcher_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else watcher_api_port }}"
watcher_api_listen_port: "{{ watcher_api_port }}"
zun_api_port: "9517"
zun_api_public_port: "{{ haproxy_single_external_frontend_public_port if haproxy_single_external_frontend | bool else zun_api_port }}"
zun_api_listen_port: "{{ zun_api_port }}"
zun_wsproxy_internal_fqdn: "{{ kolla_internal_fqdn }}"
zun_wsproxy_external_fqdn: "{{ kolla_external_fqdn }}"
zun_wsproxy_port: "6784"
zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
zun_cni_daemon_port: "9036"
zun_internal_fqdn: "{{ kolla_internal_fqdn }}"
zun_external_fqdn: "{{ kolla_external_fqdn }}"
zun_internal_base_endpoint: "{{ zun_internal_fqdn | kolla_url(internal_protocol, zun_api_port) }}"
zun_public_base_endpoint: "{{ zun_external_fqdn | kolla_url(public_protocol, zun_api_public_port) }}"
# URL schemes for public/internal endpoints, derived from the external and
# internal TLS settings respectively.
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
##################
# Firewall options
##################
enable_external_api_firewalld: "false"
external_api_firewalld_zone: "public"
####################
# OpenStack options
####################
openstack_release: "master"
# Docker image tag used by default.
openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}"
openstack_tag_suffix: ""
openstack_logging_debug: "False"
openstack_region_name: "RegionOne"
# Variable defining the pin_release_version, applied during the rolling upgrade process
openstack_previous_release_name: "2023.2"
# A list of policy file formats that are supported by Oslo.policy
supported_policy_format_list:
- policy.yaml
- policy.json
# In the context of multi-regions, list here the name of all your regions.
multiple_regions_names:
- "{{ openstack_region_name }}"
openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}"
openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}"
# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"
# Optionally change the path to sysctl.conf modified by Kolla Ansible plays.
kolla_sysctl_conf_path: /etc/sysctl.conf
# Endpoint type used to connect with OpenStack services with ansible modules.
# Valid options are [ public, internal ]
openstack_interface: "internal"
# Openstack CA certificate bundle file
# CA bundle file must be added to both the Horizon and Kolla Toolbox containers
openstack_cacert: ""
# Enable core OpenStack services. This includes:
# glance, keystone, neutron, nova, heat, and horizon.
enable_openstack_core: "yes"
# These roles are required for Kolla to be operational, however a savvy deployer
# could disable some of these required roles and run their own services.
enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
enable_nova: "{{ enable_openstack_core | bool }}"
enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"
# NOTE: Most memcached clients handle load-balancing via client side
# hashing (consistent or not) logic, so going under the covers and messing
# with things that the clients are not aware of is generally wrong
enable_haproxy_memcached: "no"
# Additional optional OpenStack features and services are specified here
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_ceilometer_prometheus_pushgateway: "no"
enable_cells: "no"
enable_central_logging: "no"
enable_ceph_rgw: "no"
enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_cinder: "no"
enable_cinder_backup: "yes"
enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cinder_backend_quobyte: "no"
enable_cinder_backend_pure_iscsi: "no"
enable_cinder_backend_pure_fc: "no"
enable_cinder_backend_pure_roce: "no"
enable_cinder_backend_pure_nvme_tcp: "no"
enable_cloudkitty: "no"
enable_collectd: "no"
enable_cyborg: "no"
enable_designate: "no"
enable_etcd: "no"
enable_fluentd: "yes"
enable_fluentd_systemd: "{{ (enable_fluentd | bool) and (enable_central_logging | bool) }}"
enable_gnocchi: "no"
enable_gnocchi_statsd: "no"
enable_grafana: "no"
enable_grafana_external: "{{ enable_grafana | bool }}"
enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}"
enable_heat: "{{ enable_openstack_core | bool }}"
enable_horizon: "{{ enable_openstack_core | bool }}"
enable_horizon_blazar: "{{ enable_blazar | bool }}"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_designate: "{{ enable_designate | bool }}"
enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}"
enable_horizon_heat: "{{ enable_heat | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_masakari: "{{ enable_masakari | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
enable_horizon_octavia: "{{ enable_octavia | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_horizon_zun: "{{ enable_zun | bool }}"
enable_influxdb: "{{ enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb' }}"
enable_ironic: "no"
enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
enable_ironic_prometheus_exporter: "{{ enable_ironic | bool and enable_prometheus | bool }}"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
enable_kuryr: "no"
enable_letsencrypt: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
enable_manila_backend_hnas: "no"
enable_manila_backend_cephfs_native: "no"
enable_manila_backend_cephfs_nfs: "no"
enable_manila_backend_glusterfs_nfs: "no"
enable_mariabackup: "no"
enable_masakari: "no"
enable_masakari_instancemonitor: "{{ enable_masakari | bool }}"
enable_masakari_hostmonitor: "{{ enable_masakari | bool }}"
enable_mistral: "no"
enable_multipathd: "no"
enable_neutron_vpnaas: "no"
enable_neutron_sriov: "no"
enable_neutron_mlnx: "no"
enable_neutron_dvr: "no"
enable_neutron_fwaas: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_neutron_bgp_dragent: "no"
enable_neutron_provider_networks: "no"
enable_neutron_segments: "no"
enable_neutron_packet_logging: "no"
enable_neutron_sfc: "no"
enable_neutron_taas: "no"
enable_neutron_trunk: "no"
enable_neutron_metering: "no"
enable_neutron_infoblox_ipam_agent: "no"
enable_neutron_port_forwarding: "no"
enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}"
enable_nova_serialconsole_proxy: "no"
enable_nova_ssh: "yes"
enable_octavia: "no"
enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
enable_octavia_jobboard: "{{ enable_octavia | bool and 'amphora' in octavia_provider_drivers }}"
enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
enable_ovs_dpdk: "no"
enable_osprofiler: "no"
enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
enable_prometheus: "no"
# ProxySQL is the default load balancer for MariaDB connections
# (replaced HAProxy for this purpose).
enable_proxysql: "yes"
enable_redis: "no"
enable_skyline: "no"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
enable_tacker: "no"
enable_telegraf: "no"
enable_trove: "no"
enable_trove_singletenant: "no"
enable_venus: "no"
enable_watcher: "no"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"
designate_keystone_user: "designate"
ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
placement_keystone_user: "placement"
cinder_keystone_user: "cinder"
glance_keystone_user: "glance"
# Nova fake driver and the number of fake driver per compute node
enable_nova_fake: "no"
num_nova_fake_per_node: 5
# Clean images options are specified here
enable_destroy_images: "no"
####################
# Global Options
####################
# List of containers to skip during stop command in YAML list format
# skip_stop_containers:
# - container1
# - container2
skip_stop_containers: []
####################
# Logging options
####################
# NOTE: If an external ElasticSearch cluster address is configured, all
# services with ElasticSearch endpoints should be configured to log
# to the external cluster by default. This is for backwards compatibility.
opensearch_address: "{{ elasticsearch_address if elasticsearch_address is defined else kolla_internal_fqdn }}"
enable_opensearch: "{{ enable_central_logging | bool or enable_osprofiler | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'opensearch') }}"
enable_opensearch_dashboards: "{{ enable_opensearch | bool }}"
enable_opensearch_dashboards_external: "{{ enable_opensearch_dashboards | bool }}"
####################
# Redis options
####################
redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}default:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}"
redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
####################
# Osprofiler options
####################
# valid values: ["elasticsearch", "redis"]
osprofiler_backend: "elasticsearch"
opensearch_connection_string: "elasticsearch://{{ opensearch_address | put_address_in_context('url') }}:{{ opensearch_port }}"
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else opensearch_connection_string }}"
####################
# RabbitMQ options
####################
rabbitmq_user: "openstack"
rabbitmq_monitoring_user: ""
# Whether to enable TLS encryption for RabbitMQ client-server communication.
rabbitmq_enable_tls: "no"
# CA certificate bundle in RabbitMQ container.
rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
rabbitmq_datadir_volume: "rabbitmq"
####################
# HAProxy options
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}"
haproxy_enable_http2: "yes"
haproxy_http2_protocol: "alpn h2,http/1.1"
kolla_enable_tls_internal: "no"
kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}"
kolla_certificates_dir: "{{ node_config }}/certificates"
kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem"
kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem"
kolla_admin_openrc_cacert: ""
kolla_copy_ca_into_containers: "no"
haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
haproxy_backend_cacert_dir: "/etc/ssl/certs"
haproxy_single_external_frontend: false
haproxy_single_external_frontend_public_port: "{{ '443' if kolla_enable_tls_external | bool else '80' }}"
##################
# Backend options
##################
kolla_httpd_keep_alive: "60"
kolla_httpd_timeout: "60"
######################
# Backend TLS options
######################
kolla_enable_tls_backend: "no"
kolla_verify_tls_backend: "yes"
kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem"
kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"
#####################
# ACME client options
#####################
acme_client_lego: "server lego {{ api_interface_address }}:{{ letsencrypt_webserver_port }}"
acme_client_servers: "{% set arr = [] %}{% if enable_letsencrypt | bool %}{{ arr.append(acme_client_lego) }}{% endif %}{{ arr }}"
####################
# Keystone options
####################
keystone_internal_fqdn: "{{ kolla_internal_fqdn }}"
keystone_external_fqdn: "{{ kolla_external_fqdn }}"
keystone_internal_url: "{{ keystone_internal_fqdn | kolla_url(internal_protocol, keystone_internal_port) }}"
keystone_public_url: "{{ keystone_external_fqdn | kolla_url(public_protocol, keystone_public_port) }}"
keystone_admin_user: "admin"
keystone_admin_project: "admin"
# Whether or not to apply changes to service user passwords when services are
# reconfigured
update_keystone_service_user_passwords: true
default_project_domain_name: "Default"
default_project_domain_id: "default"
default_user_domain_name: "Default"
default_user_domain_id: "default"
# Keystone fernet token expiry in seconds. Default is 1 day.
fernet_token_expiry: 86400
# Keystone window to allow expired fernet tokens. Default is 2 days.
fernet_token_allow_expired_window: 172800
# Keystone fernet key rotation interval in seconds. Default is sum of token
# expiry and allow expired window, 3 days. This ensures the minimum number
# of keys are active. If this interval is lower than the sum of the token
# expiry and allow expired window, multiple active keys will be necessary.
fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}"
keystone_default_user_role: "member"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
auth_url: "{{ keystone_internal_url }}"
username: "{{ keystone_admin_user }}"
password: "{{ keystone_admin_password }}"
project_name: "{{ keystone_admin_project }}"
domain_name: "default"
user_domain_name: "default"
#######################
# Glance options
#######################
glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_s3 | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
glance_backend_ceph: "no"
glance_backend_vmware: "no"
glance_backend_s3: "no"
enable_glance_image_cache: "no"
glance_backend_swift: "{{ enable_swift | bool }}"
glance_file_datadir_volume: "glance"
glance_enable_rolling_upgrade: "no"
glance_enable_property_protection: "no"
glance_enable_interoperable_image_import: "no"
glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}"
# NOTE(mnasiadka): For use in common role
glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
#######################
# Barbican options
#######################
# Valid options are [ simple_crypto, p11_crypto ]
barbican_crypto_plugin: "simple_crypto"
barbican_library_path: "/usr/lib/libCryptoki2_64.so"
#################
# Gnocchi options
#################
# Valid options are [ file, ceph, swift ]
gnocchi_backend_storage: "{% if enable_swift | bool %}swift{% else %}file{% endif %}"
# Valid options are [redis, '']
gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
gnocchi_metric_datadir_volume: "gnocchi"
#################################
# Cinder options
#################################
cinder_backend_ceph: "no"
cinder_backend_huawei: "no"
cinder_backend_huawei_xml_files: []
cinder_backend_vmwarevc_vmdk: "no"
cinder_backend_vmware_vstorage_object: "no"
cinder_volume_group: "cinder-volumes"
cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}"
# Valid options are [ '', redis, etcd ]
cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
# Valid options are [ nfs, swift, ceph, s3 ]
cinder_backup_driver: "ceph"
cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
#######################
# Cloudkitty options
#######################
# Valid options are 'sqlalchemy' or 'influxdb'. The default value is
# 'influxdb', which matches the default in Cloudkitty since the Stein release.
# When the backend is "influxdb", we also enable Influxdb.
# Also, when using 'influxdb' as the backend, we trigger the configuration/use
# of Cloudkitty storage backend version 2.
cloudkitty_storage_backend: "influxdb"
#######################
# Designate options
#######################
# Valid options are [ bind9, infoblox ]
designate_backend: "bind9"
designate_ns_record:
- "ns1.example.org"
designate_backend_external: "no"
designate_backend_external_bind9_nameservers: ""
# Valid options are [ '', redis ]
designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}"
designate_enable_notifications_sink: "no"
designate_notifications_topic_name: "notifications_designate"
#######################
# Neutron options
#######################
neutron_bgp_router_id: "1.1.1.1"
neutron_bridge_name: "{{ 'br-dvs' if neutron_plugin_agent == 'vmware_dvs' else 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}"
neutron_physical_networks: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index }}{% if not loop.last %},{% endif %}{% endfor %}"
# Comma-separated list of enabled ml2 type drivers
neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}"
# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
# NOTE: for ironic this list should also contain 'flat'
neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}"
# valid values: ["dvr", "dvr_no_external"]
neutron_compute_dvr_mode: "dvr"
computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}"
# Default DNS resolvers for virtual networks
neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4"
# Set legacy iptables to allow kernels not supporting iptables-nft
neutron_legacy_iptables: "no"
# Enable distributed floating ip for OVN deployments
neutron_ovn_distributed_fip: "no"
# SRIOV physnet:interface mappings when SRIOV is enabled
# "sriovnet1" and tunnel_interface used here as placeholders
neutron_sriov_physnet_mappings:
sriovnet1: "{{ tunnel_interface }}"
neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
# Set OVN network availability zones
neutron_ovn_availability_zones: []
# Enable OVN agent
neutron_enable_ovn_agent: "no"
#######################
# Nova options
#######################
nova_backend_ceph: "no"
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
# Valid options are [ kvm, qemu, vmware ]
nova_compute_virt_type: "kvm"
nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}"
nova_safety_upgrade: "no"
# Valid options are [ none, novnc, spice ]
nova_console: "novnc"
#######################
# Nova Database
#######################
nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
#######################
# Horizon options
#######################
# NOTE(review): presumably toggles a database-backed store for Horizon —
# confirm against the horizon role before relying on this description.
horizon_backend_database: false
# Canonical lowercase boolean — "False" resolves to the same boolean value
# in YAML 1.1/Ansible, but lowercase is consistent with the line above and
# satisfies yamllint's "truthy" rule.
horizon_keystone_multidomain: false
# Enable deploying custom horizon policy files for services that don't have a
# horizon plugin but have a policy file. Override these when you have services
# not deployed by kolla-ansible but want custom policy files deployed for them
# in horizon.
enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}"
enable_cinder_horizon_policy_file: "{{ enable_cinder }}"
enable_glance_horizon_policy_file: "{{ enable_glance }}"
enable_heat_horizon_policy_file: "{{ enable_heat }}"
enable_keystone_horizon_policy_file: "{{ enable_keystone }}"
enable_neutron_horizon_policy_file: "{{ enable_neutron }}"
enable_nova_horizon_policy_file: "{{ enable_nova }}"
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
###################
# External Ceph options
###################
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
external_ceph_cephx_enabled: "yes"
# External Ceph pool names
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_gnocchi_pool_name: "gnocchi"
ceph_nova_pool_name: "vms"
ceph_cinder_backup_user: "cinder-backup"
ceph_cinder_user: "cinder"
ceph_glance_user: "glance"
ceph_gnocchi_user: "gnocchi"
ceph_manila_user: "manila"
ceph_nova_user: "{{ ceph_cinder_user }}"
# External Ceph keyrings
ceph_cinder_keyring: "client.{{ ceph_cinder_user }}.keyring"
ceph_cinder_backup_keyring: "client.{{ ceph_cinder_backup_user }}.keyring"
ceph_glance_keyring: "client.{{ ceph_glance_user }}.keyring"
ceph_gnocchi_keyring: "client.{{ ceph_gnocchi_user }}.keyring"
ceph_manila_keyring: "client.{{ ceph_manila_user }}.keyring"
ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
#####################
# VMware support
######################
vmware_vcenter_host_ip: "127.0.0.1"
vmware_vcenter_host_username: "username"
vmware_vcenter_cluster_name: "cluster-1"
vmware_vcenter_insecure: "True"
#############################################
# MariaDB component-specific database details
#############################################
# Whether to configure haproxy to load balance
# the external MariaDB server(s)
enable_external_mariadb_load_balancer: "no"
# Whether to use pre-configured databases / users
use_preconfigured_databases: "no"
# whether to use a common, preconfigured user
# for all component databases
use_common_mariadb_user: "no"
############
# Prometheus
############
enable_prometheus_server: "{{ enable_prometheus | bool }}"
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}"
enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}"
enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}"
enable_prometheus_ceph_mgr_exporter: "no"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_openstack_exporter_external: "no"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_opensearch | bool }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
enable_prometheus_proxysql_exporter: "{{ enable_prometheus | bool and enable_proxysql | bool }}"
prometheus_alertmanager_user: "admin"
prometheus_ceph_exporter_interval: "{{ prometheus_scrape_interval }}"
prometheus_grafana_user: "grafana"
prometheus_skyline_user: "skyline"
prometheus_scrape_interval: "60s"
prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}"
prometheus_openstack_exporter_timeout: "45s"
prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}"
prometheus_cmdline_extras:
prometheus_ceph_mgr_exporter_endpoints: []
prometheus_openstack_exporter_endpoint_type: "internal"
prometheus_openstack_exporter_compute_api_version: "latest"
prometheus_libvirt_exporter_interval: "60s"
####################
# InfluxDB options
####################
influxdb_address: "{{ kolla_internal_fqdn }}"
influxdb_datadir_volume: "influxdb"
influxdb_internal_endpoint: "{{ kolla_internal_fqdn | kolla_url(internal_protocol, influxdb_http_port) }}"
#########################
# Internal Image options
#########################
kolla_base_distro_version_default_map: {
"centos": "stream9",
"debian": "bookworm",
"rocky": "9",
"ubuntu": "noble",
}
distro_python_version: "3"
kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}"
#############
# S3 options
#############
# Common options for S3 Cinder Backup and Glance S3 backend.
# All four values are intentionally unset (null) by default; deployments
# that use an S3 backend must override them (e.g. in globals.yml).
s3_url:
s3_bucket:
s3_access_key:
s3_secret_key:
##########
# Telegraf
##########
# Configure telegraf to use the docker daemon itself as an input for
# telemetry data.
telegraf_enable_docker_input: "no"
# Ironic coordination backend (not Telegraf-related despite its position).
# Valid options are [ '', redis, etcd ]
# Prefers Redis when enabled, falls back to etcd when enabled, otherwise
# no coordination backend is configured.
ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
##########
# Octavia
##########
# Whether to run Kolla-Ansible's automatic configuration for Octavia.
# Enabled by default only when the amphora provider driver is in use.
# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no`
# and keep your other Octavia config like before.
octavia_auto_configure: "{{ 'amphora' in octavia_provider_drivers }}"
# Octavia network type options are [ tenant, provider ]
# * tenant indicates that we will create a tenant network and a network
#   interface on the Octavia worker nodes for communication with amphorae.
# * provider indicates that we will create a flat or vlan provider network.
#   In this case octavia_network_interface should be set to a network interface
#   on the Octavia worker nodes on the same provider network.
octavia_network_type: "provider"
###################################
# Identity federation configuration
###################################
# Here we configure all of the IdPs meta information that will be required to implement identity federation with OpenStack Keystone.
# We require the administrator to enter the following metadata:
# * name (internal name of the IdP in Keystone);
# * openstack_domain (the domain in Keystone that the IdP belongs to);
# * protocol (the federated protocol used by the IdP; e.g. openid or saml);
# * identifier (the IdP identifier; e.g. https://accounts.google.com);
# * public_name (the public name that will be shown for users in Horizon);
# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration);
# * metadata_folder (folder containing all the identity provider metadata as JSON files named as the identifier without the protocol
#   and with '/' escaped as %2F, followed by '.provider', '.client' or '.conf'; e.g. accounts.google.com.provider. Note that all .conf,
#   .provider and .client JSON files must be present in the folder; even if you don't override anything in the .conf JSON file, you must leave it as an empty JSON '{}');
# * certificate_file (the path to the Identity Provider certificate file; the file must be named as 'certificate-key-id.pem',
#   e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem. You can find the key-id in the Identity Provider's '.well-known/openid-configuration' jwks_uri as kid);
#
# The IdPs meta information is to be presented to Kolla-Ansible as in the following example:
# keystone_identity_providers:
#   - name: "myidp1"
#     openstack_domain: "my-domain"
#     protocol: "openid"
#     identifier: "https://accounts.google.com"
#     public_name: "Authenticate via myidp1"
#     attribute_mapping: "mappingId1"
#     metadata_folder: "path/to/metadata/folder"
#     certificate_file: "path/to/certificate/file.pem"
#
# We also need to configure the attribute mapping that is used by IdPs.
# The configuration of attribute mappings is a list of objects, where each
# object must have a 'name' (that maps to the 'attribute_mapping' of the IdP
# object in the IdPs set), and the 'file' with a fully qualified path to a mapping file.
# keystone_identity_mappings:
#   - name: "mappingId1"
#     file: "/fully/qualified/path/to/mapping/json/file/for/mappingId1"
#   - name: "mappingId2"
#     file: "/fully/qualified/path/to/mapping/json/file/for/mappingId2"
#   - name: "mappingId3"
#     file: "/fully/qualified/path/to/mapping/json/file/for/mappingId3"
# Both lists are empty by default, i.e. no identity federation configured.
keystone_identity_providers: []
keystone_identity_mappings: []
####################
# Corosync options
####################
# UDP port used by Corosync for cluster communication.
hacluster_corosync_port: 5405