67607c679e
Added c9s jobs as non-voting, as agreed at the PTG, to focus on Rocky Linux 9.
Since both CS9 and RL9 have higher default fd limit (1073741816 vs
1048576 in CS8) - lowering that for:
* RMQ - because Erlang allocates memory based on this (see [1], [2], [3]).
* MariaDB - because Galera cluster bootstrap failed
Changed openvswitch_db healthcheck, because for unknown reason
the usual check (using lsof on /run/openvswitch/db.sock) is hanging
on "Bad file descriptor" (even with privileged: true).
[1]: https://github.com/docker-library/rabbitmq/issues/545
[2]: https://github.com/rabbitmq/cluster-operator/issues/959#issuecomment-1043280324
[3]: a8b627aaed
Depends-On: https://review.opendev.org/c/openstack/tenks/+/856296
Depends-On: https://review.opendev.org/c/openstack/kolla-ansible/+/856328
Depends-On: https://review.opendev.org/c/openstack/kolla-ansible/+/856443
Needed-By: https://review.opendev.org/c/openstack/kolla/+/836664
Co-Authored-By: Michał Nasiadka <mnasiadka@gmail.com>
Change-Id: I3f7b480519aea38c3927bee7fb2c23eea178554d
1305 lines
51 KiB
YAML
---
# The options in this file can be overridden in 'globals.yml'

# The "temp" files that are created before merge need to stay persistent due
# to the fact that ansible will register a "change" if it has to create them
# again. Persistent files allow for idempotency
container_config_directory: "/var/lib/kolla/config_files"

# The directory on the deploy host containing globals.yml.
node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}"

# The directory to merge custom config files with kolla's config files
node_custom_config: "{{ node_config }}/config"

# The directory to store the config files on the destination node
node_config_directory: "/etc/kolla"

# The user and group which own node_config_directory; you can use a non-root
# user to deploy kolla
config_owner_user: "root"
config_owner_group: "root"
###################
# Ansible options
###################

# This variable is used as the "filter" argument for the setup module. For
# instance, if one wants to remove/ignore all Neutron interface facts:
# kolla_ansible_setup_filter: "ansible_[!qt]*"
# By default, we do not provide a filter.
kolla_ansible_setup_filter: "{{ omit }}"

# This variable is used as the "gather_subset" argument for the setup module.
# For instance, if one wants to avoid collecting facts via facter:
# kolla_ansible_setup_gather_subset: "all,!facter"
# By default, we do not provide a gather subset.
kolla_ansible_setup_gather_subset: "{{ omit }}"
###################
# Kolla options
###################
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"

# Valid options are ['centos', 'debian', 'rhel', 'ubuntu']
kolla_base_distro: "centos"

kolla_internal_vip_address: "{{ kolla_internal_address | default('') }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
kolla_same_external_internal_vip: "{{ kolla_external_vip_address == kolla_internal_vip_address }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}"

kolla_dev_repos_directory: "/opt/stack/"
kolla_dev_repos_git: "https://opendev.org/openstack"
kolla_dev_repos_pull: "no"
kolla_dev_mode: "no"
kolla_source_version: "{% if openstack_release == 'master' %}master{% else %}stable/{{ openstack_release }}{% endif %}"

# Proxy settings for containers such as magnum that need internet access
container_http_proxy: ""
container_https_proxy: ""
container_no_proxy: "localhost,127.0.0.1"

container_proxy_no_proxy_entries:
  - "{{ container_no_proxy }}"
  - "{{ api_interface_address }}"
  - "{{ kolla_internal_vip_address | default('') }}"

container_proxy:
  http_proxy: "{{ container_http_proxy }}"
  https_proxy: "{{ container_https_proxy }}"
  no_proxy: "{{ container_proxy_no_proxy_entries | select | join(',') }}"

# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be an override.
api_interface_address: "{{ 'api' | kolla_address }}"
####################
# Database options
####################
database_address: "{{ kolla_internal_fqdn }}"
database_user: "root"
# Kept as a string because it is interpolated into connection URLs.
database_port: "3306"
database_connection_recycle_time: 10
database_max_pool_size: 1
####################
# Docker options
####################
docker_registry_email:
docker_registry: "quay.io"
docker_namespace: "openstack.kolla"
docker_registry_username:
# Please read the docs carefully before applying docker_registry_insecure.
docker_registry_insecure: "no"
docker_runtime_directory: ""
# Docker client timeout in seconds.
docker_client_timeout: 120

# Docker networking options
docker_disable_default_iptables_rules: "yes"
docker_disable_default_network: "{{ docker_disable_default_iptables_rules }}"
docker_disable_ip_forward: "{{ docker_disable_default_iptables_rules }}"

# Retention settings for Docker logs
docker_log_max_file: "5"
docker_log_max_size: "50m"

# Valid options are [ no, on-failure, always, unless-stopped ]
docker_restart_policy: "unless-stopped"

# '0' means unlimited retries (applies only to 'on-failure' policy)
docker_restart_policy_retry: "10"

# Extra docker options for Zun
docker_configure_for_zun: "no"
docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
docker_zun_config:
  cluster-store: etcd://{% for host in groups.get('etcd', []) %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['etcd_client_port'] }}{% if not loop.last %},{% endif %}{% endfor %}

# Extra containerd options for Zun
containerd_configure_for_zun: "no"

# Enable Ceph backed Cinder Volumes for zun
zun_configure_for_cinder_ceph: "no"

# 42463 is the static group id of the zun user in the Zun image.
# If users customize this value on building the Zun images,
# they need to change this config accordingly.
containerd_grpc_gid: 42463

# Timeout after Docker sends SIGTERM before sending SIGKILL.
docker_graceful_timeout: 60

# Common options used throughout Docker
docker_common_options:
  auth_email: "{{ docker_registry_email }}"
  auth_password: "{{ docker_registry_password }}"
  auth_registry: "{{ docker_registry }}"
  auth_username: "{{ docker_registry_username }}"
  environment:
    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
  restart_policy: "{{ docker_restart_policy }}"
  restart_retries: "{{ docker_restart_policy_retry }}"
  graceful_timeout: "{{ docker_graceful_timeout }}"
  client_timeout: "{{ docker_client_timeout }}"
####################
# Dimensions options
####################
# Dimension options for Docker Containers
# NOTE(mnasiadka): Lower 1073741816 nofile limit on EL9 (RHEL9/CentOS Stream 9/Rocky Linux 9)
# fixes at least rabbitmq and mariadb
default_container_dimensions: "{{ default_container_dimensions_el9 if ansible_facts.os_family == 'RedHat' else '{}' }}"
default_container_dimensions_el9:
  ulimits:
    nofile:
      soft: 1048576
      hard: 1048576
#####################
# Healthcheck options
#####################
enable_container_healthchecks: "yes"
# Healthcheck options for Docker containers
# interval/timeout/start_period are in seconds
default_container_healthcheck_interval: 30
default_container_healthcheck_timeout: 30
default_container_healthcheck_retries: 3
default_container_healthcheck_start_period: 5
#######################
# Extra volumes options
#######################
# Extra volumes for Docker Containers
default_extra_volumes: []
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
#######################
# Elasticsearch Options
#######################
elasticsearch_datadir_volume: "elasticsearch"

elasticsearch_internal_endpoint: "{{ internal_protocol }}://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
###################
# Messaging options
###################
# oslo.messaging rpc transport valid options are [ rabbit, amqp ]
om_rpc_transport: "rabbit"
om_rpc_user: "{{ rabbitmq_user }}"
om_rpc_password: "{{ rabbitmq_password }}"
om_rpc_port: "{{ rabbitmq_port }}"
om_rpc_group: "rabbitmq"
om_rpc_vhost: "/"

rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}"

# oslo.messaging notify transport valid options are [ rabbit ]
om_notify_transport: "rabbit"
om_notify_user: "{{ rabbitmq_user }}"
om_notify_password: "{{ rabbitmq_password }}"
om_notify_port: "{{ rabbitmq_port }}"
om_notify_group: "rabbitmq"
om_notify_vhost: "/"

notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}"

# Whether to enable TLS for oslo.messaging communication with RabbitMQ.
om_enable_rabbitmq_tls: "{{ rabbitmq_enable_tls | bool }}"
# CA certificate bundle in containers using oslo.messaging with RabbitMQ TLS.
om_rabbitmq_cacert: "{{ rabbitmq_cacert }}"
####################
# Networking options
####################
network_interface: "eth0"
neutron_external_interface: "eth1"
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
swift_storage_interface: "{{ network_interface }}"
swift_replication_interface: "{{ swift_storage_interface }}"
migration_interface: "{{ api_interface }}"
tunnel_interface: "{{ network_interface }}"
octavia_network_interface: "{{ 'o-hm0' if octavia_network_type == 'tenant' else api_interface }}"
bifrost_network_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
dpdk_tunnel_interface: "{{ neutron_external_interface }}"
ironic_http_interface: "{{ api_interface }}"

# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
network_address_family: "ipv4"
api_address_family: "{{ network_address_family }}"
storage_address_family: "{{ network_address_family }}"
swift_storage_address_family: "{{ storage_address_family }}"
swift_replication_address_family: "{{ swift_storage_address_family }}"
migration_address_family: "{{ api_address_family }}"
tunnel_address_family: "{{ network_address_family }}"
octavia_network_address_family: "{{ api_address_family }}"
bifrost_network_address_family: "{{ network_address_family }}"
dns_address_family: "{{ network_address_family }}"
dpdk_tunnel_address_family: "{{ network_address_family }}"
ironic_http_address_family: "{{ api_address_family }}"

migration_interface_address: "{{ 'migration' | kolla_address }}"
tunnel_interface_address: "{{ 'tunnel' | kolla_address }}"
octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}"
dpdk_tunnel_interface_address: "{{ 'dpdk_tunnel' | kolla_address }}"
ironic_http_interface_address: "{{ 'ironic_http' | kolla_address }}"

# Valid options are [ openvswitch, ovn, linuxbridge, vmware_nsxv, vmware_nsxv3, vmware_nsxp, vmware_dvs ]
# Do note linuxbridge is *EXPERIMENTAL* in Neutron since Zed and it requires extra tweaks to config to be usable.
# For details, see: https://docs.openstack.org/neutron/latest/admin/config-experimental-framework.html
neutron_plugin_agent: "openvswitch"

# Valid options are [ internal, infoblox ]
neutron_ipam_driver: "internal"
# The default ports used by each service.
# The list should be in alphabetical order
aodh_internal_fqdn: "{{ kolla_internal_fqdn }}"
aodh_external_fqdn: "{{ kolla_external_fqdn }}"
aodh_api_port: "8042"
aodh_api_listen_port: "{{ aodh_api_port }}"

barbican_internal_fqdn: "{{ kolla_internal_fqdn }}"
barbican_external_fqdn: "{{ kolla_external_fqdn }}"
barbican_api_port: "9311"
barbican_api_listen_port: "{{ barbican_api_port }}"

blazar_api_port: "1234"

ceph_rgw_internal_fqdn: "{{ kolla_internal_fqdn }}"
ceph_rgw_external_fqdn: "{{ kolla_external_fqdn }}"
ceph_rgw_port: "6780"

cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_api_port: "8776"
cinder_api_listen_port: "{{ cinder_api_port }}"

cloudkitty_api_port: "8889"

collectd_udp_port: "25826"

cyborg_api_port: "6666"

designate_internal_fqdn: "{{ kolla_internal_fqdn }}"
designate_external_fqdn: "{{ kolla_external_fqdn }}"
designate_api_port: "9001"
designate_api_listen_port: "{{ designate_api_port }}"
designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953"

elasticsearch_port: "9200"

etcd_client_port: "2379"
etcd_peer_port: "2380"
etcd_enable_tls: "{{ kolla_enable_tls_backend }}"
etcd_protocol: "{{ 'https' if etcd_enable_tls | bool else 'http' }}"

fluentd_syslog_port: "5140"

freezer_api_port: "9090"

glance_internal_fqdn: "{{ kolla_internal_fqdn }}"
glance_external_fqdn: "{{ kolla_external_fqdn }}"
glance_api_port: "9292"
glance_api_listen_port: "{{ glance_api_port }}"
glance_tls_proxy_stats_port: "9293"

gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}"
gnocchi_external_fqdn: "{{ kolla_external_fqdn }}"
gnocchi_api_port: "8041"
gnocchi_api_listen_port: "{{ gnocchi_api_port }}"

grafana_server_port: "3000"

haproxy_stats_port: "1984"
haproxy_monitor_port: "61313"

heat_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_port: "8004"
heat_api_listen_port: "{{ heat_api_port }}"
heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_cfn_port: "8000"
heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}"

horizon_port: "80"
horizon_tls_port: "443"
horizon_listen_port: "{{ horizon_tls_port if horizon_enable_tls_backend | bool else horizon_port }}"

influxdb_http_port: "8086"

ironic_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_api_port: "6385"
ironic_api_listen_port: "{{ ironic_api_port }}"
ironic_inspector_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_inspector_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_inspector_port: "5050"
ironic_inspector_listen_port: "{{ ironic_inspector_port }}"
ironic_http_port: "8089"

iscsi_port: "3260"

kafka_port: "9092"

keystone_public_port: "5000"
keystone_public_listen_port: "{{ keystone_public_port }}"
# NOTE(yoctozepto): Admin port settings are kept only for upgrade compatibility.
# TODO(yoctozepto): Remove after Zed.
keystone_admin_port: "35357"
keystone_admin_listen_port: "{{ keystone_admin_port }}"
keystone_ssh_port: "8023"

kibana_server_port: "5601"

kuryr_port: "23750"
magnum_api_port: "9511"

manila_api_port: "8786"

mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
mariadb_clustercheck_port: "4569"
mariadb_monitor_user: "{{ 'monitor' if enable_proxysql | bool else 'haproxy' }}"

mariadb_default_database_shard_id: 0
mariadb_default_database_shard_hosts: "{% set default_shard = [] %}{% for host in groups['mariadb'] %}{% if hostvars[host]['mariadb_shard_id'] is not defined or hostvars[host]['mariadb_shard_id'] == mariadb_default_database_shard_id %}{{ default_shard.append(host) }}{% endif %}{% endfor %}{{ default_shard }}"
mariadb_shard_id: "{{ mariadb_default_database_shard_id }}"
mariadb_shard_name: "shard_{{ mariadb_shard_id }}"
mariadb_shard_group: "mariadb_{{ mariadb_shard_name }}"
mariadb_loadbalancer: "{{ 'proxysql' if enable_proxysql | bool else 'haproxy' }}"
mariadb_shard_root_user_prefix: "root_shard_"
mariadb_shard_backup_user_prefix: "backup_shard_"
mariadb_shards_info: "{{ groups['mariadb'] | database_shards_info() }}"

masakari_api_port: "15868"

memcached_port: "11211"

mistral_api_port: "8989"

monasca_api_port: "8070"
monasca_log_api_port: "{{ monasca_api_port }}"
monasca_agent_forwarder_port: "17123"
monasca_agent_statsd_port: "8125"

murano_api_port: "8082"

neutron_internal_fqdn: "{{ kolla_internal_fqdn }}"
neutron_external_fqdn: "{{ kolla_external_fqdn }}"
neutron_server_port: "9696"
neutron_server_listen_port: "{{ neutron_server_port }}"
neutron_tls_proxy_stats_port: "9697"

nova_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_external_fqdn: "{{ kolla_external_fqdn }}"
nova_api_port: "8774"
nova_api_listen_port: "{{ nova_api_port }}"
nova_metadata_port: "8775"
nova_metadata_listen_port: "{{ nova_metadata_port }}"
nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_novncproxy_port: "6080"
nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}"
nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}"
nova_spicehtml5proxy_port: "6082"
nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}"
nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_serialproxy_port: "6083"
nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}"
nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"

octavia_internal_fqdn: "{{ kolla_internal_fqdn }}"
octavia_external_fqdn: "{{ kolla_external_fqdn }}"
octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
octavia_health_manager_port: "5555"

ovn_nb_db_port: "6641"
ovn_sb_db_port: "6642"
ovn_nb_connection: "{% for host in groups['ovn-nb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_nb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"
ovn_sb_connection: "{% for host in groups['ovn-sb-db'] %}tcp:{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ ovn_sb_db_port }}{% if not loop.last %},{% endif %}{% endfor %}"

outward_rabbitmq_port: "5674"
outward_rabbitmq_management_port: "15674"
outward_rabbitmq_cluster_port: "25674"
outward_rabbitmq_epmd_port: "4371"
outward_rabbitmq_prometheus_port: "15694"

ovsdb_port: "6640"
placement_internal_fqdn: "{{ kolla_internal_fqdn }}"
placement_external_fqdn: "{{ kolla_external_fqdn }}"
# Default Placement API port of 8778 already in use
placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"

prometheus_port: "9091"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
prometheus_memcached_exporter_port: "9150"
prometheus_rabbitmq_exporter_port: "{{ rabbitmq_prometheus_port }}"
# Default cadvisor port of 8080 already in use
prometheus_cadvisor_port: "18080"
prometheus_fluentd_integration_port: "24231"
prometheus_libvirt_exporter_port: "9177"
prometheus_etcd_integration_port: "{{ etcd_client_port }}"

# Prometheus alertmanager ports
prometheus_alertmanager_port: "9093"
prometheus_alertmanager_cluster_port: "9094"

# Prometheus MSTeams port
prometheus_msteams_port: "9095"

# Prometheus openstack-exporter ports
prometheus_openstack_exporter_port: "9198"
prometheus_elasticsearch_exporter_port: "9108"

# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"

proxysql_admin_port: "6032"

rabbitmq_port: "{{ '5671' if rabbitmq_enable_tls | bool else '5672' }}"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
rabbitmq_epmd_port: "4369"
rabbitmq_prometheus_port: "15692"

redis_port: "6379"
redis_sentinel_port: "26379"

sahara_api_port: "8386"

senlin_internal_fqdn: "{{ kolla_internal_fqdn }}"
senlin_external_fqdn: "{{ kolla_external_fqdn }}"
senlin_api_port: "8778"
senlin_api_listen_port: "{{ senlin_api_port }}"

skydive_analyzer_port: "8085"
skydive_agents_port: "8090"

solum_application_deployment_port: "9777"
solum_image_builder_port: "9778"

storm_nimbus_thrift_port: 6627
storm_supervisor_thrift_port: 6628
# Storm will run up to (end - start) + 1 workers per worker host. Here
# we reserve ports for those workers, and implicitly define the maximum
# number of workers per host.
storm_worker_port_range:
  start: 6700
  end: 6703

swift_internal_fqdn: "{{ kolla_internal_fqdn }}"
swift_external_fqdn: "{{ kolla_external_fqdn }}"
swift_proxy_server_port: "8080"
swift_proxy_server_listen_port: "{{ swift_proxy_server_port }}"
swift_object_server_port: "6000"
swift_account_server_port: "6001"
swift_container_server_port: "6002"
swift_rsync_port: "10873"

syslog_udp_port: "{{ fluentd_syslog_port }}"

tacker_server_port: "9890"

trove_api_port: "8779"

venus_api_port: "10010"

watcher_api_port: "9322"

zookeeper_client_port: "2181"
zookeeper_peer_port: "2888"
zookeeper_quorum_port: "3888"

zun_api_port: "9517"
zun_wsproxy_port: "6784"
zun_wsproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
zun_cni_daemon_port: "9036"

vitrage_api_port: "8999"

public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
# TODO(yoctozepto): Remove after Zed. Kept for compatibility only.
admin_protocol: "{{ internal_protocol }}"
##################
# Firewall options
##################
enable_external_api_firewalld: "false"
external_api_firewalld_zone: "public"
####################
# OpenStack options
####################
openstack_release: "master"
# Docker image tag used by default.
openstack_tag: "{{ openstack_release }}-{{ kolla_base_distro }}-{{ kolla_base_distro_version }}{{ openstack_tag_suffix }}"
openstack_tag_suffix: ""
openstack_logging_debug: "False"

openstack_region_name: "RegionOne"

# Defines the previous release name used to pin versions during the rolling
# upgrade process.
openstack_previous_release_name: "yoga"

# A list of policy file formats that are supported by Oslo.policy
supported_policy_format_list:
  - policy.yaml
  - policy.json

# In the context of multi-regions, list here the name of all your regions.
multiple_regions_names:
  - "{{ openstack_region_name }}"

openstack_service_workers: "{{ [ansible_facts.processor_vcpus, 5] | min }}"
openstack_service_rpc_workers: "{{ [ansible_facts.processor_vcpus, 3] | min }}"

# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"

# Optionally change the path to sysctl.conf modified by Kolla Ansible plays.
kolla_sysctl_conf_path: /etc/sysctl.conf

# Endpoint type used to connect with OpenStack services with ansible modules.
# Valid options are [ public, internal ]
openstack_interface: "internal"

# Openstack CA certificate bundle file
# CA bundle file must be added to both the Horizon and Kolla Toolbox containers
openstack_cacert: ""
# Enable core OpenStack services. This includes:
# glance, keystone, neutron, nova, heat, and horizon.
enable_openstack_core: "yes"

# These roles are required for Kolla to be operational, however a savvy deployer
# could disable some of these required roles and run their own services.
enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
enable_loadbalancer: "{{ enable_haproxy | bool or enable_keepalived | bool or enable_proxysql | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
enable_keystone_federation: "{{ (keystone_identity_providers | length > 0) and (keystone_identity_mappings | length > 0) }}"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
enable_nova: "{{ enable_openstack_core | bool }}"
enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"
enable_outward_rabbitmq: "{{ enable_murano | bool }}"

# NOTE: Most memcached clients handle load-balancing via client side
# hashing (consistent or not) logic, so going under the covers and messing
# with things that the clients are not aware of is generally wrong
enable_haproxy_memcached: "no"
# Additional optional OpenStack features and services are specified here
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_ceilometer_prometheus_pushgateway: "no"
enable_cells: "no"
enable_central_logging: "no"
enable_ceph_rgw: "no"
enable_ceph_rgw_loadbalancer: "{{ enable_ceph_rgw | bool }}"
enable_cinder: "no"
enable_cinder_backup: "yes"
enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool }}"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cinder_backend_quobyte: "no"
enable_cinder_backend_pure_iscsi: "no"
enable_cinder_backend_pure_fc: "no"
enable_cloudkitty: "no"
enable_collectd: "no"
enable_cyborg: "no"
enable_designate: "no"
enable_etcd: "no"
enable_fluentd: "yes"
enable_freezer: "no"
enable_gnocchi: "no"
enable_gnocchi_statsd: "no"
enable_grafana: "{{ enable_monasca | bool }}"
enable_grafana_external: "{{ enable_grafana | bool }}"
enable_hacluster: "{{ enable_masakari_hostmonitor | bool }}"
enable_heat: "{{ enable_openstack_core | bool }}"
enable_horizon: "{{ enable_openstack_core | bool }}"
enable_horizon_blazar: "{{ enable_blazar | bool }}"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_designate: "{{ enable_designate | bool }}"
enable_horizon_freezer: "{{ enable_freezer | bool }}"
enable_horizon_heat: "{{ enable_heat | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_masakari: "{{ enable_masakari | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_monasca: "{{ enable_monasca | bool }}"
enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
enable_horizon_octavia: "{{ enable_octavia | bool }}"
enable_horizon_sahara: "{{ enable_sahara | bool }}"
enable_horizon_senlin: "{{ enable_senlin | bool }}"
enable_horizon_solum: "{{ enable_solum | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_horizon_zun: "{{ enable_zun | bool }}"
enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
enable_ironic: "no"
enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
# TODO(yoctozepto): Remove the deprecated enable_ironic_pxe_uefi in Zed.
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ enable_cinder | bool and enable_cinder_backend_iscsi | bool }}"
enable_kafka: "{{ enable_monasca | bool }}"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
enable_manila_backend_hnas: "no"
enable_manila_backend_cephfs_native: "no"
enable_manila_backend_cephfs_nfs: "no"
enable_manila_backend_glusterfs_nfs: "no"
enable_mariabackup: "no"
enable_masakari: "no"
enable_masakari_instancemonitor: "{{ enable_masakari | bool }}"
enable_masakari_hostmonitor: "{{ enable_masakari | bool }}"
enable_mistral: "no"
enable_monasca: "no"
enable_multipathd: "no"
enable_murano: "no"
enable_neutron_vpnaas: "no"
enable_neutron_sriov: "no"
enable_neutron_mlnx: "no"
enable_neutron_dvr: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_neutron_bgp_dragent: "no"
enable_neutron_provider_networks: "no"
enable_neutron_segments: "no"
enable_neutron_packet_logging: "no"
enable_neutron_sfc: "no"
enable_neutron_trunk: "no"
enable_neutron_metering: "no"
enable_neutron_infoblox_ipam_agent: "no"
enable_neutron_port_forwarding: "no"
enable_nova_libvirt_container: "{{ nova_compute_virt_type in ['kvm', 'qemu'] }}"
enable_nova_serialconsole_proxy: "no"
enable_nova_ssh: "yes"
enable_octavia: "no"
enable_octavia_driver_agent: "{{ enable_octavia | bool and neutron_plugin_agent == 'ovn' }}"
enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
enable_ovn: "{{ enable_neutron | bool and neutron_plugin_agent == 'ovn' }}"
enable_ovs_dpdk: "no"
enable_osprofiler: "no"
enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
enable_prometheus: "no"
enable_proxysql: "no"
enable_redis: "no"
enable_sahara: "no"
enable_senlin: "no"
enable_skydive: "no"
enable_solum: "no"
enable_storm: "{{ enable_monasca | bool and monasca_enable_alerting_pipeline | bool }}"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
enable_tacker: "no"
enable_telegraf: "no"
enable_trove: "no"
enable_trove_singletenant: "no"
enable_venus: "no"
enable_vitrage: "no"
enable_watcher: "no"
enable_zookeeper: "{{ enable_kafka | bool or enable_storm | bool }}"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"
designate_keystone_user: "designate"
ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
placement_keystone_user: "placement"
murano_keystone_user: "murano"
cinder_keystone_user: "cinder"
glance_keystone_user: "glance"

# Nova fake driver and the number of fake drivers per compute node
enable_nova_fake: "no"
num_nova_fake_per_node: 5

# Clean images options are specified here
enable_destroy_images: "no"
####################
# Monasca options
####################
monasca_enable_alerting_pipeline: true

# Send logs from the control plane to the Monasca API. Monasca will then persist
# them in Elasticsearch. If this is disabled, control plane logs will be sent
# directly to Elasticsearch.
monasca_ingest_control_plane_logs: true

monasca_api_internal_base_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"
monasca_api_public_base_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_api_port }}"

monasca_log_api_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"
monasca_log_api_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ monasca_log_api_port }}"

# The OpenStack username used by the Monasca Agent and the Fluentd Monasca
# plugin to post logs and metrics from the control plane to Monasca.
monasca_agent_user: "monasca-agent"

# The OpenStack project which the control plane logs and metrics are
# tagged with. Only users with the monasca read only user role, or higher,
# can access these from the Monasca APIs.
monasca_control_plane_project: "monasca_control_plane"
####################
|
|
# Global Options
|
|
####################
|
|
# List of containers to skip during stop command in YAML list format
|
|
# skip_stop_containers:
|
|
# - container1
|
|
# - container2
|
|
skip_stop_containers: []
|
|
|
|
####################
|
|
# Logging options
|
|
####################
|
|
|
|
elasticsearch_address: "{{ kolla_internal_fqdn }}"
|
|
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'elasticsearch') else 'no' }}"
|
|
|
|
# If using Curator, an actions file will need to be defined. Please see
|
|
# the documentation.
|
|
enable_elasticsearch_curator: "no"
|
|
|
|
enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
|
|
enable_kibana_external: "{{ enable_kibana | bool }}"
|
|
|
|
####################
|
|
# Redis options
|
|
####################
|
|
redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}admin:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}{{ redis_connection_string_extras }}"
|
|
redis_connection_string_extras: "&db=0&socket_timeout=60&retry_on_timeout=yes"
|
|
|
|
####################
|
|
# Osprofiler options
|
|
####################
|
|
# valid values: ["elasticsearch", "redis"]
|
|
osprofiler_backend: "elasticsearch"
|
|
elasticsearch_connection_string: "elasticsearch://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
|
|
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else elasticsearch_connection_string }}"
|
|
|
|
####################
|
|
# RabbitMQ options
|
|
####################
|
|
rabbitmq_user: "openstack"
|
|
rabbitmq_monitoring_user: ""
|
|
outward_rabbitmq_user: "openstack"
|
|
# Whether to enable TLS encryption for RabbitMQ client-server communication.
|
|
rabbitmq_enable_tls: "no"
|
|
# CA certificate bundle in RabbitMQ container.
|
|
rabbitmq_cacert: "/etc/ssl/certs/{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
|
|
|
|
####################
|
|
# HAProxy options
|
|
####################
|
|
haproxy_user: "openstack"
|
|
haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}"
|
|
kolla_enable_tls_internal: "no"
|
|
kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}"
|
|
kolla_certificates_dir: "{{ node_config }}/certificates"
|
|
kolla_external_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy.pem"
|
|
kolla_internal_fqdn_cert: "{{ kolla_certificates_dir }}/haproxy-internal.pem"
|
|
kolla_admin_openrc_cacert: ""
|
|
kolla_copy_ca_into_containers: "no"
|
|
haproxy_backend_cacert: "{{ 'ca-certificates.crt' if kolla_base_distro in ['debian', 'ubuntu'] else 'ca-bundle.trust.crt' }}"
|
|
haproxy_backend_cacert_dir: "/etc/ssl/certs"
|
|
|
|
##################
|
|
# Backend options
|
|
##################
|
|
kolla_httpd_keep_alive: "60"
|
|
kolla_httpd_timeout: "60"
|
|
|
|
######################
|
|
# Backend TLS options
|
|
######################
|
|
kolla_enable_tls_backend: "no"
|
|
kolla_verify_tls_backend: "yes"
|
|
kolla_tls_backend_cert: "{{ kolla_certificates_dir }}/backend-cert.pem"
|
|
kolla_tls_backend_key: "{{ kolla_certificates_dir }}/backend-key.pem"
|
|
|
|
#####################
|
|
# ACME client options
|
|
#####################
|
|
acme_client_servers: []
|
|
|
|
####################
|
|
# Kibana options
|
|
####################
|
|
kibana_user: "kibana"
|
|
kibana_log_prefix: "flog"
|
|
|
|
####################
|
|
# Keystone options
|
|
####################
|
|
keystone_internal_fqdn: "{{ kolla_internal_fqdn }}"
|
|
keystone_external_fqdn: "{{ kolla_external_fqdn }}"
|
|
|
|
# TODO(yoctozepto): Remove after Zed. Kept for compatibility only.
|
|
keystone_admin_url: "{{ keystone_internal_url }}"
|
|
keystone_internal_url: "{{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
|
|
keystone_public_url: "{{ public_protocol }}://{{ keystone_external_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
|
|
|
|
keystone_admin_user: "admin"
|
|
keystone_admin_project: "admin"
|
|
|
|
default_project_domain_name: "Default"
|
|
default_project_domain_id: "default"
|
|
|
|
default_user_domain_name: "Default"
|
|
default_user_domain_id: "default"
|
|
|
|
# Keystone fernet token expiry in seconds. Default is 1 day.
|
|
fernet_token_expiry: 86400
|
|
# Keystone window to allow expired fernet tokens. Default is 2 days.
|
|
fernet_token_allow_expired_window: 172800
|
|
# Keystone fernet key rotation interval in seconds. Default is sum of token
|
|
# expiry and allow expired window, 3 days. This ensures the minimum number
|
|
# of keys are active. If this interval is lower than the sum of the token
|
|
# expiry and allow expired window, multiple active keys will be necessary.
|
|
fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}"
|
|
|
|
keystone_default_user_role: "_member_"
|
|
|
|
# OpenStack authentication string. You should only need to override these if you
|
|
# are changing the admin tenant/project or user.
|
|
openstack_auth:
|
|
auth_url: "{{ keystone_internal_url }}"
|
|
username: "{{ keystone_admin_user }}"
|
|
password: "{{ keystone_admin_password }}"
|
|
user_domain_name: "{{ default_user_domain_name }}"
|
|
system_scope: "all"
|
|
|
|
#######################
|
|
# Glance options
|
|
#######################
|
|
glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
|
|
glance_backend_ceph: "no"
|
|
glance_backend_vmware: "no"
|
|
enable_glance_image_cache: "no"
|
|
glance_backend_swift: "{{ enable_swift | bool }}"
|
|
glance_file_datadir_volume: "glance"
|
|
glance_enable_rolling_upgrade: "no"
|
|
glance_enable_property_protection: "no"
|
|
glance_enable_interoperable_image_import: "no"
|
|
glance_api_hosts: "{{ [groups['glance-api'] | first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}"
|
|
# NOTE(mnasiadka): For use in common role
|
|
glance_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
|
|
|
|
glance_internal_endpoint: "{{ internal_protocol }}://{{ glance_internal_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
|
|
glance_public_endpoint: "{{ public_protocol }}://{{ glance_external_fqdn | put_address_in_context('url') }}:{{ glance_api_port }}"
|
|
|
|
#######################
|
|
# Barbican options
|
|
#######################
|
|
# Valid options are [ simple_crypto, p11_crypto ]
|
|
barbican_crypto_plugin: "simple_crypto"
|
|
barbican_library_path: "/usr/lib/libCryptoki2_64.so"
|
|
|
|
barbican_internal_endpoint: "{{ internal_protocol }}://{{ barbican_internal_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
|
|
barbican_public_endpoint: "{{ public_protocol }}://{{ barbican_external_fqdn | put_address_in_context('url') }}:{{ barbican_api_port }}"
|
|
|
|
#################
|
|
# Gnocchi options
|
|
#################
|
|
# Valid options are [ file, ceph, swift ]
|
|
gnocchi_backend_storage: "{% if enable_swift | bool %}swift{% else %}file{% endif %}"
|
|
|
|
# Valid options are [redis, '']
|
|
gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
|
|
gnocchi_metric_datadir_volume: "gnocchi"
|
|
|
|
#################################
|
|
# Cinder options
|
|
#################################
|
|
cinder_backend_ceph: "no"
|
|
cinder_backend_vmwarevc_vmdk: "no"
|
|
cinder_backend_vmware_vstorage_object: "no"
|
|
cinder_volume_group: "cinder-volumes"
|
|
cinder_target_helper: "{{ 'lioadm' if ansible_facts.os_family == 'RedHat' else 'tgtadm' }}"
|
|
# Valid options are [ '', redis, etcd ]
|
|
cinder_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
|
|
|
|
# Valid options are [ nfs, swift, ceph ]
|
|
cinder_backup_driver: "ceph"
|
|
cinder_backup_share: ""
|
|
cinder_backup_mount_options_nfs: ""
|
|
|
|
#######################
|
|
# Cloudkitty options
|
|
#######################
|
|
# Valid options are 'sqlalchemy' or 'influxdb'. The default value is
|
|
# 'influxdb', which matches the default in Cloudkitty since the Stein release.
|
|
# When the backend is "influxdb", we also enable Influxdb.
|
|
# Also, when using 'influxdb' as the backend, we trigger the configuration/use
|
|
# of Cloudkitty storage backend version 2.
|
|
cloudkitty_storage_backend: "influxdb"
|
|
|
|
#######################
|
|
# Designate options
|
|
#######################
|
|
# Valid options are [ bind9, infoblox ]
|
|
designate_backend: "bind9"
|
|
designate_ns_record:
|
|
- "ns1.example.org"
|
|
designate_backend_external: "no"
|
|
designate_backend_external_bind9_nameservers: ""
|
|
# Valid options are [ '', redis ]
|
|
designate_coordination_backend: "{{ 'redis' if enable_redis | bool else '' }}"
|
|
|
|
designate_internal_endpoint: "{{ internal_protocol }}://{{ designate_internal_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
|
|
designate_public_endpoint: "{{ public_protocol }}://{{ designate_external_fqdn | put_address_in_context('url') }}:{{ designate_api_port }}"
|
|
|
|
designate_enable_notifications_sink: "yes"
|
|
designate_notifications_topic_name: "notifications_designate"
|
|
|
|
#######################
|
|
# Neutron options
|
|
#######################
|
|
neutron_bgp_router_id: "1.1.1.1"
|
|
neutron_bridge_name: "{{ 'br-dvs' if neutron_plugin_agent == 'vmware_dvs' else 'br_dpdk' if enable_ovs_dpdk | bool else 'br-ex' }}"
|
|
# Comma-separated list of enabled ml2 type drivers
|
|
neutron_type_drivers: "flat,vlan,vxlan{% if neutron_plugin_agent == 'ovn' %},geneve{% endif %}"
|
|
# Comma-separated types of tenant networks (should be listed in 'neutron_type_drivers')
|
|
# NOTE: for ironic this list should also contain 'flat'
|
|
neutron_tenant_network_types: "{% if neutron_plugin_agent == 'ovn' %}geneve{% else %}vxlan{% endif %}"
|
|
|
|
# valid values: ["dvr", "dvr_no_external"]
|
|
neutron_compute_dvr_mode: "dvr"
|
|
computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or enable_neutron_provider_networks | bool or neutron_ovn_distributed_fip | bool }}"
|
|
|
|
# Default DNS resolvers for virtual networks
|
|
neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4"
|
|
|
|
# Set legacy iptables to allow kernels not supporting iptables-nft
|
|
neutron_legacy_iptables: "no"
|
|
|
|
# Enable distributed floating ip for OVN deployments
|
|
neutron_ovn_distributed_fip: "no"
|
|
|
|
neutron_internal_endpoint: "{{ internal_protocol }}://{{ neutron_internal_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
|
|
neutron_public_endpoint: "{{ public_protocol }}://{{ neutron_external_fqdn | put_address_in_context('url') }}:{{ neutron_server_port }}"
|
|
|
|
# SRIOV physnet:interface mappings when SRIOV is enabled
|
|
# "sriovnet1" and tunnel_interface are used here as placeholders
|
|
neutron_sriov_physnet_mappings:
|
|
sriovnet1: "{{ tunnel_interface }}"
|
|
neutron_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
|
|
|
|
#######################
|
|
# Nova options
|
|
#######################
|
|
nova_backend_ceph: "no"
|
|
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
|
|
# Valid options are [ kvm, qemu, vmware ]
|
|
nova_compute_virt_type: "kvm"
|
|
nova_instance_datadir_volume: "{{ 'nova_compute' if enable_nova_libvirt_container | bool else '/var/lib/nova' }}"
|
|
nova_safety_upgrade: "no"
|
|
# Valid options are [ none, novnc, spice ]
|
|
nova_console: "novnc"
|
|
|
|
#######################
|
|
# Nova Database
|
|
#######################
|
|
nova_database_shard_id: "{{ mariadb_default_database_shard_id | int }}"
|
|
|
|
#######################
|
|
# Murano options
|
|
#######################
|
|
murano_agent_rabbitmq_vhost: "muranoagent"
|
|
murano_agent_rabbitmq_user: "muranoagent"
|
|
|
|
|
|
#######################
|
|
# Horizon options
|
|
#######################
|
|
horizon_backend_database: "{{ enable_murano | bool }}"
|
|
horizon_keystone_multidomain: False
|
|
|
|
# Enable deploying custom horizon policy files for services that don't have a
|
|
# horizon plugin but have a policy file. Override these when you have services
|
|
# not deployed by kolla-ansible but want custom policy files deployed for them
|
|
# in horizon.
|
|
enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}"
|
|
enable_cinder_horizon_policy_file: "{{ enable_cinder }}"
|
|
enable_glance_horizon_policy_file: "{{ enable_glance }}"
|
|
enable_heat_horizon_policy_file: "{{ enable_heat }}"
|
|
enable_keystone_horizon_policy_file: "{{ enable_keystone }}"
|
|
enable_neutron_horizon_policy_file: "{{ enable_neutron }}"
|
|
enable_nova_horizon_policy_file: "{{ enable_nova }}"
|
|
|
|
horizon_enable_tls_backend: "{{ kolla_enable_tls_backend }}"
|
|
|
|
horizon_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_internal | bool else horizon_port }}"
|
|
horizon_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ horizon_tls_port if kolla_enable_tls_external | bool else horizon_port }}"
|
|
|
|
###################
|
|
# External Ceph options
|
|
###################
|
|
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
|
|
external_ceph_cephx_enabled: "yes"
|
|
|
|
# External Ceph pool names
|
|
ceph_cinder_pool_name: "volumes"
|
|
ceph_cinder_backup_pool_name: "backups"
|
|
ceph_glance_pool_name: "images"
|
|
ceph_gnocchi_pool_name: "gnocchi"
|
|
ceph_nova_pool_name: "vms"
|
|
|
|
ceph_cinder_backup_user: "cinder-backup"
|
|
ceph_cinder_user: "cinder"
|
|
ceph_glance_user: "glance"
|
|
ceph_gnocchi_user: "gnocchi"
|
|
ceph_manila_user: "manila"
|
|
ceph_nova_user: "{{ ceph_cinder_user }}"
|
|
|
|
# External Ceph keyrings
|
|
ceph_cinder_keyring: "ceph.client.cinder.keyring"
|
|
ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
|
|
ceph_glance_keyring: "ceph.client.glance.keyring"
|
|
ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
|
|
ceph_manila_keyring: "ceph.client.manila.keyring"
|
|
ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
|
|
|
|
#####################
|
|
# VMware support
|
|
#####################
|
|
vmware_vcenter_host_ip: "127.0.0.1"
|
|
vmware_vcenter_host_username: "username"
|
|
vmware_vcenter_cluster_name: "cluster-1"
|
|
vmware_vcenter_insecure: "True"
|
|
|
|
#############################################
|
|
# MariaDB component-specific database details
|
|
#############################################
|
|
# Whether to configure haproxy to load balance
|
|
# the external MariaDB server(s)
|
|
enable_external_mariadb_load_balancer: "no"
|
|
# Whether to use pre-configured databases / users
|
|
use_preconfigured_databases: "no"
|
|
# whether to use a common, preconfigured user
|
|
# for all component databases
|
|
use_common_mariadb_user: "no"
|
|
|
|
############
|
|
# Prometheus
|
|
############
|
|
enable_prometheus_server: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
|
|
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
|
|
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}"
|
|
enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_fluentd_integration: "{{ enable_prometheus | bool and enable_fluentd | bool }}"
|
|
enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_alertmanager_external: "{{ enable_prometheus_alertmanager | bool }}"
|
|
enable_prometheus_ceph_mgr_exporter: "no"
|
|
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_openstack_exporter_external: "no"
|
|
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
|
|
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
|
|
enable_prometheus_rabbitmq_exporter: "{{ enable_prometheus | bool and enable_rabbitmq | bool }}"
|
|
enable_prometheus_libvirt_exporter: "{{ enable_prometheus | bool and enable_nova | bool and nova_compute_virt_type in ['kvm', 'qemu'] }}"
|
|
enable_prometheus_etcd_integration: "{{ enable_prometheus | bool and enable_etcd | bool }}"
|
|
enable_prometheus_msteams: "no"
|
|
|
|
prometheus_alertmanager_user: "admin"
|
|
prometheus_scrape_interval: "60s"
|
|
prometheus_openstack_exporter_interval: "{{ prometheus_scrape_interval }}"
|
|
prometheus_openstack_exporter_timeout: "45s"
|
|
prometheus_elasticsearch_exporter_interval: "{{ prometheus_scrape_interval }}"
|
|
prometheus_cmdline_extras:
|
|
prometheus_ceph_mgr_exporter_endpoints: []
|
|
prometheus_openstack_exporter_endpoint_type: "internal"
|
|
prometheus_openstack_exporter_compute_api_version: "latest"
|
|
prometheus_libvirt_exporter_interval: "60s"
|
|
prometheus_msteams_webhook_url:
|
|
|
|
############
|
|
# Vitrage
|
|
############
|
|
enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
|
|
|
|
|
|
####################
|
|
# InfluxDB options
|
|
####################
|
|
influxdb_address: "{{ kolla_internal_fqdn }}"
|
|
influxdb_datadir_volume: "influxdb"
|
|
|
|
influxdb_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ influxdb_http_port }}"
|
|
|
|
#################
|
|
# Kafka options
|
|
#################
|
|
kafka_datadir_volume: "kafka"
|
|
|
|
# The number of brokers in a Kafka cluster. This is used for automatically
|
|
# setting quantities such as topic replicas and it is not recommended to
|
|
# change it unless you know what you are doing.
|
|
kafka_broker_count: "{{ groups['kafka'] | length }}"
|
|
|
|
#########################
|
|
# Internal Image options
|
|
#########################
|
|
distro_python_version_map: {
|
|
"centos": "3.6",
|
|
"debian": "3.9",
|
|
"rhel": "3.6",
|
|
"ubuntu": "3.10"
|
|
}
|
|
|
|
kolla_base_distro_version_default_map: {
|
|
"centos": "stream8",
|
|
"debian": "bullseye",
|
|
"ubuntu": "jammy",
|
|
}
|
|
|
|
distro_python_version: "{{ distro_python_version_map[kolla_base_distro] }}"
|
|
|
|
kolla_base_distro_version: "{{ kolla_base_distro_version_default_map[kolla_base_distro] }}"
|
|
|
|
##########
|
|
# Telegraf
|
|
##########
|
|
# Configure telegraf to use the docker daemon itself as an input for
|
|
# telemetry data.
|
|
telegraf_enable_docker_input: "no"
|
|
|
|
vitrage_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
|
|
vitrage_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ vitrage_api_port }}"
|
|
|
|
####################
|
|
# Grafana
|
|
####################
|
|
grafana_internal_endpoint: "{{ internal_protocol }}://{{ kolla_internal_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
|
|
grafana_public_endpoint: "{{ public_protocol }}://{{ kolla_external_fqdn | put_address_in_context('url') }}:{{ grafana_server_port }}"
|
|
|
|
#############
|
|
# Ironic
|
|
#############
|
|
ironic_internal_endpoint: "{{ internal_protocol }}://{{ ironic_internal_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
|
|
ironic_public_endpoint: "{{ public_protocol }}://{{ ironic_external_fqdn | put_address_in_context('url') }}:{{ ironic_api_port }}"
|
|
|
|
# Valid options are [ '', redis, etcd ]
|
|
ironic_coordination_backend: "{{ 'redis' if enable_redis | bool else 'etcd' if enable_etcd | bool else '' }}"
|
|
|
|
########
|
|
# Swift
|
|
########
|
|
swift_internal_base_endpoint: "{{ internal_protocol }}://{{ swift_internal_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}"
|
|
|
|
swift_internal_endpoint: "{{ swift_internal_base_endpoint }}/v1/AUTH_%(tenant_id)s"
|
|
swift_public_endpoint: "{{ public_protocol }}://{{ swift_external_fqdn | put_address_in_context('url') }}:{{ swift_proxy_server_port }}/v1/AUTH_%(tenant_id)s"
|
|
|
|
##########
|
|
# Octavia
|
|
##########
|
|
# Whether to run Kolla-Ansible's automatic configuration for Octavia.
|
|
# NOTE: if you upgrade from Ussuri, you must set `octavia_auto_configure` to `no`
|
|
# and keep your other Octavia config like before.
|
|
octavia_auto_configure: yes
|
|
|
|
# Octavia network type options are [ tenant, provider ]
|
|
# * tenant indicates that we will create a tenant network and a network
|
|
# interface on the Octavia worker nodes for communication with amphorae.
|
|
# * provider indicates that we will create a flat or vlan provider network.
|
|
# In this case octavia_network_interface should be set to a network interface
|
|
#   on the Octavia worker nodes on the same provider network.
|
|
octavia_network_type: "provider"
|
|
|
|
octavia_internal_endpoint: "{{ internal_protocol }}://{{ octavia_internal_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
|
|
octavia_public_endpoint: "{{ public_protocol }}://{{ octavia_external_fqdn | put_address_in_context('url') }}:{{ octavia_api_port }}"
|
|
|
|
###################################
|
|
# Identity federation configuration
|
|
###################################
|
|
# Here we configure all of the IdPs' meta information that will be required to implement identity federation with OpenStack Keystone.
|
|
# We require the administrator to enter the following metadata:
|
|
# * name (internal name of the IdP in Keystone);
|
|
# * openstack_domain (the domain in Keystone that the IdP belongs to)
|
|
# * protocol (the federated protocol used by the IdP; e.g. openid or saml);
|
|
# * identifier (the IdP identifier; e.g. https://accounts.google.com);
|
|
# * public_name (the public name that will be shown for users in Horizon);
|
|
# * attribute_mapping (the attribute mapping to be used for this IdP. This mapping is configured in the "keystone_identity_mappings" configuration);
|
|
# * metadata_folder (folder containing all the identity provider metadata as jsons named as the identifier without the protocol
|
|
# and with '/' escaped as %2F followed with '.provider' or '.client' or '.conf'; e.g. accounts.google.com.provider; PS, all .conf,
|
|
# .provider and .client jsons must be in the folder, even if you don't override any conf in the .conf json, you must leave it as an empty json '{}');
|
|
# * certificate_file (the path to the Identity Provider certificate file, the file must be named as 'certificate-key-id.pem';
|
|
# e.g. LRVweuT51StjMdsna59jKfB3xw0r8Iz1d1J1HeAbmlw.pem; You can find the key-id in the Identity provider '.well-known/openid-configuration' jwks_uri as kid);
|
|
#
|
|
# The IdPs meta information are to be presented to Kolla-Ansible as the following example:
|
|
# keystone_identity_providers:
|
|
# - name: "myidp1"
|
|
# openstack_domain: "my-domain"
|
|
# protocol: "openid"
|
|
# identifier: "https://accounts.google.com"
|
|
# public_name: "Authenticate via myidp1"
|
|
# attribute_mapping: "mappingId1"
|
|
# metadata_folder: "path/to/metadata/folder"
|
|
# certificate_file: "path/to/certificate/file.pem"
|
|
#
|
|
# We also need to configure the attribute mapping that is used by IdPs.
|
|
# The configuration of attribute mappings is a list of objects, where each
|
|
# object must have a 'name' (that maps to the 'attribute_mapping' of the IdP
|
|
# object in the IdPs set), and the 'file' with a full qualified path to a mapping file.
|
|
# keystone_identity_mappings:
|
|
# - name: "mappingId1"
|
|
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId1"
|
|
# - name: "mappingId2"
|
|
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId2"
|
|
# - name: "mappingId3"
|
|
# file: "/full/qualified/path/to/mapping/json/file/to/mappingId3"
|
|
keystone_identity_providers: []
|
|
keystone_identity_mappings: []
|
|
|
|
####################
|
|
# Corosync options
|
|
####################
|
|
|
|
# this is UDP port
|
|
hacluster_corosync_port: 5405
|