kolla-ansible/ansible/group_vars/all.yml
---
# The options in this file can be overridden in 'globals.yml'
# The "temp" files that are created before merge need to stay persistent due
# to the fact that ansible will register a "change" if it has to create them
# again. Persistent files allow for idempotency
container_config_directory: "/var/lib/kolla/config_files"
# The directory on the deploy host containing globals.yml.
node_config: "{{ CONFIG_DIR | default('/etc/kolla') }}"
# The directory containing custom config files to be merged with Kolla's config files
node_custom_config: "/etc/kolla/config"
# The directory to store the config files on the destination node
node_config_directory: "/etc/kolla"
# The user and group that own node_config_directory; a non-root user can be
# used to deploy Kolla
config_owner_user: "root"
config_owner_group: "root"
###################
# Kolla options
###################
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"
# Valid options are ['centos', 'debian', 'rhel', 'ubuntu']
kolla_base_distro: "centos"
# Valid options are [ binary, source ]
kolla_install_type: "binary"
kolla_internal_vip_address: "{{ kolla_internal_address }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
kolla_same_external_internal_vip: "{{ kolla_external_vip_address == kolla_internal_vip_address }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_same_external_internal_vip | bool else kolla_external_vip_address }}"
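# As an illustration, a deployment with separate internal and external VIPs
# might set something like the following in globals.yml (all addresses and
# names below are hypothetical):
# kolla_internal_vip_address: "10.10.10.254"
# kolla_external_vip_address: "192.168.0.254"
# kolla_external_fqdn: "cloud.example.com"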
kolla_enable_sanity_checks: "no"
kolla_enable_sanity_barbican: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
kolla_dev_repos_directory: "/opt/stack/"
kolla_dev_repos_git: "https://opendev.org/openstack"
kolla_dev_repos_pull: "no"
kolla_dev_mode: "no"
kolla_source_version: "master"
# Proxy settings for containers such as magnum that need internet access
container_http_proxy: ""
container_https_proxy: ""
container_no_proxy: "localhost,127.0.0.1"
container_proxy:
  http_proxy: "{{ container_http_proxy }}"
  https_proxy: "{{ container_https_proxy }}"
  no_proxy: "{{ container_no_proxy }},{{ api_interface_address }},{{ kolla_internal_vip_address }}"
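# For instance, a site behind a corporate proxy could set something like the
# following in globals.yml (the proxy host and port are hypothetical):
# container_http_proxy: "http://proxy.example.com:3128"
# container_https_proxy: "http://proxy.example.com:3128"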
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be overridden.
api_interface_address: "{{ 'api' | kolla_address }}"
# This is used to get the IP address corresponding to the storage_interface.
storage_interface_address: "{{ 'storage' | kolla_address }}"
################
# Chrony options
################
# A list of NTP servers
external_ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org
  - 2.pool.ntp.org
  - 3.pool.ntp.org
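# Deployments without internet access typically point this at local servers,
# for example (the hostname below is hypothetical):
# external_ntp_servers:
#   - ntp.example.com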
####################
# Database options
####################
database_address: "{{ kolla_internal_fqdn }}"
database_user: "root"
database_port: "3306"
####################
# Docker options
####################
docker_registry_email:
docker_registry:
docker_namespace: "kolla"
docker_registry_username:
docker_registry_insecure: "{{ 'yes' if docker_registry else 'no' }}"
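# For example, to pull images from a local registry you might set something
# like the following in globals.yml (the registry address is illustrative):
# docker_registry: "192.168.1.100:4000"
# docker_namespace: "kolla"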
docker_runtime_directory: ""
# Docker client timeout in seconds.
docker_client_timeout: 120
# Retention settings for Docker logs
docker_log_max_file: "5"
docker_log_max_size: "50m"
# Valid options are [ no, on-failure, always, unless-stopped ]
docker_restart_policy: "unless-stopped"
# '0' means unlimited retries (applies only to 'on-failure' policy)
docker_restart_policy_retry: "10"
# Extra docker options for Zun
docker_configure_for_zun: "no"
docker_zun_options: -H tcp://{{ api_interface_address | put_address_in_context('url') }}:2375
docker_zun_config:
  cluster-store: etcd://{% for host in groups.get('etcd', []) %}{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ hostvars[host]['etcd_client_port'] }}{% if not loop.last %},{% endif %}{% endfor %}
# Timeout in seconds after Docker sends SIGTERM before sending SIGKILL.
docker_graceful_timeout: 60
# Common options used throughout Docker
docker_common_options:
  auth_email: "{{ docker_registry_email }}"
  auth_password: "{{ docker_registry_password }}"
  auth_registry: "{{ docker_registry }}"
  auth_username: "{{ docker_registry_username }}"
  environment:
    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
  restart_policy: "{{ docker_restart_policy }}"
  restart_retries: "{{ docker_restart_policy_retry }}"
  graceful_timeout: "{{ docker_graceful_timeout }}"
  client_timeout: "{{ docker_client_timeout }}"
####################
# Dimensions options
####################
# Dimension options for Docker Containers
default_container_dimensions: {}
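# As an illustrative sketch only, resource limits could be applied to all
# containers by overriding this in globals.yml. The keys generally mirror the
# Docker container resource options; the values below are examples:
# default_container_dimensions:
#   mem_limit: "8g"
#   cpuset_cpus: "0-3"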
#######################
# Extra volumes options
#######################
# Extra volumes for Docker Containers
default_extra_volumes: []
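# For example, an additional host directory could be bind mounted into every
# container as follows (the paths and mode are hypothetical):
# default_extra_volumes:
#   - "/opt/extra:/mnt/extra:ro"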
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
#######################
# Elasticsearch Options
#######################
es_heap_size: "1G"
elasticsearch_datadir_volume: "elasticsearch"
###################
# Messaging options
###################
# oslo.messaging rpc transport valid options are [ rabbit, amqp ]
om_rpc_transport: "rabbit"
om_rpc_user: "{{ rabbitmq_user }}"
om_rpc_password: "{{ rabbitmq_password }}"
om_rpc_port: "{{ rabbitmq_port }}"
om_rpc_group: "rabbitmq"
om_rpc_vhost: "/"
rpc_transport_url: "{{ om_rpc_transport }}://{% for host in groups[om_rpc_group] %}{{ om_rpc_user }}:{{ om_rpc_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_rpc_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_rpc_vhost }}"
# oslo.messaging notify transport valid options are [ rabbit ]
om_notify_transport: "rabbit"
om_notify_user: "{{ rabbitmq_user }}"
om_notify_password: "{{ rabbitmq_password }}"
om_notify_port: "{{ rabbitmq_port }}"
om_notify_group: "rabbitmq"
om_notify_vhost: "/"
notify_transport_url: "{{ om_notify_transport }}://{% for host in groups[om_notify_group] %}{{ om_notify_user }}:{{ om_notify_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ om_notify_port }}{% if not loop.last %},{% endif %}{% endfor %}/{{ om_notify_vhost }}"
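# With the defaults above, the generated transport URLs take the form
# rabbit://<user>:<password>@<host1>:<port>,<user>:<password>@<host2>:<port>/<vhost>
# with one credential@host:port entry per member of the rabbitmq group.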
####################
# Networking options
####################
network_interface: "eth0"
neutron_external_interface: "eth1"
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
storage_interface: "{{ network_interface }}"
cluster_interface: "{{ network_interface }}"
swift_storage_interface: "{{ storage_interface }}"
swift_replication_interface: "{{ swift_storage_interface }}"
migration_interface: "{{ network_interface }}"
tunnel_interface: "{{ network_interface }}"
octavia_network_interface: "{{ api_interface }}"
bifrost_network_interface: "{{ network_interface }}"
dns_interface: "{{ network_interface }}"
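# As a purely illustrative example, a host with a bonded management network
# and a dedicated Neutron interface might override these in globals.yml:
# network_interface: "bond0"
# neutron_external_interface: "eth2"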
# Configure the address family (AF) per network.
# Valid options are [ ipv4, ipv6 ]
network_address_family: "ipv4"
api_address_family: "{{ network_address_family }}"
storage_address_family: "{{ network_address_family }}"
cluster_address_family: "{{ network_address_family }}"
swift_storage_address_family: "{{ storage_address_family }}"
swift_replication_address_family: "{{ swift_storage_address_family }}"
migration_address_family: "{{ network_address_family }}"
tunnel_address_family: "{{ network_address_family }}"
octavia_network_address_family: "{{ api_address_family }}"
bifrost_network_address_family: "{{ network_address_family }}"
dns_address_family: "{{ network_address_family }}"
migration_interface_address: "{{ 'migration' | kolla_address }}"
tunnel_interface_address: "{{ 'tunnel' | kolla_address }}"
octavia_network_interface_address: "{{ 'octavia_network' | kolla_address }}"
# Valid options are [ openvswitch, linuxbridge, vmware_nsxv, vmware_nsxv3, vmware_dvs ]
neutron_plugin_agent: "openvswitch"
# Valid options are [ internal, infoblox ]
neutron_ipam_driver: "internal"
# The default ports used by each service.
# The list should be in alphabetical order
aodh_internal_fqdn: "{{ kolla_internal_fqdn }}"
aodh_external_fqdn: "{{ kolla_external_fqdn }}"
aodh_api_port: "8042"
aodh_api_listen_port: "{{ aodh_api_port }}"
barbican_internal_fqdn: "{{ kolla_internal_fqdn }}"
barbican_external_fqdn: "{{ kolla_external_fqdn }}"
barbican_api_port: "9311"
barbican_api_listen_port: "{{ barbican_api_port }}"
blazar_api_port: "1234"
cinder_internal_fqdn: "{{ kolla_internal_fqdn }}"
cinder_external_fqdn: "{{ kolla_external_fqdn }}"
cinder_api_port: "8776"
cinder_api_listen_port: "{{ cinder_api_port }}"
congress_api_port: "1789"
cloudkitty_api_port: "8889"
collectd_udp_port: "25826"
cyborg_api_port: "6666"
designate_internal_fqdn: "{{ kolla_internal_fqdn }}"
designate_external_fqdn: "{{ kolla_external_fqdn }}"
designate_api_port: "9001"
designate_api_listen_port: "{{ designate_api_port }}"
designate_bind_port: "53"
designate_mdns_port: "{{ '53' if designate_backend == 'infoblox' else '5354' }}"
designate_rndc_port: "953"
elasticsearch_port: "9200"
etcd_client_port: "2379"
etcd_peer_port: "2380"
fluentd_syslog_port: "5140"
freezer_api_port: "9090"
glance_internal_fqdn: "{{ kolla_internal_fqdn }}"
glance_external_fqdn: "{{ kolla_external_fqdn }}"
glance_api_port: "9292"
glance_api_listen_port: "{{ glance_api_port }}"
gnocchi_internal_fqdn: "{{ kolla_internal_fqdn }}"
gnocchi_external_fqdn: "{{ kolla_external_fqdn }}"
gnocchi_api_port: "8041"
gnocchi_api_listen_port: "{{ gnocchi_api_port }}"
grafana_server_port: "3000"
haproxy_stats_port: "1984"
haproxy_monitor_port: "61313"
heat_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_port: "8004"
heat_api_listen_port: "{{ heat_api_port }}"
heat_cfn_internal_fqdn: "{{ kolla_internal_fqdn }}"
heat_cfn_external_fqdn: "{{ kolla_external_fqdn }}"
heat_api_cfn_port: "8000"
heat_api_cfn_listen_port: "{{ heat_api_cfn_port }}"
horizon_port: "80"
horizon_listen_port: "{{ horizon_port }}"
influxdb_http_port: "8086"
ironic_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_api_port: "6385"
ironic_api_listen_port: "{{ ironic_api_port }}"
ironic_inspector_internal_fqdn: "{{ kolla_internal_fqdn }}"
ironic_inspector_external_fqdn: "{{ kolla_external_fqdn }}"
ironic_inspector_port: "5050"
ironic_inspector_listen_port: "{{ ironic_inspector_port }}"
ironic_ipxe_port: "8089"
iscsi_port: "3260"
kafka_port: "9092"
karbor_api_port: "8799"
keystone_public_port: "5000"
keystone_public_listen_port: "{{ keystone_public_port }}"
keystone_admin_port: "35357"
keystone_admin_listen_port: "{{ keystone_admin_port }}"
keystone_ssh_port: "8023"
kibana_server_port: "5601"
kuryr_port: "23750"
magnum_api_port: "9511"
manila_api_port: "8786"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
masakari_api_port: "15868"
memcached_port: "11211"
mistral_api_port: "8989"
monasca_api_port: "8070"
monasca_log_api_port: "5607"
monasca_agent_forwarder_port: "17123"
monasca_agent_statsd_port: "8125"
monasca_grafana_server_port: "3001"
mongodb_port: "27017"
mongodb_web_port: "28017"
murano_api_port: "8082"
neutron_internal_fqdn: "{{ kolla_internal_fqdn }}"
neutron_external_fqdn: "{{ kolla_external_fqdn }}"
neutron_server_port: "9696"
neutron_server_listen_port: "{{ neutron_server_port }}"
nova_internal_fqdn: "{{ kolla_internal_fqdn }}"
nova_external_fqdn: "{{ kolla_external_fqdn }}"
nova_api_port: "8774"
nova_api_listen_port: "{{ nova_api_port }}"
nova_metadata_port: "8775"
nova_metadata_listen_port: "{{ nova_metadata_port }}"
nova_novncproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_novncproxy_port: "6080"
nova_novncproxy_listen_port: "{{ nova_novncproxy_port }}"
nova_spicehtml5proxy_fqdn: "{{ kolla_external_fqdn }}"
nova_spicehtml5proxy_port: "6082"
nova_spicehtml5proxy_listen_port: "{{ nova_spicehtml5proxy_port }}"
nova_serialproxy_fqdn: "{{ kolla_external_fqdn }}"
nova_serialproxy_port: "6083"
nova_serialproxy_listen_port: "{{ nova_serialproxy_port }}"
nova_serialproxy_protocol: "{{ 'wss' if kolla_enable_tls_external | bool else 'ws' }}"
octavia_internal_fqdn: "{{ kolla_internal_fqdn }}"
octavia_external_fqdn: "{{ kolla_external_fqdn }}"
octavia_api_port: "9876"
octavia_api_listen_port: "{{ octavia_api_port }}"
octavia_health_manager_port: "5555"
outward_rabbitmq_port: "5674"
outward_rabbitmq_management_port: "15674"
outward_rabbitmq_cluster_port: "25674"
outward_rabbitmq_epmd_port: "4371"
ovsdb_port: "6640"
panko_api_port: "8977"
placement_internal_fqdn: "{{ kolla_internal_fqdn }}"
placement_external_fqdn: "{{ kolla_external_fqdn }}"
# The default placement API port of 8778 is already in use (see senlin_api_port)
placement_api_port: "8780"
placement_api_listen_port: "{{ placement_api_port }}"
prometheus_port: "9091"
prometheus_node_exporter_port: "9100"
prometheus_mysqld_exporter_port: "9104"
prometheus_haproxy_exporter_port: "9101"
prometheus_memcached_exporter_port: "9150"
# The default cadvisor port of 8080 is already in use (see swift_proxy_server_port)
prometheus_cadvisor_port: "18080"
# Prometheus alertmanager ports
prometheus_alertmanager_port: "9093"
prometheus_alertmanager_cluster_port: "9094"
# Prometheus openstack-exporter ports
prometheus_openstack_exporter_port: "9198"
prometheus_elasticsearch_exporter_port: "9108"
# Prometheus blackbox-exporter ports
prometheus_blackbox_exporter_port: "9115"
qdrouterd_port: "31459"
qinling_api_port: "7070"
rabbitmq_port: "5672"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
rabbitmq_epmd_port: "4369"
redis_port: "6379"
redis_sentinel_port: "26379"
rdp_port: "8001"
rgw_port: "6780"
sahara_api_port: "8386"
searchlight_api_port: "9393"
senlin_internal_fqdn: "{{ kolla_internal_fqdn }}"
senlin_external_fqdn: "{{ kolla_external_fqdn }}"
senlin_api_port: "8778"
senlin_api_listen_port: "{{ senlin_api_port }}"
skydive_analyzer_port: "8085"
skydive_agents_port: "8090"
solum_application_deployment_port: "9777"
solum_image_builder_port: "9778"
storm_nimbus_thrift_port: 6627
storm_supervisor_thrift_port: 6628
# Storm will run up to (end - start) + 1 workers per worker host. Here
# we reserve ports for those workers, and implicitly define the maximum
# number of workers per host (with the default range of 6700-6703 this is
# (6703 - 6700) + 1 = 4 workers).
storm_worker_port_range:
  start: 6700
  end: 6703
swift_internal_fqdn: "{{ kolla_internal_fqdn }}"
swift_external_fqdn: "{{ kolla_external_fqdn }}"
swift_proxy_server_port: "8080"
swift_proxy_server_listen_port: "{{ swift_proxy_server_port }}"
swift_object_server_port: "6000"
swift_account_server_port: "6001"
swift_container_server_port: "6002"
swift_rsync_port: "10873"
syslog_udp_port: "{{ fluentd_syslog_port }}"
tacker_server_port: "9890"
trove_api_port: "8779"
watcher_api_port: "9322"
zookeeper_client_port: "2181"
zookeeper_peer_port: "2888"
zookeeper_quorum_port: "3888"
zun_api_port: "9517"
zun_wsproxy_port: "6784"
vitrage_api_port: "8999"
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
admin_protocol: "{{ 'https' if kolla_enable_tls_internal | bool else 'http' }}"
####################
# OpenStack options
####################
openstack_release: "master"
# Docker image tag used by default.
openstack_tag: "{{ openstack_release ~ openstack_tag_suffix }}"
# TODO(mgoddard): Set to an empty string when CentOS 7 is no longer supported.
openstack_tag_suffix: "{{ '' if kolla_base_distro != 'centos' or ansible_distribution_major_version == '7' else '-centos8' }}"
openstack_logging_debug: "False"
openstack_region_name: "RegionOne"
# The name of the previous release, used to pin the release version during the
# rolling upgrade process
openstack_previous_release_name: "train"
# A list of policy file formats that are supported by Oslo.policy
supported_policy_format_list:
  - policy.yaml
  - policy.json
# When deploying multiple regions, list the names of all your regions here.
multiple_regions_names:
  - "{{ openstack_region_name }}"
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min }}"
openstack_service_rpc_workers: "{{ [ansible_processor_vcpus, 3]|min }}"
# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"
# Endpoint type used by Ansible modules to connect to OpenStack services.
# Valid options are [ public, internal, admin ]
openstack_interface: "admin"
# OpenStack CA certificate bundle file
# The CA bundle file must be added to both the Horizon and Kolla Toolbox containers
openstack_cacert: ""
# Enable core OpenStack services. This includes:
# glance, keystone, neutron, nova, heat, and horizon.
enable_openstack_core: "yes"
# These roles are required for Kolla to be operational; however, a savvy deployer
# could disable some of these required roles and run their own services.
enable_glance: "{{ enable_openstack_core | bool }}"
enable_haproxy: "yes"
enable_keepalived: "{{ enable_haproxy | bool }}"
enable_keystone: "{{ enable_openstack_core | bool }}"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "{{ enable_openstack_core | bool }}"
enable_nova: "{{ enable_openstack_core | bool }}"
enable_rabbitmq: "{{ 'yes' if om_rpc_transport == 'rabbit' or om_notify_transport == 'rabbit' else 'no' }}"
enable_outward_rabbitmq: "{{ enable_murano | bool }}"
# NOTE: Most memcached clients handle load-balancing via client side
# hashing (consistent or not) logic, so going under the covers and messing
# with things that the clients are not aware of is generally wrong
enable_haproxy_memcached: "no"
# Additional optional OpenStack features and services are specified here
enable_aodh: "no"
enable_barbican: "no"
enable_blazar: "no"
# NOTE: This variable has been deprecated and will be removed in the U cycle.
enable_cadf_notifications: "no"
enable_ceilometer: "no"
enable_ceilometer_ipmi: "no"
enable_cells: "no"
enable_central_logging: "no"
enable_chrony: "yes"
enable_cinder: "no"
enable_cinder_backup: "yes"
enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "{{ enable_cinder_backend_lvm | bool or enable_cinder_backend_zfssa_iscsi | bool }}"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cinder_backend_zfssa_iscsi: "no"
enable_cinder_backend_quobyte: "no"
enable_cloudkitty: "no"
enable_collectd: "no"
enable_congress: "no"
enable_cyborg: "no"
enable_designate: "no"
enable_etcd: "no"
enable_fluentd: "yes"
enable_freezer: "no"
enable_gnocchi: "no"
enable_grafana: "no"
enable_heat: "{{ enable_openstack_core | bool }}"
enable_horizon: "{{ enable_openstack_core | bool }}"
enable_horizon_blazar: "{{ enable_blazar | bool }}"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_congress: "{{ enable_congress | bool }}"
enable_horizon_designate: "{{ enable_designate | bool }}"
enable_horizon_fwaas: "{{ enable_neutron_fwaas | bool }}"
enable_horizon_freezer: "{{ enable_freezer | bool }}"
enable_horizon_heat: "{{ enable_heat | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_karbor: "{{ enable_karbor | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_masakari: "{{ enable_masakari | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_monasca: "{{ enable_monasca | bool }}"
enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_vpnaas: "{{ enable_neutron_vpnaas | bool }}"
enable_horizon_octavia: "{{ enable_octavia | bool }}"
enable_horizon_qinling: "{{ enable_qinling | bool }}"
enable_horizon_sahara: "{{ enable_sahara | bool }}"
enable_horizon_searchlight: "{{ enable_searchlight | bool }}"
enable_horizon_senlin: "{{ enable_senlin | bool }}"
enable_horizon_solum: "{{ enable_solum | bool }}"
enable_horizon_tacker: "{{ enable_tacker | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_vitrage: "{{ enable_vitrage | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_horizon_zun: "{{ enable_zun | bool }}"
enable_hyperv: "no"
enable_influxdb: "{{ enable_monasca | bool or (enable_cloudkitty | bool and cloudkitty_storage_backend == 'influxdb') }}"
enable_ironic: "no"
enable_ironic_ipxe: "no"
enable_ironic_neutron_agent: "{{ enable_neutron | bool and enable_ironic | bool }}"
enable_ironic_pxe_uefi: "no"
enable_iscsid: "{{ (enable_cinder | bool and enable_cinder_backend_iscsi | bool) or enable_ironic | bool }}"
enable_karbor: "no"
enable_kafka: "{{ enable_monasca | bool }}"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
enable_manila_backend_hnas: "no"
enable_manila_backend_cephfs_native: "no"
enable_manila_backend_cephfs_nfs: "no"
enable_mariabackup: "no"
enable_masakari: "no"
enable_mistral: "no"
enable_monasca: "no"
enable_mongodb: "no"
enable_multipathd: "no"
enable_murano: "no"
enable_neutron_vpnaas: "no"
enable_neutron_sriov: "no"
enable_neutron_dvr: "no"
enable_neutron_fwaas: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_neutron_bgp_dragent: "no"
enable_neutron_provider_networks: "no"
enable_neutron_segments: "no"
enable_neutron_sfc: "no"
enable_neutron_metering: "no"
enable_neutron_infoblox_ipam_agent: "no"
enable_neutron_port_forwarding: "no"
enable_nova_serialconsole_proxy: "no"
enable_nova_ssh: "yes"
enable_octavia: "no"
enable_openvswitch: "{{ enable_neutron | bool and neutron_plugin_agent != 'linuxbridge' }}"
enable_ovs_dpdk: "no"
enable_osprofiler: "no"
enable_panko: "no"
enable_placement: "{{ enable_nova | bool or enable_zun | bool }}"
enable_prometheus: "no"
enable_qdrouterd: "{{ 'yes' if om_rpc_transport == 'amqp' else 'no' }}"
enable_qinling: "no"
enable_rally: "no"
enable_redis: "no"
enable_sahara: "no"
enable_searchlight: "no"
enable_senlin: "no"
enable_skydive: "no"
enable_solum: "no"
enable_storm: "{{ enable_monasca | bool }}"
enable_swift: "no"
enable_swift_s3api: "no"
enable_swift_recon: "no"
enable_tacker: "no"
enable_telegraf: "no"
enable_tempest: "no"
enable_trove: "no"
enable_trove_singletenant: "no"
enable_vitrage: "no"
enable_vmtp: "no"
enable_watcher: "no"
enable_zookeeper: "{{ enable_kafka | bool }}"
enable_zun: "no"
ovs_datapath: "{{ 'netdev' if enable_ovs_dpdk | bool else 'system' }}"
designate_keystone_user: "designate"
ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
placement_keystone_user: "placement"
murano_keystone_user: "murano"
cinder_keystone_user: "cinder"
# Nova fake driver and the number of fake drivers per compute node
enable_nova_fake: "no"
num_nova_fake_per_node: 5
# Options for cleaning up images are specified here
enable_destroy_images: "no"
####################
# Monasca options
####################
# The OpenStack username used by the Monasca Agent and the Fluentd Monasca
# plugin to post logs and metrics from the control plane to Monasca.
monasca_agent_user: "monasca-agent"
# The OpenStack project with which the control plane logs and metrics are
# tagged. Only users with the Monasca read-only role or higher can access
# these via the Monasca APIs.
monasca_control_plane_project: "monasca_control_plane"
####################
# Global Options
####################
# List of containers to skip during stop command in YAML list format
# skip_stop_containers:
# - container1
# - container2
skip_stop_containers: []
####################
# Logging options
####################
elasticsearch_address: "{{ kolla_internal_fqdn }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool or enable_osprofiler | bool or enable_skydive | bool or enable_monasca | bool else 'no' }}"
# If using Curator an actions file will need to be defined. Please see
# the documentation.
enable_elasticsearch_curator: "no"
enable_kibana: "{{ 'yes' if enable_central_logging | bool or enable_monasca | bool else 'no' }}"
####################
# Redis options
####################
redis_connection_string: "redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}admin:{{ redis_master_password }}@{{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ 'api' | kolla_address(host) | put_address_in_context('url') }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}&db=0&socket_timeout=60&retry_on_timeout=yes"
####################
# Osprofiler options
####################
# valid values: ["elasticsearch", "redis"]
osprofiler_backend: "elasticsearch"
elasticsearch_connection_string: "elasticsearch://{{ elasticsearch_address | put_address_in_context('url') }}:{{ elasticsearch_port }}"
osprofiler_backend_connection_string: "{{ redis_connection_string if osprofiler_backend == 'redis' else elasticsearch_connection_string }}"
####################
# RabbitMQ options
####################
rabbitmq_hipe_compile: "no"
rabbitmq_user: "openstack"
rabbitmq_monitoring_user: ""
outward_rabbitmq_user: "openstack"
####################
# Qdrouterd options
####################
qdrouterd_user: "openstack"
####################
# HAProxy options
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_same_external_internal_vip | bool else 'yes' }}"
kolla_enable_tls_internal: "no"
kolla_enable_tls_external: "{{ kolla_enable_tls_internal if kolla_same_external_internal_vip | bool else 'no' }}"
kolla_external_fqdn_cert: "{{ node_config }}/certificates/haproxy.pem"
kolla_internal_fqdn_cert: "{{ node_config }}/certificates/haproxy-internal.pem"
kolla_external_fqdn_cacert: "{{ node_config }}/certificates/ca/haproxy.crt"
kolla_internal_fqdn_cacert: "{{ node_config }}/certificates/ca/haproxy-internal.crt"
kolla_copy_ca_into_containers: "no"
####################
# Kibana options
####################
kibana_user: "kibana"
kibana_log_prefix: "flog"
####################
# Keystone options
####################
keystone_internal_fqdn: "{{ kolla_internal_fqdn }}"
keystone_external_fqdn: "{{ kolla_external_fqdn }}"
keystone_admin_url: "{{ admin_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_admin_port }}"
keystone_internal_url: "{{ internal_protocol }}://{{ keystone_internal_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
keystone_public_url: "{{ public_protocol }}://{{ keystone_external_fqdn | put_address_in_context('url') }}:{{ keystone_public_port }}"
keystone_admin_user: "admin"
keystone_admin_project: "admin"
default_project_domain_name: "Default"
default_project_domain_id: "default"
default_user_domain_name: "Default"
default_user_domain_id: "default"
# Valid options are [ fernet ]
keystone_token_provider: "fernet"
# Keystone fernet token expiry in seconds. Default is 1 day.
fernet_token_expiry: 86400
# Keystone window to allow expired fernet tokens. Default is 2 days.
fernet_token_allow_expired_window: 172800
# Keystone fernet key rotation interval in seconds. Default is the sum of the
# token expiry and allow expired window, i.e. 3 days. This ensures the minimum
# number of keys is active. If this interval is lower than the sum of the
# token expiry and allow expired window, multiple active keys will be
# necessary.
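# With the defaults above this works out to 86400 + 172800 = 259200 seconds
# (3 days).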
fernet_key_rotation_interval: "{{ fernet_token_expiry + fernet_token_allow_expired_window }}"
keystone_default_user_role: "_member_"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
  auth_url: "{{ keystone_admin_url }}"
  username: "{{ keystone_admin_user }}"
  password: "{{ keystone_admin_password }}"
  project_name: "{{ keystone_admin_project }}"
  domain_name: "default"
  user_domain_name: "default"
#######################
# Glance options
#######################
glance_backend_file: "{{ not (glance_backend_ceph | bool or glance_backend_swift | bool or glance_backend_vmware | bool) }}"
glance_backend_ceph: "no"
glance_backend_vmware: "no"
enable_glance_image_cache: "no"
glance_backend_swift: "{{ enable_swift | bool }}"
glance_file_datadir_volume: "glance"
glance_enable_rolling_upgrade: "no"
glance_api_hosts: "{{ [groups['glance-api']|first] if glance_backend_file | bool and glance_file_datadir_volume == 'glance' else groups['glance-api'] }}"
#######################
# Barbican options
#######################
# Valid options are [ simple_crypto, p11_crypto ]
barbican_crypto_plugin: "simple_crypto"
barbican_library_path: "/usr/lib/libCryptoki2_64.so"
########################
### Panko options
########################
# Valid options are [ mongodb, mysql ]
panko_database_type: "mysql"
#################
# Gnocchi options
#################
# Valid options are [ file, ceph, swift ]
gnocchi_backend_storage: "{% if enable_swift | bool %}swift{% else %}file{% endif %}"
# Valid options are [redis, '']
gnocchi_incoming_storage: "{{ 'redis' if enable_redis | bool else '' }}"
gnocchi_metric_datadir_volume: "gnocchi"
#################################
# Cinder options
#################################
cinder_backend_ceph: "no"
cinder_backend_vmwarevc_vmdk: "no"
cinder_volume_group: "cinder-volumes"
cinder_target_helper: "{{ 'lioadm' if ansible_os_family == 'RedHat' and ansible_distribution_major_version is version_compare('8', '>=') else 'tgtadm' }}"
# Valid options are [ '', redis, etcd ]
cinder_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}"
# Valid options are [ nfs, swift, ceph ]
cinder_backup_driver: "ceph"
cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
#######################
# Cloudkitty options
#######################
# Valid option is gnocchi
cloudkitty_collector_backend: "gnocchi"
# Valid options are 'sqlalchemy' or 'influxdb'. The default value is
# 'influxdb', which matches the default in Cloudkitty since the Stein release.
# When the backend is "influxdb", we also enable Influxdb.
# Also, when using 'influxdb' as the backend, we trigger the configuration/use
# of Cloudkitty storage backend version 2.
cloudkitty_storage_backend: "influxdb"
#######################
# Designate options
#######################
# Valid options are [ bind9, infoblox ]
designate_backend: "bind9"
designate_ns_record: "sample.openstack.org"
designate_backend_external: "no"
designate_backend_external_bind9_nameservers: ""
# Valid options are [ '', redis, etcd ]
designate_coordination_backend: "{{ 'redis' if enable_redis|bool else 'etcd' if enable_etcd|bool else '' }}"
#######################
# Neutron options
#######################
neutron_bgp_router_id: "1.1.1.1"
neutron_bridge_name: "{{ 'br-dvs' if neutron_plugin_agent == 'vmware_dvs' else 'br-ex' }}"
# Comma-separated list of enabled ml2 type drivers
neutron_type_drivers: "flat,vlan,vxlan"
# Comma-separated list of tenant network types (each must be listed in 'neutron_type_drivers')
# NOTE: for ironic this list should also contain 'flat'
neutron_tenant_network_types: "vxlan"
# valid values: ["dvr", "dvr_no_external"]
neutron_compute_dvr_mode: "dvr"
computes_need_external_bridge: "{{ (enable_neutron_dvr | bool and neutron_compute_dvr_mode == 'dvr') or enable_neutron_provider_networks | bool }}"
# Default DNS resolvers for virtual networks
neutron_dnsmasq_dns_servers: "1.1.1.1,8.8.8.8,8.8.4.4"
# Set legacy iptables to allow kernels not supporting iptables-nft
neutron_legacy_iptables: "no"
#######################
# Nova options
#######################
nova_backend_ceph: "no"
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
# Valid options are [ kvm, qemu, vmware, xenapi ]
nova_compute_virt_type: "kvm"
nova_instance_datadir_volume: "nova_compute"
nova_safety_upgrade: "no"
# Valid options are [ none, novnc, spice, rdp ]
nova_console: "novnc"
#######################
# Murano options
#######################
murano_agent_rabbitmq_vhost: "muranoagent"
murano_agent_rabbitmq_user: "muranoagent"
#######################
# Horizon options
#######################
horizon_backend_database: "{{ enable_murano | bool }}"
horizon_keystone_multidomain: False
# Enable deploying custom horizon policy files for services that don't have a
# horizon plugin but have a policy file. Override these when you have services
# not deployed by kolla-ansible but want custom policy files deployed for them
# in horizon.
enable_ceilometer_horizon_policy_file: "{{ enable_ceilometer }}"
enable_cinder_horizon_policy_file: "{{ enable_cinder }}"
enable_congress_horizon_policy_file: "{{ enable_congress }}"
enable_glance_horizon_policy_file: "{{ enable_glance }}"
enable_heat_horizon_policy_file: "{{ enable_heat }}"
enable_keystone_horizon_policy_file: "{{ enable_keystone }}"
enable_neutron_horizon_policy_file: "{{ enable_neutron }}"
enable_nova_horizon_policy_file: "{{ enable_nova }}"
#################
# Octavia options
#################
# Load balancer topology options are [ SINGLE, ACTIVE_STANDBY ]
octavia_loadbalancer_topology: "SINGLE"
octavia_amp_boot_network_list:
octavia_amp_secgroup_list:
octavia_amp_flavor_id:
#################
# Qinling options
#################
# Configure qinling-engine certificates to authenticate with Kubernetes cluster.
qinling_kubernetes_certificates: "no"
###################
# External Ceph options
###################
# External Ceph - cephx auth enabled (this is the standard nowadays, defaults to yes)
external_ceph_cephx_enabled: "yes"
# External Ceph pool names
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_gnocchi_pool_name: "gnocchi"
ceph_nova_pool_name: "vms"
ceph_cinder_backup_user: "cinder-backup"
ceph_cinder_volume_user: "cinder"
ceph_glance_user: "glance"
ceph_gnocchi_user: "gnocchi"
ceph_manila_user: "manila"
ceph_nova_user: "nova"
# External Ceph keyrings
ceph_cinder_keyring: "ceph.client.cinder.keyring"
ceph_cinder_backup_keyring: "ceph.client.cinder-backup.keyring"
ceph_glance_keyring: "ceph.client.glance.keyring"
ceph_gnocchi_keyring: "ceph.client.gnocchi.keyring"
ceph_manila_keyring: "ceph.client.manila.keyring"
ceph_nova_keyring: "{{ ceph_cinder_keyring }}"
######################
# VMware support
######################
vmware_vcenter_host_ip: "127.0.0.1"
vmware_vcenter_host_username: "username"
vmware_vcenter_cluster_name: "cluster-1"
vmware_vcenter_insecure: "True"
#######################################
# XenAPI - Support XenAPI for XenServer
#######################################
# The XenAPI driver uses the HIMN (Host Internal Management Network)
# to communicate with the XenServer host.
xenserver_himn_ip: "169.254.0.1"
xenserver_username: "root"
xenserver_connect_protocol: "https"
# File used to save XenAPI facts variables, formatted as JSON.
xenapi_facts_root: "/etc/kolla/xenapi/"
xenapi_facts_file: "facts.json"
#############################################
# MariaDB component-specific database details
#############################################
# Whether to configure haproxy to load balance
# the external MariaDB server(s)
enable_external_mariadb_load_balancer: "no"
# Whether to use pre-configured databases / users
use_preconfigured_databases: "no"
# Whether to use a common, preconfigured user
# for all component databases
use_common_mariadb_user: "no"
############
# Prometheus
############
enable_prometheus_haproxy_exporter: "{{ enable_haproxy | bool }}"
enable_prometheus_mysqld_exporter: "{{ enable_mariadb | bool }}"
enable_prometheus_node_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_memcached_exporter: "{{ enable_memcached | bool }}"
enable_prometheus_cadvisor: "{{ enable_prometheus | bool }}"
enable_prometheus_alertmanager: "{{ enable_prometheus | bool }}"
enable_prometheus_ceph_mgr_exporter: "no"
enable_prometheus_openstack_exporter: "{{ enable_prometheus | bool }}"
enable_prometheus_elasticsearch_exporter: "{{ enable_prometheus | bool and enable_elasticsearch | bool }}"
enable_prometheus_blackbox_exporter: "{{ enable_prometheus | bool }}"
prometheus_alertmanager_user: "admin"
prometheus_openstack_exporter_interval: "60s"
prometheus_elasticsearch_exporter_interval: "60s"
prometheus_cmdline_extras:
prometheus_ceph_mgr_exporter_endpoints: []
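# If enable_prometheus_ceph_mgr_exporter is set to "yes", list the ceph-mgr
# Prometheus module endpoints here, for example (the address is hypothetical;
# 9283 is the ceph-mgr Prometheus module's default port):
# prometheus_ceph_mgr_exporter_endpoints:
#   - "192.168.1.10:9283"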
############
# Vitrage
############
enable_vitrage_prometheus_datasource: "{{ enable_prometheus | bool }}"
####################
# InfluxDB options
####################
influxdb_address: "{{ kolla_internal_fqdn }}"
influxdb_datadir_volume: "influxdb"
#########################
# Internal Image options
#########################
distro_python_version_map: {
  "centos": "{{ '3.6' if ansible_distribution_major_version is version(8, '>=') else '2.7' }}",
  "debian": "3.7",
  "rhel": "{{ '3.6' if ansible_distribution_major_version is version(8, '>=') else '2.7' }}",
  "ubuntu": "3.6"
}
distro_python_version: "{{ distro_python_version_map[kolla_base_distro] }}"
##########
# Telegraf
##########
# Configure telegraf to use the docker daemon itself as an input for
# telemetry data.
telegraf_enable_docker_input: "no"