---
# The options in this file can be overridden in 'globals.yml'
# The "temp" files that are created before merge need to stay persistent due
# to the fact that ansible will register a "change" if it has to create them
# again. Persistent files allow for idempotency
container_config_directory: "/var/lib/kolla/config_files"
# The directory containing custom config files to merge with Kolla's config files
node_custom_config: "/etc/kolla/config"
# The project to generate configuration files for
project: ""
# The directory to store the config files on the destination node
node_config_directory: "/etc/kolla/{{ project }}"
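# As an illustration only (the paths below are hypothetical, not
# recommendations), both directories can be overridden in globals.yml:
#   node_custom_config: "/opt/kolla/custom-config"
#   node_config_directory: "/opt/kolla/{{ project }}"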
###################
# Kolla options
###################
# Which orchestration engine to use. Valid options are [ ANSIBLE, KUBERNETES ]
orchestration_engine: "ANSIBLE"
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
config_strategy: "COPY_ALWAYS"
# Valid options are [ centos, oraclelinux, ubuntu ]
kolla_base_distro: "centos"
# Valid options are [ binary, source ]
kolla_install_type: "binary"
kolla_internal_vip_address: "{{ kolla_internal_address }}"
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}"
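# As a sketch only (addresses and hostname below are hypothetical), a
# deployment with separate internal and external VIPs could set the
# following in globals.yml:
#   kolla_internal_vip_address: "10.10.10.254"
#   kolla_external_vip_address: "192.0.2.254"
#   kolla_external_fqdn: "cloud.example.com"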
kolla_enable_sanity_checks: "no"
kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
####################
# kolla-kubernetes
####################
# By default, Kolla API services bind to the network address assigned
# to the api_interface. Allow the bind address to be overridden. In
# some cases (Kubernetes), the api_interface address is not known
# until container runtime, so it is necessary to bind to all
# interfaces ("0.0.0.0"). When used outside of Kubernetes, binding to
# all interfaces may present a security issue, and is therefore not
# recommended.
api_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + api_interface]['ipv4']['address'] if orchestration_engine == 'ANSIBLE' else '0.0.0.0' }}"
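# For illustration: with orchestration_engine "ANSIBLE", api_interface "eth0"
# and a (hypothetical) eth0 IPv4 address of 10.0.0.10, the expression above
# resolves to "10.0.0.10"; with any other engine it resolves to "0.0.0.0".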
################
# Chrony options
################
# A list of NTP servers
external_ntp_servers:
  - 0.pool.ntp.org
  - 1.pool.ntp.org
  - 2.pool.ntp.org
  - 3.pool.ntp.org
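# These can be replaced with site-local servers in globals.yml, for example
# (hostnames below are hypothetical):
#   external_ntp_servers:
#     - ntp1.example.com
#     - ntp2.example.com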
####################
# Database options
####################
database_address: "{{ kolla_internal_fqdn }}"
database_user: "root"
database_port: "3306"
####################
# Docker options
####################
docker_registry_email:
docker_registry:
docker_namespace: "kolla"
docker_registry_username:
# Valid options are [ never, on-failure, always, unless-stopped ]
docker_restart_policy: "unless-stopped"
# '0' means unlimited retries
docker_restart_policy_retry: "10"
# Common options used throughout docker
docker_common_options:
  auth_email: "{{ docker_registry_email }}"
  auth_password: "{{ docker_registry_password }}"
  auth_registry: "{{ docker_registry }}"
  auth_username: "{{ docker_registry_username }}"
  environment:
    KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
  restart_policy: "{{ docker_restart_policy }}"
  restart_retries: "{{ docker_restart_policy_retry }}"
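# These common options are consumed by the service roles, which typically pass
# the whole dict to the kolla_docker module. A rough sketch (the task, image
# and container names below are illustrative only):
#   - name: Starting memcached container
#     kolla_docker:
#       action: "start_container"
#       common_options: "{{ docker_common_options }}"
#       image: "{{ memcached_image_full }}"
#       name: "memcached"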
####################
# keepalived options
####################
# Arbitrary unique number from 0..255
keepalived_virtual_router_id: "51"
####################
# Networking options
####################
network_interface: "eth0"
neutron_external_interface: "eth1"
kolla_external_vip_interface: "{{ network_interface }}"
api_interface: "{{ network_interface }}"
storage_interface: "{{ network_interface }}"
cluster_interface: "{{ network_interface }}"
tunnel_interface: "{{ network_interface }}"
bifrost_network_interface: "{{ network_interface }}"
tunnel_interface_address: "{{ hostvars[inventory_hostname]['ansible_' + tunnel_interface]['ipv4']['address'] }}"
# Valid options are [ openvswitch, linuxbridge, sfc ]
neutron_plugin_agent: "openvswitch"
# The default ports used by each service.
aodh_api_port: "8042"
barbican_api_port: "9311"
ceilometer_api_port: "8777"
congress_api_port: "1789"
cloudkitty_api_port: "8889"
designate_api_port: "9001"
designate_bind_port: "53"
designate_mdns_port: "5354"
designate_rndc_port: "953"
iscsi_port: "3260"
gnocchi_api_port: "8041"
mariadb_port: "{{ database_port }}"
mariadb_wsrep_port: "4567"
mariadb_ist_port: "4568"
mariadb_sst_port: "4444"
panko_api_port: "8977"
rabbitmq_port: "5672"
rabbitmq_management_port: "15672"
rabbitmq_cluster_port: "25672"
rabbitmq_epmd_port: "4369"
mongodb_port: "27017"
mongodb_web_port: "28017"
haproxy_stats_port: "1984"
keystone_public_port: "5000"
keystone_admin_port: "35357"
keystone_ssh_port: "8023"
glance_api_port: "9292"
glance_registry_port: "9191"
octavia_api_port: "9876"
octavia_health_manager_port: "5555"
nova_api_port: "8774"
nova_metadata_port: "8775"
nova_novncproxy_port: "6080"
nova_spicehtml5proxy_port: "6082"
nova_serialproxy_port: "6083"
neutron_server_port: "9696"
cinder_api_port: "8776"
memcached_port: "11211"
swift_proxy_server_port: "8080"
swift_object_server_port: "6000"
swift_account_server_port: "6001"
swift_container_server_port: "6002"
swift_rsync_port: "10873"
sahara_api_port: "8386"
heat_api_port: "8004"
heat_api_cfn_port: "8000"
horizon_port: "80"
murano_api_port: "8082"
ironic_api_port: "6385"
ironic_inspector_port: "5050"
magnum_api_port: "9511"
solum_application_deployment_port: "9777"
solum_image_builder_port: "9778"
rgw_port: "6780"
mistral_api_port: "8989"
kibana_server_port: "5601"
elasticsearch_port: "9200"
manila_api_port: "8786"
watcher_api_port: "9322"
influxdb_admin_port: "8083"
influxdb_http_port: "8086"
senlin_api_port: "8778"
trove_api_port: "8779"
etcd_client_port: "2379"
etcd_peer_port: "2380"
karbor_api_port: "8799"
kuryr_port: "23750"
searchlight_api_port: "9393"
grafana_server_port: "3000"
tacker_server_port: "9890"
fluentd_syslog_port: "5140"
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
internal_protocol: "http"
admin_protocol: "http"
####################
# OpenStack options
####################
openstack_release: "auto"
openstack_logging_debug: "False"
openstack_region_name: "RegionOne"
openstack_service_workers: "{{ [ansible_processor_vcpus, 5]|min if orchestration_engine == 'ANSIBLE' else '1'}}"
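# For illustration: on a host with 8 vCPUs (hypothetical), the expression above
# resolves to min(8, 5) = 5 workers under Ansible; with any other orchestration
# engine it resolves to 1.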
# Optionally allow Kolla to set sysctl values
set_sysctl: "yes"
# Valid options are [ novnc, spice ]
nova_console: "novnc"
# OpenStack authentication string. You should only need to override these if you
# are changing the admin tenant/project or user.
openstack_auth:
  auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
  username: "admin"
  password: "{{ keystone_admin_password }}"
  project_name: "admin"
  domain_name: "default"
# These roles are required for Kolla to be operational; however, a savvy
# deployer could disable some of these required roles and run their own
# services.
enable_glance: "yes"
enable_haproxy: "yes"
enable_keystone: "yes"
enable_mariadb: "yes"
enable_memcached: "yes"
enable_neutron: "yes"
enable_nova: "yes"
enable_rabbitmq: "yes"
# Additional optional OpenStack features and services are specified here
enable_aodh: "no"
enable_barbican: "no"
enable_cadf_notifications: "no"
enable_ceilometer: "no"
enable_central_logging: "no"
enable_ceph: "no"
enable_ceph_rgw: "no"
enable_chrony: "no"
enable_cinder: "no"
enable_cinder_backend_hnas_iscsi: "no"
enable_cinder_backend_hnas_nfs: "no"
enable_cinder_backend_iscsi: "no"
enable_cinder_backend_lvm: "no"
enable_cinder_backend_nfs: "no"
enable_cloudkitty: "no"
enable_congress: "no"
enable_designate: "no"
enable_etcd: "no"
enable_gnocchi: "no"
enable_grafana: "no"
enable_heat: "yes"
enable_horizon: "yes"
enable_horizon_cloudkitty: "{{ enable_cloudkitty | bool }}"
enable_horizon_ironic: "{{ enable_ironic | bool }}"
enable_horizon_karbor: "{{ enable_karbor | bool }}"
enable_horizon_magnum: "{{ enable_magnum | bool }}"
enable_horizon_manila: "{{ enable_manila | bool }}"
enable_horizon_mistral: "{{ enable_mistral | bool }}"
enable_horizon_murano: "{{ enable_murano | bool }}"
enable_horizon_neutron_lbaas: "{{ enable_neutron_lbaas | bool }}"
enable_horizon_sahara: "{{ enable_sahara | bool }}"
enable_horizon_searchlight: "{{ enable_searchlight | bool }}"
enable_horizon_senlin: "{{ enable_senlin | bool }}"
enable_horizon_solum: "{{ enable_solum | bool }}"
enable_horizon_trove: "{{ enable_trove | bool }}"
enable_horizon_watcher: "{{ enable_watcher | bool }}"
enable_influxdb: "no"
enable_ironic: "no"
enable_iscsid: "{{ enable_cinder_backend_iscsi | bool or enable_cinder_backend_lvm | bool or enable_ironic | bool }}"
enable_karbor: "no"
enable_kuryr: "no"
enable_magnum: "no"
enable_manila: "no"
enable_manila_backend_generic: "no"
enable_manila_backend_hnas: "no"
enable_mistral: "no"
enable_mongodb: "no"
enable_multipathd: "no"
enable_murano: "no"
enable_neutron_vpnaas: "no"
enable_neutron_dvr: "no"
enable_neutron_lbaas: "no"
enable_neutron_fwaas: "no"
enable_neutron_qos: "no"
enable_neutron_agent_ha: "no"
enable_nova_serialconsole_proxy: "no"
enable_octavia: "no"
enable_panko: "no"
enable_rally: "no"
enable_sahara: "no"
enable_searchlight: "no"
enable_senlin: "no"
enable_solum: "no"
enable_swift: "no"
enable_tacker: "no"
enable_telegraf: "no"
enable_tempest: "no"
enable_trove: "no"
enable_vmtp: "no"
enable_watcher: "no"
ironic_keystone_user: "ironic"
neutron_keystone_user: "neutron"
nova_keystone_user: "nova"
# Nova fake driver and the number of fake drivers per compute node
enable_nova_fake: "no"
num_nova_fake_per_node: 5
# Monitoring options are specified here
enable_collectd: "no"
# Image cleanup options are specified here
enable_destroy_images: "no"
####################
# Logging options
####################
elasticsearch_address: "{{ kolla_internal_vip_address }}"
elasticsearch_protocol: "{{ internal_protocol }}"
enable_elasticsearch: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
enable_kibana: "{{ 'yes' if enable_central_logging | bool else 'no' }}"
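# A minimal globals.yml override to turn on the central logging stack, which
# also flips the two derived flags above to "yes":
#   enable_central_logging: "yes"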
####################
# RabbitMQ options
####################
rabbitmq_user: "openstack"
rabbitmq_version: "rabbitmq_server-3.6/plugins/rabbitmq_clusterer-3.6.x.ez/rabbitmq_clusterer-3.6.x-667f92b0/ebin"
####################
# HAProxy options
####################
haproxy_user: "openstack"
haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}"
kolla_enable_tls_external: "no"
kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
kolla_external_fqdn_cacert: "{{ node_config_directory }}/certificates/haproxy-ca.crt"
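# A sketch of enabling external TLS in globals.yml (the FQDN below is
# hypothetical, and the deployer must place a combined certificate/key bundle
# at the configured path):
#   kolla_enable_tls_external: "yes"
#   kolla_external_fqdn: "cloud.example.com"
#   kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"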
####################
# Kibana options
####################
kibana_user: "kibana"
####################
# Keystone options
####################
keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3"
keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3"
keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3"
# Valid options are [ uuid, fernet ]
keystone_token_provider: "uuid"
fernet_token_expiry: 86400
keystone_default_user_role: "_member_"
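# A sketch of switching to fernet tokens in globals.yml (the expiry shown is
# simply the default above, expressed in seconds):
#   keystone_token_provider: "fernet"
#   fernet_token_expiry: 86400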
#######################
# Glance options
#######################
glance_backend_file: "{{ not enable_ceph | bool }}"
glance_backend_ceph: "{{ enable_ceph }}"
#######################
# Ceilometer options
#######################
# Valid options are [ mongodb, mysql, gnocchi ]
ceilometer_database_type: "mongodb"
# Valid options are [ mongodb, gnocchi, panko ]
ceilometer_event_type: "mongodb"
########################
### Panko options
########################
# Valid options are [ mongodb, mysql ]
panko_database_type: "mysql"
#################
# Gnocchi options
#################
# Valid options are [ file, ceph ]
gnocchi_backend_storage: "{{ 'ceph' if enable_ceph|bool else 'file' }}"
#################################
# Cinder options
#################################
cinder_backend_ceph: "{{ enable_ceph }}"
cinder_volume_group: "cinder-volumes"
cinder_backup_driver: "nfs"
cinder_backup_share: ""
cinder_backup_mount_options_nfs: ""
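# A sketch of an NFS backup target in globals.yml (the server address, export
# path and mount options below are hypothetical):
#   cinder_backup_driver: "nfs"
#   cinder_backup_share: "192.0.2.10:/cinder-backups"
#   cinder_backup_mount_options_nfs: "vers=4"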
#######################
# Cloudkitty options
#######################
# Valid options are [ ceilometer, gnocchi ]
cloudkitty_collector_backend: "ceilometer"
#######################
# Designate options
#######################
# Valid options are [ bind9 ]
designate_backend: "bind9"
designate_ns_record: "sample.openstack.org"
#######################
# Nova options
#######################
nova_backend_ceph: "{{ enable_ceph }}"
nova_backend: "{{ 'rbd' if nova_backend_ceph | bool else 'default' }}"
#######################
# Horizon options
#######################
horizon_backend_database: "no"
#################
# Octavia options
#################
# Load balancer topology options are [ SINGLE, ACTIVE_STANDBY ]
octavia_loadbalancer_topology: "SINGLE"
octavia_amp_boot_network_list:
octavia_amp_secgroup_list:
octavia_amp_flavor_id:
###################
# Ceph options
###################
# Ceph can be set up with a cache tier to improve performance. To use the
# cache you must provide disks separate from those used for the OSDs
ceph_enable_cache: "no"
# Valid options are [ forward, none, writeback ]
ceph_cache_mode: "writeback"
# Valid options are [ ext4, btrfs, xfs ]
ceph_osd_filesystem: "xfs"
# Set to 'yes-i-really-really-mean-it' to force wipe disks with existing partitions for OSDs. Only
# set if you understand the consequences!
ceph_osd_wipe_disk: ""
# These are /etc/fstab options. Comma separated, no spaces (see fstab(8))
ceph_osd_mount_options: "defaults,noatime"
# Using erasure-coded pools requires setting up a cache tier first
# Valid options are [ erasure, replicated ]
ceph_pool_type: "replicated"
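# A sketch of an erasure-coded pool setup in globals.yml which, per the note
# above, also requires the cache tier to be enabled (values are illustrative):
#   ceph_pool_type: "erasure"
#   ceph_enable_cache: "yes"
#   ceph_cache_mode: "writeback"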
# Integrate Ceph RADOS Gateway with OpenStack Keystone
enable_ceph_rgw_keystone: "no"
ceph_cinder_pool_name: "volumes"
ceph_cinder_backup_pool_name: "backups"
ceph_glance_pool_name: "images"
ceph_gnocchi_pool_name: "gnocchi"
ceph_nova_pool_name: "vms"
ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
ceph_cache_rule: "cache host firstn"