openstack-ansible/etc/openstack_deploy/user_variables.yml.prod-ceph.example

---
# Because we have three haproxy nodes, we need
# one active LB IP, and we use keepalived for that.
# These variables must be defined when external_lb_vip_address or
# internal_lb_vip_address is set to an FQDN.
## Load Balancer Configuration (haproxy/keepalived)
haproxy_keepalived_external_vip_cidr: "<external_ip_address>/<netmask>"
haproxy_keepalived_internal_vip_cidr: "172.29.236.9/32"
haproxy_keepalived_external_interface: ens2
haproxy_keepalived_internal_interface: br-mgmt
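#
# For example, with FQDNs set (a sketch only; the hostnames below and their
# placement in openstack_user_config.yml global_overrides are illustrative
# assumptions):
#   global_overrides:
#     internal_lb_vip_address: internal.example.cloud
#     external_lb_vip_address: openstack.example.cloud
# keepalived cannot use an FQDN as a VIP, so the *_vip_cidr variables above
# must carry the actual addresses those names resolve to.
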
## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 116f14c4-7fe1-40e4-94eb-9240b63de5c1 # Replace with your generated UUID
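##
## For example (the value shown is only illustrative; paste your own output
## into fsid above):
##   $ python -c 'import uuid; print(str(uuid.uuid4()))'
##   116f14c4-7fe1-40e4-94eb-9240b63de5c1
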
## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.storage }}"
public_network: "{{ cidr_networks.storage }}"
cluster_network: "{{ cidr_networks.storage }}"
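#
# cidr_networks is normally defined in openstack_user_config.yml; a minimal
# sketch of the value referenced above (the CIDR is only an example):
#   cidr_networks:
#     storage: 172.29.244.0/22
#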
journal_size: 10240 # size in MB
# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms
cinder_backends:
  rbd_volumes:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
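
# rbd_secret_uuid is the libvirt secret UUID used to authenticate with Ceph;
# cinder_ceph_client_uuid is assumed to be supplied via user_secrets.yml and
# filled in by scripts/pw-token-gen.py. A placeholder sketch:
#   cinder_ceph_client_uuid: 00000000-0000-0000-0000-000000000000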