
A combination of durable queues and classic queue mirroring can be used to provide high availability of RabbitMQ. However, these options should only be used together, otherwise the system will become unstable. Using the flag ``om_enable_rabbitmq_high_availability`` will either enable both options at once, or neither of them.

There are some queues that should not be mirrored:

* ``reply`` queues (these have a single consumer and TTL policy)
* ``fanout`` queues (these have a TTL policy)
* ``amq`` queues (these are auto-delete queues, with a single consumer)

An exclusionary pattern is used in the classic mirroring policy. This pattern is ``^(?!(amq\\.)|(.*_fanout_)|(reply_)).*``

Change-Id: I51c8023b260eb40b2eaa91bd276b46890c215c25
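For illustration only (not part of the change itself): with ``om_enable_rabbitmq_high_availability`` enabled and RabbitMQ TLS left disabled, the ``[oslo_messaging_rabbit]`` section of the template below renders for ``cinder-api`` roughly as

    [oslo_messaging_rabbit]
    heartbeat_in_pthread = True
    amqp_durable_queues = true

while the exclusionary pattern above is applied to the classic mirroring policy on the RabbitMQ side, outside this template.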
[DEFAULT]
debug = {{ cinder_logging_debug }}

log_dir = /var/log/kolla/cinder
{% if service_name == "cinder-api" %}
log_file = cinder-api.log
{% endif %}
use_forwarded_for = true

# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False

my_ip = {{ api_interface_address }}

volume_name_template = volume-%s

glance_api_servers = {{ glance_internal_endpoint }}

glance_num_retries = {{ groups['glance-api'] | length }}
glance_ca_certificates_file = {{ openstack_cacert }}

{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}

{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = {{ ceph_cinder_backup_user }}
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% elif cinder_backup_driver == "nfs" %}
backup_driver = cinder.backup.drivers.nfs.NFSBackupDriver
backup_mount_options = {{ cinder_backup_mount_options_nfs }}
backup_mount_point_base = /var/lib/cinder/backup
backup_share = {{ cinder_backup_share }}
backup_file_size = 327680000
{% elif enable_swift | bool and cinder_backup_driver == "swift" %}
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = {{ swift_internal_base_endpoint }}/v1/AUTH_
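# NOTE: the trailing "AUTH_" above is intentional; Cinder's Swift backup
# driver appends the project ID at runtime to form the full account URL.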
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
{% endif %}
{% endif %}

api_paste_config = /etc/cinder/api-paste.ini

auth_strategy = keystone

transport_url = {{ rpc_transport_url }}

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if cinder_enabled_notification_topics %}
driver = messagingv2
topics = {{ cinder_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

[oslo_messaging_rabbit]
heartbeat_in_pthread = {{ service_name == 'cinder-api' }}
{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
{% if om_enable_rabbitmq_high_availability | bool %}
amqp_durable_queues = true
{% endif %}

[oslo_middleware]
enable_proxy_headers_parsing = True

{% if cinder_policy_file is defined %}
[oslo_policy]
policy_file = {{ cinder_policy_file }}
{% endif %}

[nova]
interface = internal
auth_url = {{ keystone_internal_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
region_name = {{ openstack_region_name }}
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
cafile = {{ openstack_cacert }}

[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1

[keystone_authtoken]
service_type = volume
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}

memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
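# NOTE: the loop above renders a comma-separated list of memcached endpoints,
# e.g. 192.0.2.10:11211,192.0.2.11:11211 (example addresses only).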

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

{% if enable_cinder_backend_lvm | bool %}
[{{ cinder_backend_lvm_name }}]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = {{ cinder_backend_lvm_name }}
target_helper = {{ cinder_target_helper }}
target_protocol = iscsi
{% endif %}

{% if cinder_backend_ceph | bool %}
[{{ cinder_backend_ceph_name }}]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = {{ cinder_backend_ceph_name }}
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rados_connect_timeout = 5
rbd_user = {{ ceph_cinder_user }}
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
{% endif %}

{% if enable_cinder_backend_nfs | bool %}
[{{ cinder_backend_nfs_name }}]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = {{ cinder_backend_nfs_name }}
nfs_shares_config = /etc/cinder/nfs_shares
nfs_snapshot_support = True
nas_secure_file_permissions = False
nas_secure_file_operations = False
{% endif %}

{% if enable_cinder_backend_hnas_nfs | bool %}
[{{ cinder_backend_hnas_nfs_name }}]
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
nfs_shares_config = /home/cinder/nfs_shares
volume_backend_name = {{ hnas_nfs_backend }}
hnas_username = {{ hnas_nfs_username }}
hnas_password = {{ hnas_nfs_password }}
hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }}

hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }}
{% endif %}

{% if cinder_backend_vmwarevc_vmdk | bool %}
[{{ cinder_backend_vmwarevc_vmdk_name }}]
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
vmware_insecure = True
{% endif %}

{% if cinder_backend_vmware_vstorage_object | bool %}
[{{ cinder_backend_vmware_vstorage_object_name }}]
volume_driver = cinder.volume.drivers.vmware.fcd.VMwareVStorageObjectDriver
vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
vmware_insecure = True
{% endif %}

{% if enable_cinder_backend_quobyte | bool %}
[{{ cinder_backend_quobyte_name }}]
volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver
quobyte_volume_url = quobyte://{{ quobyte_storage_host }}/{{ quobyte_storage_volume }}
{% endif %}

{% if enable_cinder_backend_pure_iscsi | bool %}
[{{ cinder_backend_pure_iscsi_name }}]
volume_backend_name = {{ pure_iscsi_backend }}
volume_driver = cinder.volume.drivers.pure.PureISCSIDriver
san_ip = {{ pure_san_ip }}
pure_api_token = {{ pure_api_token }}
{% endif %}

{% if enable_cinder_backend_pure_fc | bool %}
[{{ cinder_backend_pure_fc_name }}]
volume_backend_name = {{ pure_fc_backend }}
volume_driver = cinder.volume.drivers.pure.PureFCDriver
san_ip = {{ pure_san_ip }}
pure_api_token = {{ pure_api_token }}
{% endif %}

{% if enable_cinder_backend_pure_roce | bool %}
[{{ cinder_backend_pure_roce_name }}]
volume_backend_name = {{ pure_roce_backend }}
volume_driver = cinder.volume.drivers.pure.PureNVMEDriver
san_ip = {{ pure_san_ip }}
pure_api_token = {{ pure_api_token }}
{% endif %}

[privsep_entrypoint]
helper_command=sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf

{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}

{% if enable_barbican | bool %}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
barbican_endpoint_type = internal
verify_ssl_path = {{ openstack_cacert }}
{% endif %}

[coordination]
{% if cinder_coordination_backend == 'redis' %}
backend_url = {{ redis_connection_string }}
{% elif cinder_coordination_backend == 'etcd' %}
# NOTE(yoctozepto): etcd-compatible tooz drivers do not support multiple endpoints here (verified in Stein, Train)
# NOTE(yoctozepto): we must use etcd3gw (aka etcd3+http) due to issues with alternative (etcd3) and eventlet (as used by cinder)
# see https://bugs.launchpad.net/kolla-ansible/+bug/1854932
# and https://review.opendev.org/466098 for details
backend_url = etcd3+{{ etcd_protocol }}://{{ 'api' | kolla_address(groups['etcd'][0]) | put_address_in_context('url') }}:{{ etcd_client_port }}
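# Example (illustrative values only): with etcd_protocol=http and the default
# etcd client port, the line above renders to a single endpoint such as
# backend_url = etcd3+http://192.0.2.20:2379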
{% endif %}