kolla-ansible/ansible/roles/cinder/templates/cinder.conf.j2
Commit 31ed556e67 by Jim Rollenhagen: Allow swift proxy server to use independent hostnames
This allows swift service endpoints to use custom hostnames, and adds the
following variables:

* swift_internal_fqdn
* swift_external_fqdn

These default to the previously used values, kolla_internal_fqdn and
kolla_external_fqdn respectively.

This also adds a swift_proxy_server_listen_port option, which defaults to
swift_proxy_server_port for backward compatibility.
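
For reference, with these defaults the new variables are expected to be wired
up roughly as follows in the group variables (a sketch, not the exact diff):

    swift_internal_fqdn: "{{ kolla_internal_fqdn }}"
    swift_external_fqdn: "{{ kolla_external_fqdn }}"
    swift_proxy_server_listen_port: "{{ swift_proxy_server_port }}"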

This option allows the user to differentiate between the port the
service listens on and the port the service is reachable on. This is
useful for external load balancers which live on the same host as the
service itself.

While we're in here, use the ``internal_protocol`` variable for the swift
endpoint in cinder's swift backup driver configuration, instead of hardcoding
to ``http``.

Change-Id: Ibc01618383c26e16c0067f7f6b9cf5160d968d1e
Implements: blueprint service-hostnames
2019-03-06 15:08:28 -05:00

[DEFAULT]
debug = {{ cinder_logging_debug }}
log_dir = /var/log/kolla/cinder
use_forwarded_for = true
# Set use_stderr to False or the logs will also be sent to stderr
# and collected by Docker
use_stderr = False
my_ip = {{ api_interface_address }}
osapi_volume_workers = {{ openstack_service_workers }}
volume_name_template = volume-%s
glance_api_servers = {{ internal_protocol }}://{{ glance_internal_fqdn }}:{{ glance_api_port }}
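# Example rendering (hypothetical values): with internal_protocol=http,
# glance_internal_fqdn=glance.internal.example.com and glance_api_port=9292,
# the line above becomes:
#   glance_api_servers = http://glance.internal.example.com:9292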
glance_num_retries = {{ groups['glance-api'] | length }}
glance_api_version = 2
os_region_name = {{ openstack_region_name }}
{% if cinder_enabled_backends %}
enabled_backends = {{ cinder_enabled_backends|map(attribute='name')|join(',') }}
{% endif %}
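# Example rendering: if cinder_enabled_backends were the (hypothetical) list
# [{name: 'rbd-1'}, {name: 'nfs-1'}], the map/join above would produce:
#   enabled_backends = rbd-1,nfs-1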
{% if service_name == "cinder-backup" and enable_cinder_backup | bool %}
{% if cinder_backup_driver == "ceph" %}
backup_driver = cinder.backup.drivers.ceph.CephBackupDriver
backup_ceph_conf = /etc/ceph/ceph.conf
backup_ceph_user = cinder-backup
backup_ceph_chunk_size = 134217728
backup_ceph_pool = {{ ceph_cinder_backup_pool_name }}
backup_ceph_stripe_unit = 0
backup_ceph_stripe_count = 0
restore_discard_excess_bytes = true
{% elif cinder_backup_driver == "nfs" %}
backup_driver = cinder.backup.drivers.nfs.NFSBackupDriver
backup_mount_options = {{ cinder_backup_mount_options_nfs }}
backup_mount_point_base = /var/lib/cinder/backup
backup_share = {{ cinder_backup_share }}
backup_file_size = 327680000
{% elif enable_swift | bool and cinder_backup_driver == "swift" %}
backup_driver = cinder.backup.drivers.swift.SwiftBackupDriver
backup_swift_url = {{ internal_protocol }}://{{ swift_internal_fqdn }}:{{ swift_proxy_server_port }}/v1/AUTH_
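# Example rendering (assumed values): with internal_protocol=https,
# swift_internal_fqdn=swift.internal.example.com and
# swift_proxy_server_port=8080, the URL above becomes:
#   backup_swift_url = https://swift.internal.example.com:8080/v1/AUTH_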
backup_swift_auth = per_user
backup_swift_auth_version = 1
backup_swift_user =
backup_swift_key =
{% endif %}
{% endif %}
osapi_volume_listen = {{ api_interface_address }}
osapi_volume_listen_port = {{ cinder_api_listen_port }}
api_paste_config = /etc/cinder/api-paste.ini
auth_strategy = keystone
transport_url = {{ rpc_transport_url }}
[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if cinder_enabled_notification_topics %}
driver = messagingv2
topics = {{ cinder_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}
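# Example rendering: with a single (hypothetical) topic named 'notifications'
# in cinder_enabled_notification_topics, the block above renders as:
#   driver = messagingv2
#   topics = notifications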
[oslo_middleware]
enable_proxy_headers_parsing = True
{% if cinder_policy_file is defined %}
[oslo_policy]
policy_file = {{ cinder_policy_file }}
{% endif %}
[nova]
interface = internal
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
region_name = {{ openstack_region_name }}
project_name = service
username = {{ nova_keystone_user }}
password = {{ nova_keystone_password }}
[database]
connection = mysql+pymysql://{{ cinder_database_user }}:{{ cinder_database_password }}@{{ cinder_database_address }}/{{ cinder_database_name }}
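# Example rendering (placeholder credentials and address):
#   connection = mysql+pymysql://cinder:SECRET@192.0.2.20:3306/cinder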
max_retries = -1
[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_admin_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ cinder_keystone_user }}
password = {{ cinder_keystone_password }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}
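# Example rendering: with two memcached hosts whose api_interface addresses
# are 192.0.2.10 and 192.0.2.11 (hypothetical) and memcached_port=11211,
# the loop above yields:
#   memcached_servers = 192.0.2.10:11211,192.0.2.11:11211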
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
{% if enable_cinder_backend_lvm | bool %}
[lvm-1]
volume_group = {{ cinder_volume_group }}
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_backend_name = lvm-1
target_helper = {{ cinder_target_helper }}
target_protocol = iscsi
{% endif %}
{% if enable_ceph | bool and cinder_backend_ceph | bool %}
[rbd-1]
volume_driver = cinder.volume.drivers.rbd.RBDDriver
volume_backend_name = rbd-1
rbd_pool = {{ ceph_cinder_pool_name }}
rbd_ceph_conf = /etc/ceph/ceph.conf
rbd_flatten_volume_from_snapshot = false
rbd_max_clone_depth = 5
rbd_store_chunk_size = 4
rados_connect_timeout = 5
rbd_user = cinder
rbd_secret_uuid = {{ cinder_rbd_secret_uuid }}
report_discard_supported = True
image_upload_use_cinder_backend = True
{% endif %}
{% if enable_cinder_backend_nfs | bool %}
[nfs-1]
volume_driver = cinder.volume.drivers.nfs.NfsDriver
volume_backend_name = nfs-1
nfs_shares_config = /etc/cinder/nfs_shares
nfs_snapshot_support = True
nas_secure_file_permissions = False
nas_secure_file_operations = False
{% endif %}
{% if enable_cinder_backend_hnas_iscsi | bool %}
[hnas-iscsi]
volume_driver = cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver
volume_backend_name = {{ hnas_iscsi_backend }}
hnas_username = {{ hnas_iscsi_username }}
hnas_password = {{ hnas_iscsi_password }}
hnas_mgmt_ip0 = {{ hnas_iscsi_mgmt_ip0 }}
hnas_chap_enabled = True
hnas_svc0_volume_type = {{ hnas_iscsi_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_iscsi_svc0_hdp }}
hnas_svc0_iscsi_ip = {{ hnas_iscsi_svc0_ip }}
{% endif %}
{% if enable_cinder_backend_hnas_nfs | bool %}
[hnas-nfs]
volume_driver = cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver
nfs_shares_config = /home/cinder/nfs_shares
volume_backend_name = {{ hnas_nfs_backend }}
hnas_username = {{ hnas_nfs_username }}
hnas_password = {{ hnas_nfs_password }}
hnas_mgmt_ip0 = {{ hnas_nfs_mgmt_ip0 }}
hnas_svc0_volume_type = {{ hnas_nfs_svc0_volume_type }}
hnas_svc0_hdp = {{ hnas_nfs_svc0_hdp }}
{% endif %}
{% if cinder_backend_vmwarevc_vmdk | bool %}
[vmwarevc-vmdk]
volume_backend_name = vmwarevc-vmdk
volume_driver = cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver
vmware_host_ip = {{ vmware_vcenter_host_ip }}
vmware_host_username = {{ vmware_vcenter_host_username }}
vmware_host_password = {{ vmware_vcenter_host_password }}
vmware_cluster_name = {{ vmware_vcenter_cluster_name }}
vmware_insecure = True
{% endif %}
{% if enable_cinder_backend_zfssa_iscsi | bool %}
[zfssa-iscsi]
volume_backend_name = {{ zfssa_iscsi_backend }}
volume_driver = cinder.volume.drivers.zfssa.zfssaiscsi.ZFSSAISCSIDriver
san_ip = {{ zfssa_iscsi_san_ip }}
san_login = {{ zfssa_iscsi_login }}
san_password = {{ zfssa_iscsi_password }}
zfssa_pool = {{ zfssa_iscsi_pool }}
zfssa_project = {{ zfssa_iscsi_project }}
zfssa_initiator_group = {{ zfssa_iscsi_initiator_group }}
zfssa_target_portal = {{ zfssa_iscsi_target_portal }}
zfssa_target_interfaces = {{ zfssa_iscsi_target_interfaces }}
{% endif %}
{% if enable_cinder_backend_quobyte | bool %}
[QuobyteHD]
volume_driver = cinder.volume.drivers.quobyte.QuobyteDriver
quobyte_volume_url = quobyte://{{ quobyte_storage_host }}/{{ quobyte_storage_volume }}
{% endif %}
[privsep_entrypoint]
helper_command = sudo cinder-rootwrap /etc/cinder/rootwrap.conf privsep-helper --config-file /etc/cinder/cinder.conf
{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}
{% if enable_barbican | bool %}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
{% endif %}
[coordination]
{% if enable_redis | bool %}
backend_url = redis://{% for host in groups['redis'] %}{% if host == groups['redis'][0] %}admin:{{ redis_master_password }}@{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ redis_sentinel_port }}?sentinel=kolla{% else %}&sentinel_fallback={{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ redis_sentinel_port }}{% endif %}{% endfor %}&socket_timeout=60&retry_on_timeout=yes
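# Example rendering (hypothetical addresses): with redis hosts 192.0.2.10 and
# 192.0.2.11 and redis_sentinel_port=26379, the loop above produces:
#   backend_url = redis://admin:<redis_master_password>@192.0.2.10:26379?sentinel=kolla&sentinel_fallback=192.0.2.11:26379&socket_timeout=60&retry_on_timeout=yes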
{% elif enable_etcd | bool %}
# NOTE(jeffrey4l): the python-etcd3 module does not support multiple endpoints here.
backend_url = etcd3://{{ hostvars[groups['etcd'][0]]['ansible_' + hostvars[groups['etcd'][0]]['api_interface']]['ipv4']['address'] }}:{{ etcd_client_port }}
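# Example rendering (hypothetical address): with the first etcd host at
# 192.0.2.12 and etcd_client_port=2379, this becomes:
#   backend_url = etcd3://192.0.2.12:2379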
{% endif %}