kolla-ansible/ansible/roles/glance/templates/glance-api.conf.j2
Michal Arbet fdf2385f14 Add support for multiple ceph files
This patch adds an option to copy distinct Ceph configuration
files and corresponding keyrings for the cinder, glance, manila,
gnocchi and nova services.

This is especially useful when the deployment uses availability
zones, for example:

  - An individual compute host can read/write to the Ceph
    cluster in its own AZ.
  - Cinder can write to several Ceph clusters across several AZs.
  - Glance can use multistore and upload images to several
    Ceph clusters in several AZs at once (see the sketch below).

Change-Id: Ie4d8ab5a3df748137835cae1c943b9180cd10eb1
2023-06-14 10:18:11 +02:00
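
For illustration, a minimal globals.yml sketch for two per-AZ Ceph backends.
The attribute names mirror what the template loops below consume; the backend
and cluster names are purely illustrative, and the assumption that the enabled
entries feed glance_store_backends (used for enabled_backends) is not taken
from this change:

  glance_backend_ceph: "yes"
  glance_ceph_backends:
    - name: "rbd-az1"        # hypothetical backend name, becomes the [rbd-az1] store section
      type: "rbd"
      cluster: "cephaz1"     # read as /etc/ceph/cephaz1.conf via rbd_store_ceph_conf
    - name: "rbd-az2"
      type: "rbd"
      cluster: "cephaz2"

Each cluster value resolves to /etc/ceph/<cluster>.conf (with its keyring, per
the commit above), while rbd_store_user and rbd_store_pool stay shared across
all backends in this template.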


[DEFAULT]
debug = {{ glance_logging_debug }}
# NOTE(elemoine) log_dir alone does not work for Glance
log_file = /var/log/kolla/glance/glance-api.log
{% if glance_enable_tls_backend | bool %}
bind_host = 127.0.0.1
{% else %}
bind_host = {{ api_interface_address }}
{% endif %}
bind_port = {{ glance_api_listen_port }}
workers = {{ glance_api_workers }}
{% if glance_store_backends %}
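# Rendered as a comma-separated "name:type" list, e.g. "rbd-az1:rbd, rbd-az2:rbd" (example names only).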
enabled_backends = {% for key in glance_store_backends %}{{ key.name }}:{{ key.type }}{% if not loop.last %}, {% endif %}{% endfor %}
{% endif %}
{% if glance_enable_property_protection | bool %}
property_protection_file = /etc/glance/property-protections-rules.conf
{% endif %}
cinder_catalog_info = volume:cinder:internalURL
transport_url = {{ rpc_transport_url }}
{% if enable_glance_image_cache | bool %}
image_cache_max_size = {{ glance_cache_max_size }}
image_cache_dir = /var/lib/glance/image-cache
{% endif %}

[database]
connection = mysql+pymysql://{{ glance_database_user }}:{{ glance_database_password }}@{{ glance_database_address }}/{{ glance_database_name }}
connection_recycle_time = {{ database_connection_recycle_time }}
max_pool_size = {{ database_max_pool_size }}
max_retries = -1

[keystone_authtoken]
service_type = image
www_authenticate_uri = {{ keystone_internal_url }}
auth_url = {{ keystone_internal_url }}
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ glance_keystone_user }}
password = {{ glance_keystone_password }}
cafile = {{ openstack_cacert }}
region_name = {{ openstack_region_name }}
memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
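# Builds a comma-separated host:port list covering every host in the memcached group.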
memcached_servers = {% for host in groups['memcached'] %}{{ 'api' | kolla_address(host) | put_address_in_context('memcache') }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

[paste_deploy]
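# The cachemanagement middleware is added to the paste pipeline only when the image cache is enabled.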
flavor = {% if enable_glance_image_cache | bool %}keystone+cachemanagement{% else %}keystone{% endif %}

[glance_store]
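# Default store selection order: vmware, then the first configured Ceph backend, then swift, then file.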
{% if glance_backend_vmware | bool %}
default_backend = vmware
{% elif glance_backend_ceph | bool %}
default_backend = "{{ glance_ceph_backends[0].name }}"
{% elif glance_backend_swift | bool %}
default_backend = swift
{% else %}
default_backend = file
{% endif %}

{% if glance_backend_file | bool %}
[file]
filesystem_store_datadir = /var/lib/glance/images/
{% endif %}

{% if glance_backend_ceph | bool %}
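# One store section is rendered per Ceph backend; rbd_store_ceph_conf points each store at its own cluster config, while the store user and pool stay shared.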
{% for backend in glance_ceph_backends %}
[{{ backend.name }}]
rbd_store_user = {{ ceph_glance_user }}
rbd_store_pool = {{ ceph_glance_pool_name }}
rbd_store_ceph_conf = /etc/ceph/{{ backend.cluster }}.conf
{% endfor %}
{% endif %}

{% if glance_backend_swift | bool %}
[swift]
swift_store_container = glance
swift_store_multiple_containers_seed = 0
swift_store_multi_tenant = False
swift_store_create_container_on_put = True
swift_store_region = {{ openstack_region_name }}
default_swift_reference = swift
swift_store_config_file = /etc/glance/glance-swift.conf
swift_store_auth_insecure = True
{% endif %}

{% if glance_backend_vmware | bool %}
[vmware]
vmware_server_host = {{ vmware_vcenter_host_ip }}
vmware_server_username = {{ vmware_vcenter_host_username }}
vmware_server_password = {{ vmware_vcenter_host_password }}
vmware_datastores = {{ vmware_vcenter_name }}:{{ vmware_datastore_name }}
vmware_insecure = True
{% endif %}

[os_glance_tasks_store]
filesystem_store_datadir = /var/lib/glance/tasks_work_dir

[os_glance_staging_store]
filesystem_store_datadir = /var/lib/glance/staging

[oslo_middleware]
enable_proxy_headers_parsing = True

[oslo_concurrency]
lock_path = /var/lib/glance/tmp

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
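# Notifications are emitted only when at least one notification topic is enabled; otherwise the noop driver discards them.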
{% if glance_enabled_notification_topics %}
driver = messagingv2
topics = {{ glance_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

[oslo_messaging_rabbit]
heartbeat_in_pthread = false
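# TLS options apply only with om_enable_rabbitmq_tls; durable queues are turned on with om_enable_rabbitmq_high_availability.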
{% if om_enable_rabbitmq_tls | bool %}
ssl = true
ssl_ca_file = {{ om_rabbitmq_cacert }}
{% endif %}
{% if om_enable_rabbitmq_high_availability | bool %}
amqp_durable_queues = true
{% endif %}

{% if glance_policy_file is defined %}
[oslo_policy]
policy_file = {{ glance_policy_file }}
{% endif %}

{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}

{% if enable_barbican | bool %}
[barbican]
auth_endpoint = {{ keystone_internal_url }}
barbican_endpoint_type = internal
verify_ssl_path = {{ openstack_cacert }}
{% endif %}