22a6223b1b
After all of the discussions we had on https://review.opendev.org/#/c/670626/2, I studied all projects that have an "oslo_messaging" section. I then applied the same method that is already used in the "oslo_messaging" section of Nova, Cinder, and others. This guarantees a consistent method to enable/disable notifications across projects, based on whether the components that consume them (e.g. Ceilometer) are enabled or disabled. The variable pattern behind this mechanism is sketched after this message. Here follows the list of components and the respective changes:

* Aodh: The section is declared, but not used. It will be removed in an upcoming PR.
* Congress: The section is declared, but not used. It will be removed in an upcoming PR.
* Cinder: It was already properly configured.
* Octavia: The section is declared, but not used. It will be removed in an upcoming PR.
* Heat: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Ceilometer: Ceilometer publishes some messages to RabbitMQ. However, its default driver is "messagingv2", not '' (empty) as defined in Oslo; these defaults are set in ceilometer/publisher/messaging.py. Therefore, we do not need to do anything for the "oslo_messaging_notifications" section in Ceilometer.
* Tacker: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Neutron: It was already properly configured.
* Nova: It was already properly configured. However, we found another issue with its configuration: Kolla-ansible does not configure Nova notifications as it should. If Searchlight is not installed (enabled), 'notification_format' should be 'unversioned'. The default is 'both', so Nova sends notifications to the 'versioned_notifications' queue, but that queue has no consumer when Searchlight is disabled. In our case, the queue accumulated 511k messages, and that huge amount of "stuck" messages made the RabbitMQ cluster unstable (see the nova.conf sketch below).
  https://bugzilla.redhat.com/show_bug.cgi?id=1478274
  https://bugs.launchpad.net/ceilometer/+bug/1665449
* Nova_hyperv: I added the same configuration as in the Nova project.
* Vitrage: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Searchlight: I created a mechanism similar to what we have in Aodh, Cinder, Nova, and others.
* Ironic: I created a mechanism similar to what we have in Aodh, Cinder, Nova, and others.
* Glance: It was already properly configured.
* Trove: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Blazar: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Sahara: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Watcher: I created a mechanism similar to what we have in Aodh, Cinder, Nova, and others.
* Barbican: I created a mechanism similar to what we have in Cinder, Nova, and others. I also added configuration to the 'keystone_notifications' section: Barbican needs its own queue to capture events from Keystone; otherwise, it has an impact on Ceilometer and other systems connected to the default "notifications" queue (see the barbican.conf sketch below).
* Keystone: Keystone is the system that triggered this work, with the discussions that followed on https://review.opendev.org/#/c/670626/2. After a long discussion, we agreed to apply the same approach that we have in Nova, Cinder, and other systems to Keystone, and that is what we did. Moreover, we introduce a new topic, "barbican_notifications", when Barbican is enabled. We also removed the variable 'enable_cadf_notifications', as it is obsolete; CADF is the default notification format in Keystone.
* Mistral: The driver was hardcoded to "noop". That does not seem like good practice, so I applied the same standard of using the driver and pushing to the "notifications" queue when Ceilometer is enabled.
* Cyborg: I created a mechanism similar to what we have in Aodh, Cinder, Nova, and others.
* Murano: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Senlin: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Manila: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Zun: The section is declared, but not used. It will be removed in an upcoming PR.
* Designate: It was already using a similar scheme; I modified it slightly to match what we have in all other components.
* Magnum: It was already using a similar scheme; I modified it slightly to match what we have in all other components.

Closes-Bug: #1838985
Change-Id: I88bdb004814f37c81c9a9c4e5e491fac69f6f202
Signed-off-by: Rafael Weingärtner <rafael@apache.org>
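The mechanism itself is a per-project list of notification topics, each guarded by the services that consume it; the template then renders only the enabled ones. A minimal sketch of the variable pattern, using Mistral's names as they appear in the template below (the file path and the exact defaults here are assumptions based on that pattern):

    # ansible/roles/mistral/defaults/main.yml (hypothetical excerpt)
    mistral_notification_topics:
      - name: notifications
        enabled: "{{ enable_ceilometer | bool }}"

    # Keep only the topics whose guard evaluated to true.
    mistral_enabled_notification_topics: "{{ mistral_notification_topics | selectattr('enabled', 'equalto', true) | list }}"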
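For the Nova fix, the guard is a small conditional in nova.conf.j2. A sketch, assuming the existing 'enable_searchlight' flag (the exact condition in the change may differ):

    {% if not enable_searchlight | bool %}
    [notifications]
    notification_format = unversioned
    {% endif %}

With this in place, Nova stops feeding the consumer-less 'versioned_notifications' queue on deployments without Searchlight.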
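For Barbican's dedicated Keystone-event queue, the relevant options live in barbican.conf's '[keystone_notifications]' group. A sketch using the topic name introduced above (the option values shown are assumptions):

    [keystone_notifications]
    enable = True
    topic = barbican_notifications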
[DEFAULT]
debug = {{ mistral_logging_debug }}

log_dir = /var/log/kolla/mistral
{% if service_name == 'mistral-engine' %}
log_file = /var/log/kolla/mistral/mistral-engine.log
{% elif service_name == 'mistral-event-engine' %}
log_file = /var/log/kolla/mistral/mistral-event-engine.log
{% elif service_name == 'mistral-executor' %}
log_file = /var/log/kolla/mistral/mistral-executor.log
{% endif %}

# NOTE(elemoine): set use_stderr to False or the logs will also be sent to
# stderr and collected by Docker
use_stderr = False

transport_url = {{ rpc_transport_url }}

{% if service_name == 'mistral-api' %}
[api]
host = {{ api_interface_address }}
port = {{ mistral_api_port }}
api_workers = {{ openstack_service_workers }}
{% elif service_name == 'mistral-engine' %}
[engine]
host = {{ api_interface_address }}
{% elif service_name == 'mistral-event-engine' %}
[event_engine]
host = {{ api_interface_address }}
{% elif service_name == 'mistral-executor' %}
[executor]
host = {{ api_interface_address }}
{% endif %}

[database]
connection = mysql+pymysql://{{ mistral_database_user }}:{{ mistral_database_password }}@{{ mistral_database_address }}/{{ mistral_database_name }}
max_retries = -1

[keystone_authtoken]
www_authenticate_uri = {{ keystone_internal_url }}/v3
auth_url = {{ keystone_admin_url }}/v3
auth_type = password
project_domain_id = {{ default_project_domain_id }}
user_domain_id = {{ default_user_domain_id }}
project_name = service
username = {{ mistral_keystone_user }}
password = {{ mistral_keystone_password }}

memcache_security_strategy = ENCRYPT
memcache_secret_key = {{ memcache_secret_key }}
memcached_servers = {% for host in groups['memcached'] %}{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }}:{{ memcached_port }}{% if not loop.last %},{% endif %}{% endfor %}

[mistral]
url = {{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ mistral_api_port }}

[openstack_actions]
os_actions_endpoint_type = internal
default_region = {{ openstack_region_name }}

[oslo_messaging_notifications]
transport_url = {{ notify_transport_url }}
{% if mistral_enabled_notification_topics %}
driver = messagingv2
topics = {{ mistral_enabled_notification_topics | map(attribute='name') | join(',') }}
{% else %}
driver = noop
{% endif %}

{% if mistral_policy_file is defined %}
[oslo_policy]
policy_file = {{ mistral_policy_file }}
{% endif %}

{% if enable_osprofiler | bool %}
[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = {{ osprofiler_secret }}
connection_string = {{ osprofiler_backend_connection_string }}
{% endif %}

[coordination]
backend_url = {{ redis_connection_string }}
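For reference, when at least one topic is enabled (e.g. with Ceilometer deployed and the default 'notifications' topic), the '[oslo_messaging_notifications]' block above renders to something like this (the transport URL is a placeholder):

    [oslo_messaging_notifications]
    transport_url = rabbit://openstack:secret@rabbitmq:5672//
    driver = messagingv2
    topics = notifications

With no topics enabled, the same block falls back to 'driver = noop', so Mistral publishes no notifications at all.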