Fix issues introduced by ansible-lint 6.6.0
Mainly jinja spacing and jinja[invalid] related fixes.

Change-Id: I6f52f2b0c1ef76de626657d79486d31e0f47f384
parent 81d4d26279
commit 1aac65de0c
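For context, a minimal sketch of what the jinja[spacing] rule expects (single spaces just inside the {{ }} and {% %} delimiters, and after commas), using hypothetical variable names rather than anything from this change:

    # flagged by ansible-lint 6.6.0 (jinja[spacing])
    example_opt: "{{some_var}}{% if some_flag %}x{%endif%}"
    # accepted
    example_opt: "{{ some_var }}{% if some_flag %}x{% endif %}"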
@@ -220,8 +220,8 @@

 - name: Ensure RabbitMQ Erlang cookie exists
   become: true
-  copy:
-    content: "{{ rabbitmq_cluster_cookie }}"
+  template:
+    src: "rabbitmq-erlang.cookie.j2"
     dest: "{{ node_config_directory }}/kolla-toolbox/rabbitmq-erlang.cookie"
     mode: "0660"
   when:
ansible/roles/common/templates/rabbitmq-erlang.cookie.j2 (new file, 1 line)
@@ -0,0 +1 @@
+{{ rabbitmq_cluster_cookie }}
@@ -17,19 +17,19 @@
     destroy_include_dev: "{{ destroy_include_dev }}"

 - block:
-    - name: disable octavia-interface service
+    - name: Disable octavia-interface service
       service:
         name: octavia-interface
         enabled: no
         state: stopped
       failed_when: false

-    - name: remove octavia-interface service file
+    - name: Remove octavia-interface service file
       file:
         path: /etc/systemd/system/octavia-interface.service
         state: absent

-    - name: remove dhclient.conf
+    - name: Remove dhclient.conf
       file:
         path: /etc/dhcp/octavia-dhclient.conf
         state: absent
@@ -36,7 +36,7 @@ elasticsearch_enable_keystone_registration: False

 elasticsearch_cluster_name: "kolla_logging"
 es_heap_size: "1g"
-es_java_opts: "{% if es_heap_size %}-Xms{{ es_heap_size }} -Xmx{{ es_heap_size }}{%endif%} -Dlog4j2.formatMsgNoLookups=true"
+es_java_opts: "{% if es_heap_size %}-Xms{{ es_heap_size }} -Xmx{{ es_heap_size }}{% endif %} -Dlog4j2.formatMsgNoLookups=true"

 #######################
 # Elasticsearch Curator
@@ -62,7 +62,7 @@ elasticsearch_curator_dry_run: false

 # Index prefix pattern. Any indices matching this regex will
 # be managed by Curator.
-elasticsearch_curator_index_pattern: "^{{ '(monasca|' + kibana_log_prefix + ')' if enable_monasca|bool else kibana_log_prefix }}-.*" # noqa var-spacing
+elasticsearch_curator_index_pattern: "^{{ '(monasca|' + kibana_log_prefix + ')' if enable_monasca | bool else kibana_log_prefix }}-.*" # noqa jinja[spacing]

 # Duration after which an index is staged for deletion. This is
 # implemented by closing the index. Whilst in this state the index
@@ -224,7 +224,7 @@ keystone_host_federation_oidc_attribute_mappings_folder: "{{ node_config_directo
 # These variables are used to define multiple trusted Horizon dashboards.
 # keystone_trusted_dashboards: ['https://dashboardServerOne/auth/websso/', 'https://dashboardServerTwo/auth/websso/', 'https://dashboardServerN/auth/websso/']
 keystone_trusted_dashboards: "{{ ['%s://%s/auth/websso/' % (public_protocol, kolla_external_fqdn), '%s/auth/websso/' % (horizon_public_endpoint)] if enable_horizon | bool else [] }}"
-keystone_enable_federation_openid: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol','equalto','openid') | list | count > 0 }}"
+keystone_enable_federation_openid: "{{ enable_keystone_federation | bool and keystone_identity_providers | selectattr('protocol', 'equalto', 'openid') | list | count > 0 }}"
 keystone_should_remove_attribute_mappings: False
 keystone_should_remove_identity_providers: False
 keystone_federation_oidc_response_type: "id_token"
@@ -15,5 +15,4 @@
   run_once: True
   delegate_to: >-
     {% if groups['keystone_fernet_running'] is defined -%}
-    {{ groups['keystone_fernet_running'][0] }}
-    {%- else -%}{{ groups['keystone'][0] }}{%- endif %}
+    {{ groups['keystone_fernet_running'][0] }}{%- else -%}{{ groups['keystone'][0] }}{%- endif %}
@@ -101,16 +101,15 @@
     - haproxy_enable_external_vip | bool
     - not hostvars[inventory_hostname].ansible_facts[kolla_external_vip_interface]['active']

+# NOTE(hrw): let assume that each supported host OS has ping with ipv4/v6 support
 - name: Checking if kolla_internal_vip_address and kolla_external_vip_address are not pingable from any node
-  command: "{{ item.command }} -c 3 {{ item.address }}"
+  command: "ping -c 3 {{ item }}"
   register: ping_output
   changed_when: false
   failed_when: ping_output.rc != 1
   with_items:
-    - address: "{{ kolla_internal_vip_address }}"
-      command: "{{ 'ping' if kolla_internal_vip_address | ipv4 else 'ping6' }}"
-    - address: "{{ kolla_external_vip_address }}"
-      command: "{{ 'ping' if kolla_external_vip_address | ipv4 else 'ping6' }}"
+    - "{{ kolla_internal_vip_address }}"
+    - "{{ kolla_external_vip_address }}"
   when:
     - enable_keepalived | bool
     - keepalived_vip_prechecks
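Per the NOTE added above, the per-address 'ping'/'ping6' selection is dropped on the assumption that every supported host OS ships a ping that handles both IPv4 and IPv6 targets. The failed_when: ping_output.rc != 1 condition is unchanged: the precheck only passes when ping receives no reply (exit code 1), i.e. when neither VIP is already in use.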
@@ -195,7 +194,7 @@

 - name: Getting haproxy stat
   become: true
-  shell: echo "show stat" | docker exec -i haproxy socat unix-connect:/var/lib/kolla/haproxy/haproxy.sock stdio # noqa 306
+  shell: echo "show stat" | docker exec -i haproxy socat unix-connect:/var/lib/kolla/haproxy/haproxy.sock stdio # noqa risky-shell-pipe
   register: haproxy_stat_shell
   changed_when: false
   when: container_facts['haproxy'] is defined
@@ -77,8 +77,8 @@
   changed_when: false

 - name: Writing hostname of host with the largest seqno to temp file
-  copy:
-    content: "{{ inventory_hostname }}"
+  template:
+    src: "hostname.j2"
     dest: "{{ mariadb_recover_tmp_file_path }}"
     mode: 0644
   delegate_to: localhost
ansible/roles/mariadb/templates/hostname.j2 (new file, 1 line)
@@ -0,0 +1 @@
+{{ inventory_hostname }}
@@ -31,18 +31,17 @@ neutron_services:
         KOLLA_LEGACY_IPTABLES: "{{ neutron_legacy_iptables | bool | lower }}"
     host_in_groups: >-
       {{
-      ( inventory_hostname in groups['compute']
+      (inventory_hostname in groups['compute']
        or (enable_manila_backend_generic | bool and inventory_hostname in groups['manila-share'])
        or inventory_hostname in groups['neutron-dhcp-agent']
        or inventory_hostname in groups['neutron-l3-agent']
        or inventory_hostname in groups['neutron-metadata-agent']
-       and not enable_nova_fake | bool
-      ) or
-      ( inventory_hostname in groups['neutron-dhcp-agent']
+       and not enable_nova_fake | bool)
+      or
+      (inventory_hostname in groups['neutron-dhcp-agent']
        or inventory_hostname in groups['neutron-l3-agent']
        or inventory_hostname in groups['neutron-metadata-agent']
-       and enable_nova_fake | bool
-      )
+       and enable_nova_fake | bool)
       }}
     volumes: "{{ neutron_openvswitch_agent_default_volumes + neutron_openvswitch_agent_extra_volumes }}"
     dimensions: "{{ neutron_openvswitch_agent_dimensions }}"
@@ -94,7 +93,7 @@ neutron_services:
     container_name: "neutron_sriov_agent"
     image: "{{ neutron_sriov_agent_image_full }}"
     privileged: True
-    enabled: "{{ enable_neutron_sriov | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp' ] }}"
+    enabled: "{{ enable_neutron_sriov | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp'] }}"
     host_in_groups: "{{ inventory_hostname in groups['compute'] }}"
     volumes: "{{ neutron_sriov_agent_default_volumes + neutron_sriov_agent_extra_volumes }}"
     dimensions: "{{ neutron_sriov_agent_dimensions }}"
@@ -102,7 +101,7 @@ neutron_services:
   neutron-mlnx-agent:
     container_name: "neutron_mlnx_agent"
     image: "{{ neutron_mlnx_agent_image_full }}"
-    enabled: "{{ enable_neutron_mlnx | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp' ] }}"
+    enabled: "{{ enable_neutron_mlnx | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp'] }}"
     host_in_groups: "{{ inventory_hostname in groups['compute'] }}"
     volumes: "{{ neutron_mlnx_agent_default_volumes + neutron_mlnx_agent_extra_volumes }}"
     dimensions: "{{ neutron_mlnx_agent_dimensions }}"
@@ -110,7 +109,7 @@ neutron_services:
     container_name: "neutron_eswitchd"
     image: "{{ neutron_eswitchd_image_full }}"
     privileged: True
-    enabled: "{{ enable_neutron_mlnx | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp' ] }}"
+    enabled: "{{ enable_neutron_mlnx | bool and neutron_plugin_agent not in ['vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp'] }}"
     host_in_groups: "{{ inventory_hostname in groups['compute'] }}"
     volumes: "{{ neutron_eswitchd_default_volumes + neutron_eswitchd_extra_volumes }}"
     dimensions: "{{ neutron_eswitchd_dimensions }}"
@@ -118,7 +117,7 @@ neutron_services:
     container_name: "neutron_metadata_agent"
     image: "{{ neutron_metadata_agent_image_full }}"
     privileged: True
-    enabled: "{{ neutron_plugin_agent not in [ 'ovn', 'vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp' ] }}"
+    enabled: "{{ neutron_plugin_agent not in ['ovn', 'vmware_nsxv', 'vmware_nsxv3', 'vmware_nsxp'] }}"
     host_in_groups: >-
       {{
         inventory_hostname in groups['neutron-metadata-agent']
@@ -9,7 +9,7 @@
       neutron_services |
       select_services_enabled_and_mapped_to_host |
       list |
-      intersect([ "neutron-l3-agent", "neutron-linuxbridge-agent", "neutron-openvswitch-agent" ]) |
+      intersect(["neutron-l3-agent", "neutron-linuxbridge-agent", "neutron-openvswitch-agent"]) |
       list |
       length > 0

@@ -48,6 +48,7 @@
     neutron_server: "{{ neutron_services['neutron-server'] }}"
     first_neutron_server_host: "{{ groups[neutron_server.group][0] }}"
     results_of_check_pending_contract_scripts: "{{ hostvars[first_neutron_server_host]['neutron_check_contract_db_stdout'] }}"
+    # NOTE(hrw): no idea
     filter_rc: "results[?rc!=`0`]"
     is_stop_neutron_server: "{{ results_of_check_pending_contract_scripts | json_query(filter_rc) }}"
   become: true
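The filter_rc expression above is standard JMESPath: results[?rc!=`0`] keeps only the registered command results whose return code is non-zero, so is_stop_neutron_server ends up non-empty (truthy) when any pending-contract check reported a failure.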
@@ -466,7 +466,7 @@ openstack_nova_auth: "{{ openstack_auth }}"
 # Set to KOLLA_UNSET to make Kolla unset these in the managed sysctl.conf file.
 nova_compute_host_rp_filter_mode: KOLLA_SKIP

-nova_libvirt_port: "{{'16514' if libvirt_tls | bool else '16509'}}"
+nova_libvirt_port: "{{ '16514' if libvirt_tls | bool else '16509' }}"
 nova_ssh_port: "8022"

 # NOTE(mgoddard): The order of this list defines the order in which services
@@ -157,8 +157,8 @@
 - name: Pushing secrets key for libvirt
   vars:
     service: "{{ nova_cell_services['nova-libvirt'] }}"
-  copy:
-    content: "{{ item.result }}"
+  template:
+    src: "libvirt-secret.j2"
     dest: "{{ libvirt_secrets_dir }}/{{ item.uuid }}.base64"
     owner: "{{ config_owner_user }}"
     group: "{{ config_owner_group }}"
ansible/roles/nova-cell/templates/libvirt-secret.j2 (new file, 1 line)
@@ -0,0 +1 @@
+{{ item.result }}
@@ -348,8 +348,8 @@ octavia_amp_network:
 octavia_amp_network_cidr: 10.1.0.0/24

 # Octavia provider drivers
-octavia_provider_drivers: "amphora:Amphora provider{% if neutron_plugin_agent == 'ovn'%}, ovn:OVN provider{% endif %}"
-octavia_provider_agents: "amphora_agent{% if neutron_plugin_agent == 'ovn'%}, ovn{% endif %}"
+octavia_provider_drivers: "amphora:Amphora provider{% if neutron_plugin_agent == 'ovn' %}, ovn:OVN provider{% endif %}"
+octavia_provider_agents: "amphora_agent{% if neutron_plugin_agent == 'ovn' %}, ovn{% endif %}"

 ####################
 # TLS
@@ -103,8 +103,8 @@
 - block:

     - name: Copying over Octavia SSH key
-      copy:
-        content: "{{ octavia_amp_ssh_key.private_key }}"
+      template:
+        src: "octavia-ssh-key.j2"
         dest: "{{ node_config_directory }}/octavia-worker/{{ octavia_amp_ssh_key_name }}"
         owner: "{{ config_owner_user }}"
         group: "{{ config_owner_group }}"
ansible/roles/octavia/templates/octavia-ssh-key.j2 (new file, 1 line)
@@ -0,0 +1 @@
+{{ octavia_amp_ssh_key.private_key }}
@@ -12,7 +12,7 @@
 - name: Configure OVN in OVSDB
   vars:
     ovn_mappings: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ bridge }}{% if not loop.last %},{% endif %}{% endfor %}"
-    ovn_macs: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname+bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
+    ovn_macs: "{% for bridge in neutron_bridge_name.split(',') %}physnet{{ loop.index0 + 1 }}:{{ ovn_base_mac | random_mac(seed=inventory_hostname + bridge) }}{% if not loop.last %},{% endif %}{% endfor %}"
     ovn_cms_opts: "{{ 'enable-chassis-as-gw' if inventory_hostname in groups['ovn-controller-network'] else '' }}"
   become: true
   kolla_toolbox:
@@ -85,13 +85,13 @@
         or inventory_hostname in groups['neutron-metadata-agent'])
     - ovs_physical_port_policy == 'named'

-- name: wait for dpdk tunnel ip
+- name: Wait for dpdk tunnel ip
   wait_for:
     host: "{{ dpdk_tunnel_interface_address }}"
     timeout: 300
     sleep: 5

-- name: ovs-dpdk gather facts
+- name: OVS-DPDK gather facts
   setup:
     filter: ansible_*
   when:
@@ -1,6 +1,6 @@
 ---
 proxysql_project_database_shard: "{{ lookup('vars', (kolla_role_name | default(project_name)) + '_database_shard', default=omit) }}"
 # NOTE(kevko): Kolla_role_name and replace is used only because of nova-cell
-proxysql_project: "{{ kolla_role_name | default(project_name) | replace('_','-') }}"
+proxysql_project: "{{ kolla_role_name | default(project_name) | replace('_', '-') }}"
 proxysql_config_users: "{% if proxysql_project_database_shard is defined and proxysql_project_database_shard['users'] is defined %}True{% else %}False{% endif %}"
 proxysql_config_rules: "{% if proxysql_project_database_shard is defined and proxysql_project_database_shard['rules'] is defined %}True{% else %}False{% endif %}"
@@ -44,7 +44,7 @@ swift_rsyncd_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ do
 swift_rsyncd_tag: "{{ swift_tag }}"
 swift_rsyncd_image_full: "{{ swift_rsyncd_image }}:{{ swift_rsyncd_tag }}"

-swift_log_level: "{{ 'DEBUG' if openstack_logging_debug | bool else 'INFO'}}"
+swift_log_level: "{{ 'DEBUG' if openstack_logging_debug | bool else 'INFO' }}"

 ####################
 # OpenStack
@@ -5,9 +5,9 @@
     action: "stop_container"
     common_options: "{{ docker_common_options }}"
     name: "swift_rsyncd"
-  when: ( inventory_hostname in groups['swift-account-server'] or
+  when: (inventory_hostname in groups['swift-account-server'] or
          inventory_hostname in groups['swift-container-server'] or
-         inventory_hostname in groups['swift-object-server'] ) and
+         inventory_hostname in groups['swift-object-server']) and
         'swift_rsyncd' not in skip_stop_containers

 - name: Stopping swift-account-server container