Merge "Updating Jinja filters to conform to Ansible 2.5+"
commit 473df9b811
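
Ansible 2.5 deprecated the Jinja filter style of inspecting registered results (result | failed, result | changed, result | success, result | skipped) in favour of the equivalent Jinja tests (result is failed, and so on), and the filter style was removed entirely in later releases. This changeset rewrites each occurrence. As a minimal illustrative sketch of the migration (not code from this repository):

    # Old filter syntax, deprecated since Ansible 2.5:
    - command: /bin/false
      register: result
      failed_when: result | failed

    # New test syntax:
    - command: /bin/false
      register: result
      failed_when: result is failed

The same substitution applies throughout: changed becomes is changed, success becomes is successful (or is succeeded), skipped becomes is skipped, and negated forms such as not item | skipped become item is not skipped.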
@@ -24,7 +24,7 @@
     delegate_to: "{{ controller_host }}"
     register: result
     failed_when:
-      - result | failed
+      - result is failed
       # Some BMCs complain if the node is already powered off.
       - "'Command not supported in present state' not in result.stderr"
     vars:
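For context, conditions listed under failed_when are ANDed together, so the task above only fails when the power-off command actually failed and the BMC's complaint about the node already being off is absent from stderr. A minimal sketch of the same pattern, with a hypothetical command and error message:

    - name: Ensure the node is powered off
      command: ipmitool chassis power off   # hypothetical invocation, for illustration
      register: result
      failed_when:
        - result is failed
        - "'already off' not in result.stderr"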
@@ -67,7 +67,7 @@
     delegate_to: "{{ delegate_host }}"
     register: arp_result
     failed_when:
-      - arp_result | failed
+      - arp_result is failed
       - "'No ARP entry for ' ~ idrac_default_ip not in arp_result.stdout"
 
 # Ansible's until keyword seems to not work nicely with failed_when, causing
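(As an aside, the ~ in the ARP check above is Jinja's string-concatenation operator, which coerces both operands to strings: with an assumed idrac_default_ip of 192.168.0.120, 'No ARP entry for ' ~ idrac_default_ip evaluates to 'No ARP entry for 192.168.0.120'.)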
@@ -17,7 +17,7 @@
       - iscsid.service
     register: result
     failed_when:
-      - result|failed
+      - result is failed
       # If a service is not installed, the ansible service module will fail
       # with this error message.
       - '"Could not find the requested service" not in result.msg'
@@ -44,7 +44,7 @@
     become: True
     register: nm_result
     failed_when:
-      - nm_result | failed
+      - nm_result is failed
       # Ugh, Ansible's service module doesn't handle uninstalled services.
       - "'Could not find the requested service' not in nm_result.msg"
 
@@ -51,7 +51,7 @@
   tasks:
     - name: Set a fact about whether the configuration changed
       set_fact:
-        bios_or_raid_change: "{{ drac_result | changed }}"
+        bios_or_raid_change: "{{ drac_result is changed }}"
 
 - name: Ensure that overcloud BIOS and RAID volumes are configured
   hosts: overcloud_with_bmcs_of_type_idrac
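Note that the test form works anywhere a Jinja expression is evaluated, not only in task keywords such as when, failed_when and until. A minimal sketch with placeholder names:

    - name: Record whether an earlier task reported a change
      set_fact:
        something_changed: "{{ some_result is changed }}"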
@@ -81,7 +81,7 @@
         -m command
         -a "openstack baremetal node undeploy {% raw %}{{ inventory_hostname }}{% endraw %}"'
     register: delete_result
-    until: delete_result | success or 'is locked by host' in delete_result.stdout
+    until: delete_result is successful or 'is locked by host' in delete_result.stdout
     retries: "{{ ironic_retries }}"
     delay: "{{ ironic_retry_interval }}"
     when: initial_provision_state != 'available'
@@ -75,7 +75,7 @@
         -m command
         -a "openstack baremetal node manage {% raw %}{{ inventory_hostname }}{% endraw %}"'
     register: manage_result
-    until: manage_result | success or 'is locked by host' in manage_result.stdout
+    until: manage_result is successful or 'is locked by host' in manage_result.stdout
     retries: "{{ ironic_retries }}"
     delay: "{{ ironic_retry_interval }}"
     when: initial_provision_state != 'manageable'
@@ -100,7 +100,7 @@
         -m command
         -a "openstack baremetal node inspect {% raw %}{{ inventory_hostname }}{% endraw %}"'
     register: provide_result
-    until: provide_result | success or 'is locked by host' in provide_result.stdout
+    until: provide_result is successful or 'is locked by host' in provide_result.stdout
     retries: "{{ ironic_retries }}"
     delay: "{{ ironic_retry_interval }}"
     delegate_to: "{{ seed_host }}"
@@ -84,7 +84,7 @@
         -m command
         -a "openstack baremetal node manage {% raw %}{{ inventory_hostname }}{% endraw %}"'
     register: manage_result
-    until: manage_result | success or 'is locked by host' in manage_result.stdout
+    until: manage_result is successful or 'is locked by host' in manage_result.stdout
     retries: "{{ ironic_retries }}"
     delay: "{{ ironic_retry_interval }}"
     when: initial_provision_state == 'enroll'
@@ -109,7 +109,7 @@
         -m command
         -a "openstack baremetal node provide {% raw %}{{ inventory_hostname }}{% endraw %}"'
     register: provide_result
-    until: provide_result | success or 'is locked by host' in provide_result.stdout
+    until: provide_result is successful or 'is locked by host' in provide_result.stdout
     retries: "{{ ironic_retries }}"
     delay: "{{ ironic_retry_interval }}"
     when: initial_provision_state in ['enroll', 'manageable']
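In the hunks above, the until expression is re-evaluated after each attempt: the task is retried, up to the configured number of retries with the configured delay between attempts, until the expression evaluates true. A minimal sketch of the pattern with placeholder values:

    - name: Provide a bare metal node
      command: openstack baremetal node provide {{ inventory_hostname }}
      register: provide_result
      until: provide_result is successful or 'is locked by host' in provide_result.stdout
      retries: 6
      delay: 10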
@@ -29,7 +29,7 @@
       sudo shutdown -r now "Applying SELinux changes"
     register: reboot_result
     failed_when:
-      - reboot_result | failed
+      - reboot_result is failed
       - "'closed by remote host' not in reboot_result.stderr"
     when: not is_local | bool
 
@@ -51,4 +51,4 @@
     when: not is_local | bool
     when:
       - disable_selinux_do_reboot | bool
-      - selinux_result | changed
+      - selinux_result is changed
@@ -23,7 +23,7 @@
     command: docker volume rm {{ volume }}
     with_items: "{{ volume_result.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
      - item.rc == 0
     vars:
       volume: "{{ item.item.1.split(':')[0] }}"
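When a loop runs over the registered results of an earlier loop, each element of the .results list may record a skip instead of a command result, which is why item is not skipped guards the access to item.rc. Each element also carries the original loop item under an .item key; a hypothetical shape of one element, for illustration:

    # One element of volume_result.results might look like:
    #   { "item": ["web", "data:/var/lib/data"], "rc": 0, "stdout": "...", ... }
    # so item.item.1 is "data:/var/lib/data" and volume becomes "data".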
@@ -42,14 +42,14 @@
     register: cp_sockets
     run_once: True
     when:
-      - group_result|changed
+      - group_result is changed
 
   - name: Drop all persistent SSH connections to activate the new group membership
     local_action:
       module: shell ssh -O stop None -o ControlPath={{ item.path }}
     with_items: "{{ cp_sockets.files }}"
     run_once: True
-    when: not cp_sockets|skipped
+    when: cp_sockets is not skipped
 
   - name: Ensure Docker daemon is started
     service:
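A task whose when condition evaluates false still registers a result, but that result records a skip, so the follow-up task tests cp_sockets is not skipped before using cp_sockets.files. A minimal sketch of the shape, with placeholder names and an added default([]) to keep the loop expression valid when the first task was skipped:

    - name: Find persistent SSH control sockets
      find:
        paths: [/tmp/ssh-cp]   # assumed socket directory, for illustration
      register: cp_sockets
      when: group_result is changed

    - name: Drop each persistent SSH connection
      command: ssh -O stop None -o ControlPath={{ item.path }}
      with_items: "{{ cp_sockets.files | default([]) }}"
      when: cp_sockets is not skipped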
@@ -23,7 +23,7 @@
     command: docker volume rm {{ volume }}
     with_items: "{{ volume_result.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.rc == 0
     vars:
       volume: "{{ item.item.1.split(':')[0] }}"
@@ -42,7 +42,7 @@
         Ensure that each disk in 'ceph_disks' does not have any partitions.
     with_items: "{{ disk_journal_info.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.partitions | length > 0
       - not item.partitions.0.name.startswith('KOLLA_CEPH')
     loop_control:
@@ -75,7 +75,7 @@
     state: present
     with_items: "{{ disk_journal_info.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.partitions | length == 0
     loop_control:
       label: "{{item.item}}"
@@ -23,7 +23,7 @@
     command: docker volume rm {{ volume }}
     with_items: "{{ volume_result.results }}"
     when:
-      - not item | skipped
+      - item is not skipped
       - item.rc == 0
     vars:
       volume: "{{ item.item.1.split(':')[0] }}"
@@ -39,5 +39,5 @@
   with_together:
     - "{{ veth_result.results }}"
     - "{{ peer_result.results }}"
-  when: ctl_result|changed or item[0]|changed or item[1]|changed
+  when: ctl_result is changed or item[0] is changed or item[1] is changed
   become: True