kolla-ansible/ansible/roles/ceph/tasks/bootstrap_osds.yml
Paul Bourke fecf719e59 Allow operators to use 'fallback mode' for Ceph disks
This change adds a variable "kolla_ceph_use_udev" to the Ceph role.
It is True by default, meaning no change to the current behaviour. If
set to False, the role falls back to tools such as sgdisk/blkid to
parse the disk information it needs, instead of using udev.

Change-Id: I88d7b73efe27f04bb1ba16d61e101fa14a9f0d81
Depends-On: I6ad7825cdb164498f3d02f2ae064c7c1c38e10d5
Closes-Bug: #1631949
2016-12-20 13:38:33 +00:00
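
A minimal illustration of the toggle described above: an operator who needs the
fallback can override the variable in globals.yml (the default, True, keeps the
udev-based behaviour):

    kolla_ceph_use_udev: False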


---
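# Disks prepared for Ceph are discovered with the find_disks module inside the
# kolla_toolbox container (partitions labelled with the KOLLA_CEPH_OSD_BOOTSTRAP
# prefix), the operator is asked to confirm before any disk that still holds
# other partitions is wiped, and a one-shot bootstrap container is then started
# per disk.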
- name: Looking up disks to bootstrap for Ceph OSDs
  command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
    -m find_disks
    -a "partition_name='KOLLA_CEPH_OSD_BOOTSTRAP' match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_lookup
  changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'

- name: Parsing disk info for Ceph OSDs
  set_fact:
    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
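
# The same lookup and parse are repeated for cache OSDs, i.e. partitions
# labelled with the KOLLA_CEPH_OSD_CACHE_BOOTSTRAP prefix.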
- name: Looking up disks to bootstrap for Ceph Cache OSDs
  command: docker exec -t kolla_toolbox sudo -E /usr/bin/ansible localhost
    -m find_disks
    -a "partition_name='KOLLA_CEPH_OSD_CACHE_BOOTSTRAP' match_mode='prefix' use_udev={{ kolla_ceph_use_udev }}"
  register: osd_cache_lookup
  changed_when: "{{ osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
  failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'

- name: Parsing disk info for Ceph Cache OSDs
  set_fact:
    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
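
# Safety prompt: a disk marked for Ceph data with a co-located journal that
# already carries more than one partition is only wiped after the operator
# confirms interactively, unless ceph_osd_wipe_disk is set to
# 'yes-i-really-really-mean-it'.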
- pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_bootstrap|default([]) }}"
  when:
    - item.external_journal | bool == False
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"
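
# Each discovered disk is bootstrapped by a one-shot container (detach: False,
# restart_policy: never); device, partition and journal details are passed in
# through environment variables.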
- name: Bootstrapping Ceph OSDs
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_{{ item.0 }}"
    privileged: True
    restart_policy: "never"
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_bootstrap|default([]) }}"
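
# The same confirmation prompt and bootstrap run are repeated for the cache
# OSDs, with CEPH_CACHE set in the container environment.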
- pause:
    prompt: |
      WARNING: It seems {{ item.device }} is marked to be wiped and partitioned for Ceph data and
      a co-located journal, but appears to contain other existing partitions (>1).
      If you are sure you want this disk to be *wiped* for use with Ceph, press enter.
      Otherwise, press Ctrl-C, then 'A'. (You can disable this check by setting
      ceph_osd_wipe_disk: 'yes-i-really-really-mean-it' within globals.yml)
  with_items: "{{ osds_cache_bootstrap|default([]) }}"
  when:
    - item.external_journal | bool == False
    - ansible_devices[item.device.split('/')[2]].partitions|count > 1
    - ceph_osd_wipe_disk != "yes-i-really-really-mean-it"

- name: Bootstrapping Ceph Cache OSDs
  kolla_docker:
    action: "start_container"
    common_options: "{{ docker_common_options }}"
    detach: False
    environment:
      KOLLA_BOOTSTRAP:
      KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
      CEPH_CACHE:
      OSD_DEV: "{{ item.1.device }}"
      OSD_PARTITION: "{{ item.1.partition }}"
      OSD_PARTITION_NUM: "{{ item.1.partition_num }}"
      JOURNAL_DEV: "{{ item.1.journal_device }}"
      JOURNAL_PARTITION: "{{ item.1.journal }}"
      JOURNAL_PARTITION_NUM: "{{ item.1.journal_num }}"
      USE_EXTERNAL_JOURNAL: "{{ item.1.external_journal | bool }}"
      OSD_FILESYSTEM: "{{ ceph_osd_filesystem }}"
      OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
      HOSTNAME: "{{ hostvars[inventory_hostname]['ansible_' + storage_interface]['ipv4']['address'] }}"
    image: "{{ ceph_osd_image_full }}"
    labels:
      BOOTSTRAP:
    name: "bootstrap_osd_cache_{{ item.0 }}"
    privileged: True
    restart_policy: "never"
    volumes:
      - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
      - "/etc/localtime:/etc/localtime:ro"
      - "/dev/:/dev/"
      - "kolla_logs:/var/log/kolla/"
  with_indexed_items: "{{ osds_cache_bootstrap|default([]) }}"