Switch find_disks to use a decent python lib

Now /dev/loop devices will work!

Change-Id: Icb8efe4f9bc3a21a72eb04bfd03452c26f13fd70
Closes-Bug: #1518438
commit dd16395ae1 (parent 6bbd4bf1ca)
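Note for context on the fix: instead of scraping `parted -l` output, the reworked find_disks module queries the udev database through pyudev, and loop-backed partitions carry the same udev properties as real disks, which is why they now show up. Below is a minimal standalone sketch of that lookup, for illustration only; the partition label is just an example and nothing here is part of the commit itself.

# Illustration only: the udev-based lookup the reworked find_disks performs,
# shown standalone. Loop-device partitions (e.g. /dev/loop0p1) expose the same
# ID_PART_ENTRY_NAME / ID_FS_UUID properties as real disks, so they match too.
import pyudev

PARTITION_NAME = 'KOLLA_CEPH_OSD_BOOTSTRAP'  # example GPT partition label

context = pyudev.Context()
for dev in context.list_devices(subsystem='block', DEVTYPE='partition'):
    if dev.get('ID_PART_ENTRY_NAME') == PARTITION_NAME:
        print('partition={0} fs_uuid={1}'.format(
            dev.device_node, dev.get('ID_FS_UUID', '')))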
ansible/roles/ceph/meta/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
+---
+dependencies:
+  - { role: common }
@@ -1,15 +1,27 @@
 ---
 - name: Looking up disks to bootstrap for Ceph
-  find_disks:
-    partition_name: 'KOLLA_CEPH_OSD_BOOTSTRAP'
-  register: osds_bootstrap
-  when: inventory_hostname in groups['ceph-osd']
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_OSD_BOOTSTRAP'"
+  register: osd_lookup
+  changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
+  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
 
-- name: Looking up disks to bootstrap for Ceph cache
-  find_disks:
-    partition_name: 'KOLLA_CEPH_OSD_CACHE_BOOTSTRAP'
-  register: osds_cache_bootstrap
-  when: inventory_hostname in groups['ceph-osd']
+- name: Reading data from variable
+  set_fact:
+    osds_bootstrap: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
+
+- name: Looking up disks to bootstrap for Ceph
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_OSD_CACHE_BOOTSTRAP'"
+  register: osd_cache_lookup
+  changed_when: "{{ osd_cache_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
+  failed_when: osd_cache_lookup.stdout.split()[2] != 'SUCCESS'
+
+- name: Reading data from variable
+  set_fact:
+    osds_cache_bootstrap: "{{ (osd_cache_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
 
 - name: Bootstrapping Ceph OSDs
   docker:
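Reviewer note on the lookup tasks above: because find_disks now runs via `ansible localhost` inside the kolla_ansible container, the play only ever sees CLI stdout, and the module JSON-encodes its disk list, so the result gets decoded twice (hence the double `from_json`). A rough plain-Python equivalent of those expressions, with a made-up sample stdout:

# Rough Python equivalent of the changed_when / failed_when / set_fact logic
# above. The sample stdout is invented for illustration.
import json

stdout = ('localhost | SUCCESS => {"changed": false, '
          '"disks": "[{\\"device\\": \\"/dev/loop0\\", \\"fs_uuid\\": \\"\\"}]"}')

# failed_when: the third whitespace-separated token must be SUCCESS,
# which is what the "localhost | SUCCESS =>" banner provides.
if stdout.split()[2] != 'SUCCESS':
    raise RuntimeError('find_disks failed inside kolla_ansible')

marker = 'localhost | SUCCESS => '
result = json.loads(stdout.split(marker)[1])  # first from_json: ansible CLI payload
disks = json.loads(result['disks'])           # second from_json: module json.dumps()ed the list
print(result['changed'], disks)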
@@ -33,8 +45,7 @@
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       OSD_DEV: "{{ item.1.device }}"
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 # https://github.com/ansible/ansible-modules-core/pull/1031
 - name: Waiting for bootstrap containers to exit
@@ -42,8 +53,7 @@
   register: bootstrap_result
   run_once: True
   failed_when: bootstrap_result.stdout != "0"
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 - name: Cleaning up bootstrap containers
   docker:
@@ -51,8 +61,7 @@
     name: "bootstrap_osd_{{ item.0 }}"
     image: "{{ ceph_osd_image_full }}"
     state: absent
-  with_indexed_items: osds_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_bootstrap|default([])
 
 - name: Bootstrapping Ceph Cache OSDs
   docker:
@@ -77,8 +86,7 @@
       CEPH_CACHE:
       OSD_DEV: "{{ item.1.device }}"
       OSD_INITIAL_WEIGHT: "{{ osd_initial_weight }}"
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
 
 # https://github.com/ansible/ansible-modules-core/pull/1031
 - name: Waiting for bootstrap containers to exit
@@ -86,8 +94,7 @@
   register: bootstrap_result
   run_once: True
   failed_when: bootstrap_result.stdout != "0"
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
 
 - name: Cleaning up bootstrap containers
   docker:
@@ -95,5 +102,4 @@
     name: "bootstrap_osd_cache_{{ item.0 }}"
     image: "{{ ceph_osd_image_full }}"
     state: absent
-  with_indexed_items: osds_cache_bootstrap['disks']|default([])
-  when: inventory_hostname in groups['ceph-osd']
+  with_indexed_items: osds_cache_bootstrap|default([])
@@ -1,20 +1,27 @@
 ---
 - name: Looking up OSDs for Ceph
-  find_disks:
-    partition_name: 'KOLLA_CEPH_DATA'
-  register: osds
+  command: docker exec -t kolla_ansible /usr/bin/ansible localhost
+    -m find_disks
+    -a "partition_name='KOLLA_CEPH_DATA'"
+  register: osd_lookup
+  changed_when: "{{ osd_lookup.stdout.find('localhost | SUCCESS => ') != -1 and (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).changed }}"
+  failed_when: osd_lookup.stdout.split()[2] != 'SUCCESS'
+
+- name: Reading data from variable
+  set_fact:
+    osds: "{{ (osd_lookup.stdout.split('localhost | SUCCESS => ')[1]|from_json).disks|from_json }}"
 
 - name: Mounting Ceph OSD volumes
   mount:
     src: "UUID={{ item.fs_uuid }}"
     fstype: xfs
     state: mounted
-    name: "/var/lib/ceph/osd/{{ item.fs_uuid }}"
-  with_items: osds.disks
+    name: "/var/lib/ceph/osd/{{ item['fs_uuid'] }}"
+  with_items: osds
 
 - name: Gathering OSD IDs
-  command: 'cat /var/lib/ceph/osd/{{ item.fs_uuid }}/whoami'
-  with_items: osds.disks
+  command: "cat /var/lib/ceph/osd/{{ item['fs_uuid'] }}/whoami"
+  with_items: osds
   register: id
   changed_when: False
   failed_when: id.rc != 0
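The decoded osds list drives everything in this file: each entry carries the device and the filesystem UUID, the mount task mounts by UUID, and the whoami file inside that mount yields the OSD id used for OSD_ID and the container name. Roughly, in plain Python (the sample entry and paths are illustrative only):

# Illustrative walk-through of what the mount / whoami / start tasks consume.
# The sample entry is an example, not taken from a real deployment.
import os

osds = [{'device': '/dev/sdb', 'fs_uuid': '6a6a3a6b-0000-4000-8000-123456789abc'}]

for osd in osds:
    mount_point = '/var/lib/ceph/osd/{0}'.format(osd['fs_uuid'])
    # mount task: mount UUID=<fs_uuid> at mount_point (xfs)
    whoami_path = os.path.join(mount_point, 'whoami')
    if os.path.isfile(whoami_path):
        with open(whoami_path) as f:
            osd_id = f.read().strip()
        # start task: container ceph_osd_<osd_id>, env OSD_ID=<osd_id>, OSD_DEV=<device>
        print(osd['device'], mount_point, osd_id)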
@@ -36,15 +43,16 @@
     name: "ceph_osd_{{ item.0.stdout }}"
     image: "{{ ceph_osd_image_full }}"
     volumes:
-      - "/var/lib/ceph/osd/{{ item.1.fs_uuid }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
+      - "/var/lib/ceph/osd/{{ item.1['fs_uuid'] }}:/var/lib/ceph/osd/ceph-{{ item.0.stdout }}"
       - "{{ node_config_directory }}/ceph-osd/:{{ container_config_directory }}/:ro"
       - "/dev/:/dev/"
     env:
       KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
       OSD_ID: "{{ item.0.stdout }}"
-      OSD_DEV: "{{ item.1.device }}"
+      OSD_DEV: "{{ item.1['device'] }}"
   with_together:
     - id.results
-    - osds.disks
-  when: inventory_hostname in groups['ceph-osd']
-        and osds.disks
+    - osds
+  when:
+    - inventory_hostname in groups['ceph-osd']
+    - osds
@@ -60,4 +60,6 @@
       ANSIBLE_LIBRARY: "/usr/share/ansible"
     volumes:
       - /var/lib/kolla/dev/log:/dev/log
+      - /dev/:/dev/
+      - /run/:/run/
     command: "/bin/sleep infinity"
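The two new bind mounts above appear to be what lets pyudev work from inside the kolla_ansible container: device nodes come from /dev, and the udev database normally lives under /run/udev. A quick, illustrative sanity check one could run inside the container (paths are the usual defaults, not guaranteed everywhere):

# Illustrative sanity check for the bind mounts above: pyudev needs the device
# nodes (/dev) and the udev database (usually /run/udev) to be visible.
import os
import pyudev

assert os.path.isdir('/dev'), '/dev is not mounted into the container'
assert os.path.isdir('/run/udev'), 'udev database not visible (is /run mounted?)'

context = pyudev.Context()
partitions = list(context.list_devices(subsystem='block', DEVTYPE='partition'))
print('pyudev sees {0} partition(s)'.format(len(partitions)))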
@@ -17,7 +17,9 @@ RUN pip install -U pip wheel
 
 {% endif %}
 
-RUN pip --no-cache-dir install shade
+RUN pip --no-cache-dir install \
+        shade \
+        pyudev
 
 RUN git clone --depth 1 -b v2.0.0-0.2.alpha2 https://github.com/ansible/ansible.git \
     && cd ansible \
@@ -28,7 +30,7 @@ RUN mkdir -p /etc/ansible /usr/share/ansible /home/ansible \
     && echo 'localhost ansible_connection=local' > /etc/ansible/hosts \
     && useradd --user-group ansible --groups kolla
 
-COPY kolla_keystone_service.py kolla_keystone_user.py /usr/share/ansible/
+COPY find_disks.py kolla_keystone_service.py kolla_keystone_user.py /usr/share/ansible/
 COPY ansible.cfg /home/ansible/.ansible.cfg
 
 {{ include_footer }}
@@ -42,9 +42,8 @@ EXAMPLES = '''
     register: osds
 '''
 
-import sys
-import subprocess
-
+import json
+import pyudev
 
 def main():
     module = AnsibleModule(
@@ -52,33 +51,19 @@ def main():
             partition_name = dict(required=True, type='str')
         )
     )
 
     partition_name = module.params.get('partition_name')
 
     try:
-        # This should all really be done differently. Unfortunately there is no
-        # decent python library for dealing with disks like we need to here.
-        disks = subprocess.check_output("parted -l", shell=True).split('\n')
         ret = list()
-
-        for line in disks:
-            d = line.split(' ')
-            if d[0] == 'Disk' and d[1] != 'Flags:':
-                dev = d[1][:-1]
-
-            if line.find(partition_name) != -1:
-                # This process returns an error code when no results return
-                # We can ignore that, it is safe
-                p = subprocess.Popen("blkid " + dev + "*", shell=True, stdout=subprocess.PIPE)
-                blkid_out = p.communicate()[0]
-                # The dev doesn't need to have a uuid, will be '' otherwise
-                if ' UUID=' in blkid_out:
-                    fs_uuid = blkid_out.split(' UUID="')[1].split('"')[0]
-                else:
+        ct = pyudev.Context()
+        for dev in ct.list_devices(subsystem='block', DEVTYPE='partition'):
+            if dev.get('ID_PART_ENTRY_NAME') == partition_name:
+                fs_uuid = dev.get('ID_FS_UUID')
+                if not fs_uuid:
                     fs_uuid = ''
-                ret.append({'device': dev, 'fs_uuid': fs_uuid})
-
-        module.exit_json(disks=ret)
+                dev_parent = dev.find_parent('block').device_node
+                ret.append({'device': dev_parent, 'fs_uuid': fs_uuid})
+        module.exit_json(disks=json.dumps(ret))
     except Exception as e:
         module.exit_json(failed=True, msg=repr(e))
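One behavioural detail worth calling out in the rewritten module: it reports the parent disk (`dev.find_parent('block').device_node`) rather than the matched partition itself, which is the value the bootstrap tasks pass on as OSD_DEV, and the list goes out JSON-encoded via `json.dumps`. A small, illustrative pyudev snippet showing that partition-to-disk mapping (device names in the comments are examples only):

# Illustration of the partition -> parent disk mapping used by the new module.
import pyudev

context = pyudev.Context()
for dev in context.list_devices(subsystem='block', DEVTYPE='partition'):
    parent = dev.find_parent('block')
    if parent is not None:
        # e.g. /dev/sdb1 -> /dev/sdb, or /dev/loop0p1 -> /dev/loop0
        print('{0} -> {1}'.format(dev.device_node, parent.device_node))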