510cea0c23
- In case we deploy Ceph on a multi-node env we have to prepare the loop devices on all nodes. For this we moved loop devices setup to the deploy-env Ansible role. For simplicity we need the same device on all nodes, so we create a loop device with a big minor number (/dev/loop100 by default) hoping that only low minor numbers could be busy. - For test jobs we don't need to use different devices for OSD data and metadata. There is no benefit to this for the test environment. So let's keep it simple and put both OSD data and metadata on the same device. - On a multi-node env a Ceph cluster needs its members to see each other, so let's use the pod network CIDR. Change-Id: I493b6c31d97ff2fc4992c6bb1994d0c73320cd7b
19 lines
405 B
Desktop File
19 lines
405 B
Desktop File
# Systemd unit (Ansible/Jinja2 template) that attaches a loopback image
# file to a fixed loop device at boot, before local filesystems are
# mounted, so the device can back a Ceph OSD on every node.
# Template variables: loopback_device (e.g. /dev/loop100),
# loopback_image (path to the backing image file).

[Unit]
Description=Setup loop devices
# Run early in boot: opt out of the default dependency set and order
# ourselves before local filesystem mounts.
DefaultDependencies=no
Conflicts=umount.target
Before=local-fs.target
# udev must be running so the loop device node exists/settles.
After=systemd-udevd.service
Requires=systemd-udevd.service

[Service]
Type=oneshot
# Attach the backing image to the fixed loop device; the image path is
# quoted in case it contains spaces.
ExecStart=/sbin/losetup {{ loopback_device }} '{{ loopback_image }}'
# Detach the loop device when the unit is stopped.
ExecStop=/sbin/losetup -d {{ loopback_device }}
TimeoutSec=60
# Keep the oneshot unit "active" after ExecStart exits so that
# ExecStop runs on stop/shutdown and the device is detached cleanly.
RemainAfterExit=yes

[Install]
WantedBy=local-fs.target
# NOTE(review): systemd-udevd.service is normally a static unit, so
# enabling it via Also= is likely a no-op — confirm whether this line
# is needed.
Also=systemd-udevd.service