Fix cinder-volume AIO ceph scenario
At the moment cinder-volume is not configured properly for the ceph scenario: the LVM backend is always hardcoded, even when ceph is in use. Fix this by moving the cinder_backends definition from conf.d to the AIO templates. With that, a proper tempest test has been added to verify cinder-volume functionality.

Change-Id: I545f4098e899ab80045c9dba03101873b80f9a6c
parent 8acc9802c3
commit 091ae6369d
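Because the backend definition now lives in the AIO user-variables templates rather than in conf.d, the same structure can also be set per deployment. The sketch below is only illustrative, reusing the values from this change: a deployer pinning the LVM backend via /etc/openstack_deploy/user_variables.yml, not something this commit itself adds.

  # Illustrative override in /etc/openstack_deploy/user_variables.yml,
  # reusing the same backend values the AIO template ships with:
  cinder_backends:
    lvm:
      volume_group: cinder-volumes
      volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
      volume_backend_name: LVM_iSCSI
      iscsi_ip_address: "172.29.236.100"
      lvm_type: "thin"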
@@ -29,16 +29,3 @@ storage-infra_hosts:
 storage_hosts:
   aio1:
     ip: 172.29.236.100
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.236.100"
-          lvm_type: "thin"
-        extra_volume_types:
-          - low-iops
-          - high-iops
-          - ultra-high-iops
@@ -91,15 +91,20 @@
           data: lv-{{ d | basename }}
       {% endfor %}
       cinder_backends:
-        "rbd_volumes":
+        aio_ceph:
           volume_driver: cinder.volume.drivers.rbd.RBDDriver
           rbd_pool: volumes
           rbd_ceph_conf: /etc/ceph/ceph.conf
           rbd_store_chunk_size: 8
+          rbd_exclusive_cinder_pool: true
           volume_backend_name: rbddriver
-          rbd_user: cinder
+          rbd_user: "{% raw %}{{ cinder_ceph_client }}{% endraw %}"
           rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
           report_discard_supported: true
+        extra_volume_types:
+          - low-iops
+          - high-iops
+          - ultra-high-iops
     dest: /etc/openstack_deploy/user_ceph_aio.yml
     force: no
   become: false
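Worth noting for the hunk above: the {% raw %}…{% endraw %} markers keep the inner Jinja expressions from being expanded while the bootstrap template is rendered, so the generated user_ceph_aio.yml still carries the variables and they are resolved later, at deployment time. A rough, illustrative excerpt of what the written file then contains (unexpanded expressions, no real values):

  # Excerpt of the generated /etc/openstack_deploy/user_ceph_aio.yml (sketch only):
  cinder_backends:
    aio_ceph:
      rbd_user: "{{ cinder_ceph_client }}"
      rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"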
@@ -287,3 +287,16 @@ deployment_environment_variables:
   lxc_container_networks: {}
 {% endif %}
 
+{% if 'ceph' not in bootstrap_host_scenarios_expanded %}
+cinder_backends:
+  lvm:
+    volume_group: cinder-volumes
+    volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+    volume_backend_name: LVM_iSCSI
+    iscsi_ip_address: "172.29.236.100"
+    lvm_type: "thin"
+  extra_volume_types:
+    - low-iops
+    - high-iops
+    - ultra-high-iops
+{% endif %}
@@ -24,12 +24,11 @@ ceph_conf_overrides_custom:
   global:
     mon_max_pg_per_osd: 500
 openstack_config: true # Ceph ansible automatically creates pools & keys
-cinder_ceph_client: cinder
-cinder_default_volume_type: rbd_volumes
+cinder_default_volume_type: aio_ceph
 glance_ceph_client: glance
 glance_default_store: rbd
 glance_rbd_store_pool: images
 nova_libvirt_images_rbd_pool: vms
 # NOTE(noonedeadpunk): ceph bug to track the issue https://tracker.ceph.com/issues/46295
-tempest_test_excludelist:
-  - tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
+tempest_test_includelist:
+  - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
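With tempest_test_includelist now pointing at the volume boot pattern scenario, tempest exercises the cinder-volume path end to end. As a hedged example only, a deployer could extend the same list in /etc/openstack_deploy/user_variables.yml; the second test name below is illustrative and not part of this change:

  # Illustrative include-list override in /etc/openstack_deploy/user_variables.yml:
  tempest_test_includelist:
    - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
    - tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete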