Jonathan Rosser 76cd97b1e5 Reduce ceph memory overhead for AIO by setting is_hci to true
This reserves 80% of the host memory so that it is *not* considered
when calculating memory limits for the Ceph OSD processes. ceph-ansible
uses this setting for 'hyperconverged' deployments, where nova-compute
is co-located with OSDs on the same nodes and the majority of memory
must remain available for VM guests rather than for storage.

Change-Id: I7bba3429d71b30253bc72fd013cec4d085eb8fbf
2021-12-04 13:04:26 +00:00
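
For context, ceph-ansible sizes osd_memory_target from the host's total
memory multiplied by a safety factor, and is_hci switches that factor
from roughly 70% to 20% of RAM. Below is a minimal sketch of that
relationship, assuming ceph-ansible's default variables
hci_safety_factor (0.2) and non_hci_safety_factor (0.7); the exact task
expression varies by release:

    # Hedged sketch, not the literal ceph-ansible task:
    osd_memory_target: >-
      {{ ((ansible_memtotal_mb * 1048576)
          * (is_hci | bool | ternary(hci_safety_factor, non_hci_safety_factor))
          / (num_osds | default(1) | int)) | int }}

With is_hci: true, only about 20% of host memory is divided among the
OSDs, leaving the rest for nova-compute guests in the AIO.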

---
# Copyright 2017, Logan Vig <logan2211@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

## ceph-ansible AIO settings
is_hci: true
common_single_host_mode: true
monitor_interface: "{{ ('metal' in bootstrap_host_scenarios_expanded) | ternary('br-mgmt', 'eth1') }}" # Management network in the AIO
public_network: "{{ (mgmt_range ~ '.0/' ~ netmask) | ipaddr('net') }}"
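# Example (assumed values): with mgmt_range '172.29.236' and netmask '22',
# the expression above renders public_network as '172.29.236.0/22'.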
journal_size: 100
osd_scenario: collocated
ceph_conf_overrides_custom:
  global:
    mon_max_pg_per_osd: 500
openstack_config: true # ceph-ansible automatically creates pools & keys
cinder_ceph_client: cinder
cinder_default_volume_type: RBD
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms
# NOTE(noonedeadpunk): excluded because of a ceph bug, tracked at https://tracker.ceph.com/issues/46295
tempest_test_excludelist:
  - tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download