
This incorrect disk_cachemodes setting was causing nova-compute to set 'none' as the disk cache instead of 'writeback'. We want to use writeback instead because we don't care about data integrity on nodepool guests.

Change-Id: I9b31c4afa8b7836f2f77294033d0a92bec55dd84
---
## Load Balancer Configuration (haproxy/keepalived)
# Because we have three haproxy nodes, we need to
# have one active LB IP, and we use keepalived for that.
haproxy_keepalived_external_vip_cidr: "192.169.91.101/32"
haproxy_keepalived_internal_vip_cidr: "10.53.2.2/32"

# Use the management bridge veth in the containers
haproxy_keepalived_external_interface: eth1
haproxy_keepalived_internal_interface: eth1
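# (To see which haproxy node currently holds a VIP, one can check the
# interface directly, using the interface and address configured above:
#   ip addr show eth1 | grep 192.169.91.101
# Keepalived moves the address to a standby node when the active one fails.)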

# Add the SSH pubkey needed for deployment
lxc_container_ssh_key: "{{ lookup('file', '/etc/openstack_deploy/ssh/id_rsa.pub') }}"

# Configure SSH keys on hosts
ssh_key_url: https://git.openstack.org/cgit/openstack/limestone-ci-cloud/plain/ssh/authorized_keys

# Use the utility container for bootstrapping openstack services
openstack_service_setup_host: "{{ groups['utility_all'][0] }}"
openstack_service_setup_host_python_interpreter: "/openstack/venvs/utility-{{ openstack_release }}/bin/python"
# NOTE(logan): It should be possible to remove shade once ansible 2.6 is used in Stein
utility_pip_packages:
  - shade

# Galera max_connections default is 100*VCPU. On our controllers, this is 800 connections.
# With the base services deployed, OSA exceeds this threshold, resulting in connection
# contention in the database cluster.
galera_max_connections: 1500
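# (Worked example, assuming the 8-vCPU controllers implied above:
# 100 connections * 8 vCPUs = 800, which the base OSA services alone can
# exhaust across the cluster; 1500 leaves comfortable headroom.)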

# Enable L2pop and disable vxlan multicast
neutron_l2_population: True
neutron_vxlan_group:

# Set the MTU correctly so VXLAN tenant networks receive 1500 byte MTUs
neutron_neutron_conf_overrides:
  DEFAULT:
    global_physnet_mtu: 1550
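# (The VXLAN encapsulation headers cost 50 bytes, so a 1550 byte physical
# network MTU leaves 1550 - 50 = 1500 bytes for the tenant network MTU.)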

# Enable novnc instead of spice
nova_console_type: novnc

# Adjust default OSA allocation ratios
# The intent is to allow flexible ratios (to a point) while using RAM as the
# bottom-line limiting factor in determining a full host normally.
nova_cpu_mode: host-passthrough
nova_libvirt_disk_cachemodes: 'file=writeback'
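# With 'file=writeback', nova-compute should render (roughly) the following
# in nova.conf; a hedged sketch of the expected result, not verified output:
#   [libvirt]
#   disk_cachemodes = file=writeback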
nova_compute_ksm_enabled: True
nova_cpu_allocation_ratio: 10
nova_disk_allocation_ratio: 5
nova_ram_allocation_ratio: 1.2
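# (Worked example with a hypothetical 16-core, 5 TB, 256 GiB host: the ratios
# above advertise 16 * 10 = 160 vCPUs and 5 * 5 = 25 TB of disk, but only
# 256 * 1.2 = 307.2 GiB of RAM, so RAM normally fills first, as intended.)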

# Enable extra Nova scheduler filters
# This list is all of the OSA defaults + AggregateInstanceExtraSpecsFilter
nova_scheduler_default_filters: >-
  RetryFilter,
  AvailabilityZoneFilter,
  RamFilter,
  AggregateRamFilter,
  ComputeFilter,
  AggregateCoreFilter,
  DiskFilter,
  AggregateDiskFilter,
  AggregateNumInstancesFilter,
  AggregateIoOpsFilter,
  ComputeCapabilitiesFilter,
  ImagePropertiesFilter,
  ServerGroupAntiAffinityFilter,
  ServerGroupAffinityFilter,
  NUMATopologyFilter,
  AggregateInstanceExtraSpecsFilter
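# AggregateInstanceExtraSpecsFilter matches flavor extra specs against host
# aggregate metadata; a hedged usage sketch (aggregate and flavor names are
# illustrative, not from this deployment):
#   openstack aggregate create fast-storage
#   openstack aggregate set --property ssd=true fast-storage
#   openstack flavor set --property aggregate_instance_extra_specs:ssd=true m1.fast
# Instances of m1.fast will then only land on hosts in the fast-storage aggregate.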

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 98a1078e-7b72-4782-9cf7-ae7c68fd900f # Replace with your generated UUID

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.container }}"
public_network: "{{ cidr_networks.container }},10.4.69.0/24"
osd_scenario: collocated
osd_objectstore: bluestore

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images

# Ceph OSD devices
devices:
  - /dev/sdc
  - /dev/sdd

cinder_backends:
  RBD:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true

# uwsgi monitoring overrides
keystone_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/keystone-uwsgi-stats.sock"

cinder_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/cinder-api-uwsgi-stats.sock"

glance_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/glance-api-uwsgi-stats.sock"

heat_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/heat-api-uwsgi-stats.sock"

heat_api_cfn_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/heat-api-cfn-uwsgi-stats.sock"

nova_api_metadata_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/nova-api-metadata-uwsgi-stats.sock"

nova_api_os_compute_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/nova-api-os-compute-uwsgi-stats.sock"

nova_placement_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/nova-placement-uwsgi-stats.sock"

octavia_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/octavia-api-uwsgi-stats.sock"

sahara_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/sahara-api-uwsgi-stats.sock"

ironic_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/ironic-api-uwsgi-stats.sock"

magnum_api_uwsgi_ini_overrides:
  uwsgi:
    stats: "/tmp/magnum-api-uwsgi-stats.sock"
# Adding elastic search and kibana to haproxy_extra_services
|
|
haproxy_extra_services:
|
|
- service:
|
|
haproxy_service_name: elasticsearch
|
|
haproxy_backend_nodes: "{{ groups['elastic-logstash'] | default([]) }}"
|
|
haproxy_ssl: True
|
|
haproxy_port: 9201
|
|
haproxy_backend_port: 9200
|
|
haproxy_balance_type: http
|
|
haproxy_backend_options:
|
|
- "httpchk"
|
|
- service:
|
|
haproxy_service_name: kibana_ssl
|
|
haproxy_backend_nodes: "{{ groups['kibana'] | default([]) }}"
|
|
haproxy_ssl: True
|
|
haproxy_port: 8443
|
|
haproxy_backend_port: 81
|
|
haproxy_balance_type: tcp
|
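# (A hedged smoke test once deployed, substituting the external VIP above:
#   curl -k https://192.169.91.101:8443/                    # Kibana via the SSL frontend
#   curl -k https://192.169.91.101:9201/_cluster/health     # Elasticsearch via haproxy
# Both should answer once the backends pass their health checks.)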