---
cidr_networks:
  management: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  internal_lb_vip_address: 172.29.236.9
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: openstack.example.com
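  #
  # Illustrative sketch only (assumption, not part of the upstream example):
  # when keepalived manages the VIPs, the matching CIDRs are typically set in
  # user_variables.yml rather than here, for example:
  #
  #   haproxy_keepalived_external_vip_cidr: "<external VIP address>/32"
  #   haproxy_keepalived_internal_vip_cidr: "172.29.236.9/32"
  #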
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "management"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_management_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "physnet1"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "physnet2"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute

###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# zookeeper
coordination_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
load_balancer_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra2:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra3:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
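
# Illustrative sketch only (assumption, not part of the upstream example): as
# the comment above notes, the same NFS mount could instead be applied to
# every glance container once, via /etc/openstack_deploy/user_variables.yml:
#
#   glance_remote_client:
#     - what: "172.29.244.15:/images"
#       where: "/var/lib/glance/images"
#       type: "nfs"
#       options: "_netdev,auto"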

# placement
placement-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# neutron api
network-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# neutron agents (L3, DHCP, etc)
network-agent_hosts:
  net1:
    ip: 172.29.236.21
  net2:
    ip: 172.29.236.22
  net3:
    ip: 172.29.236.23

# ceilometer (telemetry data collection)
metering-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra2:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra3:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
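
# Illustrative sketch only (assumption, not part of the upstream example): as
# the comment above notes, the same NFS backend could instead be defined once
# for all cinder_volume containers via /etc/openstack_deploy/user_variables.yml:
#
#   cinder_backends:
#     nfs_volume:
#       volume_backend_name: NFS_VOLUME1
#       volume_driver: cinder.volume.drivers.nfs.NfsDriver
#       nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
#       nfs_shares_config: /etc/cinder/nfs_shares
#       shares:
#         - ip: "172.29.244.15"
#           share: "/vol/cinder"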