# openstack-ansible/etc/openstack_deploy/openstack_user_config.yml.provnet-group.example
---
cidr_networks:
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"
global_overrides:
  internal_lb_vip_address: 172.29.236.9
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints, the two addresses must be different.
  #
  external_lb_vip_address: openstack.example.com
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
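    # For illustration only: addresses on this network are assigned from the
    # "container" queue above (172.29.236.0/22, excluding the used_ips
    # ranges), and is_container_address marks the assigned address as each
    # container's management address.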
    #
    # The below provider network defines details related to vxlan traffic,
    # including the range of VNIs to assign to project/tenant networks and
    # other attributes.
    #
    # The network details will be used to populate the respective network
    # configuration file(s) on the members of the listed groups.
    #
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - network_hosts
          - compute_hosts
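    # For illustration only: with the "range" above, project/tenant networks
    # would be limited to VNIs 1-1000 (rendered as something like
    # "vni_ranges = 1:1000" in neutron's ml2 configuration), and the address
    # each member receives from the "tunnel" queue serves as its local
    # tunnel endpoint. The exact rendered options depend on the ml2
    # driver/agent in use.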
    #
    # The below provider network(s) define details related to a given provider
    # network: physnet1. Details include the name of the veth interface to
    # connect to the bridge when agent on_metal is False (container_interface)
    # or the physical interface to connect to the bridge when agent on_metal
    # is True (host_bind_override), as well as the network type. The provider
    # network name (net_name) will be used to build a physical network mapping
    # to a network interface; either container_interface or host_bind_override
    # (when defined).
    #
    # The network details will be used to populate the respective network
    # configuration file(s) on the members of the listed groups. In this
    # example, host_bind_override specifies the ens1f0 interface and applies
    # only to the members of network_hosts:
    #
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "ens1f0"
        type: "flat"
        net_name: "physnet1"
        group_binds:
          - network_hosts
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        host_bind_override: "ens1f0"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "physnet1"
        group_binds:
          - network_hosts
    #
    # The below provider network(s) also define details related to the
    # physnet1 provider network. In this example, however, host_bind_override
    # specifies the ens2f0 interface and applies only to the members of
    # compute_hosts:
    #
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "ens2f0"
        type: "flat"
        net_name: "physnet1"
        group_binds:
          - compute_hosts
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        host_bind_override: "ens2f0"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "physnet1"
        group_binds:
          - compute_hosts
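    # For illustration only (assuming a Linux Bridge or Open vSwitch agent):
    # the net_name/host_bind_override pairs above would produce a physical
    # network mapping of "physnet1:ens1f0" on network_hosts and
    # "physnet1:ens2f0" on compute_hosts, while agents running inside
    # containers would instead be mapped to the veth interface
    # (eth11/eth12).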
    #
    # The below provider network defines details related to storage traffic.
    #
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
###
### Infrastructure
###

# galera, memcache, rabbitmq, utility
shared-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# repository (apt cache, python packages, etc)
repo-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13
###
### OpenStack
###

# keystone
identity_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# cinder api services
storage-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13
# glance
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
image_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra2:
    ip: 172.29.236.12
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
  infra3:
    ip: 172.29.236.13
    container_vars:
      limit_container_types: glance
      glance_remote_client:
        - what: "172.29.244.15:/images"
          where: "/var/lib/glance/images"
          type: "nfs"
          options: "_netdev,auto"
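
# A minimal sketch of the global alternative mentioned above, assuming it
# is placed in /etc/openstack_deploy/user_variables.yml instead of per-host
# container_vars (values mirror the per-host example):
#
# glance_remote_client:
#   - what: "172.29.244.15:/images"
#     where: "/var/lib/glance/images"
#     type: "nfs"
#     options: "_netdev,auto"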
# placement
placement-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova api, conductor, etc services
compute-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# heat
orchestration_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# horizon
dashboard_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# neutron server, agents (L3, etc)
network_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# ceilometer (telemetry data collection)
metering-infra_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# aodh (telemetry alarm service)
metering-alarm_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# gnocchi (telemetry metrics storage)
metrics_hosts:
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts:
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17
# cinder volume hosts (NFS-backed)
# The settings here are repeated for each infra host.
# They could instead be applied as global settings in
# user_variables, but are left here to illustrate that
# each container could have different storage targets.
storage_hosts:
  infra1:
    ip: 172.29.236.11
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra2:
    ip: 172.29.236.12
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
  infra3:
    ip: 172.29.236.13
    container_vars:
      cinder_backends:
        limit_container_types: cinder_volume
        nfs_volume:
          volume_backend_name: NFS_VOLUME1
          volume_driver: cinder.volume.drivers.nfs.NfsDriver
          nfs_mount_options: "rsize=65535,wsize=65535,timeo=1200,actimeo=120"
          nfs_shares_config: /etc/cinder/nfs_shares
          shares:
            - ip: "172.29.244.15"
              share: "/vol/cinder"
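
# For illustration only: from the "shares" entries above, the deployment
# writes the nfs_shares_config file consumed by cinder's NfsDriver, one
# "host:/export" line per share, e.g.:
#
# $ cat /etc/cinder/nfs_shares
# 172.29.244.15:/vol/cinder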