Damian Dabrowski
With the current "Ceph production example", the difference between Ceph's public and storage networks is not clear. We assign the storage network to compute nodes, but it is not used there. We also assign the storage network to Ceph monitors, where it is likewise unused. The same problems apply to the AIO environment.

As Dmitriy suggested in [1], Ceph should not use the mgmt network for storage traffic. This change makes Ceph use the storage network for:

- OSD<>OSD communication
- client<>OSD communication
- client<>MON communication

This is the most common scenario: all Ceph-related traffic uses the dedicated (storage) network and does not depend on the mgmt network. This change affects both the "Ceph production example" docs and AIO environments.

[1] https://review.opendev.org/c/openstack/openstack-ansible/+/856566

Change-Id: I74387a2e961e2b8355ea6a0c889b2f5674233ebf
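For deployments consuming this example through the ceph-ansible integration, the intent above amounts to pointing both of Ceph's networks at the storage CIDR. A minimal `user_variables.yml` sketch, assuming the `172.29.244.0/22` storage network defined in the example below (the variable names follow ceph-ansible's `public_network`/`cluster_network` convention):

```yaml
# user_variables.yml (sketch)
# public_network carries client<>MON and client<>OSD traffic;
# cluster_network carries OSD<>OSD replication and recovery traffic.
# Both are placed on the dedicated storage network, so no Ceph
# traffic depends on the mgmt network (172.29.236.0/22).
public_network: "172.29.244.0/22"
cluster_network: "172.29.244.0/22"
```

With both set to the same CIDR, all three traffic types listed above stay on `br-storage`; a deployment could later split `cluster_network` onto a separate replication network without touching the client-facing settings.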
---
cidr_networks: &cidr_networks
  container: 172.29.236.0/22
  tunnel: 172.29.240.0/22
  storage: 172.29.244.0/22

used_ips:
  - "172.29.236.1,172.29.236.50"
  - "172.29.240.1,172.29.240.50"
  - "172.29.244.1,172.29.244.50"
  - "172.29.248.1,172.29.248.50"

global_overrides:
  cidr_networks: *cidr_networks
  internal_lb_vip_address: 172.29.236.9
  #
  # The below domain name must resolve to an IP address
  # in the CIDR specified in haproxy_keepalived_external_vip_cidr.
  # If using different protocols (https/http) for the public/internal
  # endpoints the two addresses must be different.
  #
  external_lb_vip_address: openstack.example.com
  management_bridge: "br-mgmt"
  provider_networks:
    - network:
        container_bridge: "br-mgmt"
        container_type: "veth"
        container_interface: "eth1"
        ip_from_q: "container"
        type: "raw"
        group_binds:
          - all_containers
          - hosts
        is_container_address: true
    - network:
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "101:200,301:400"
        net_name: "vlan"
        group_binds:
          - neutron_linuxbridge_agent
    - network:
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
        type: "raw"
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - manila_share
          - nova_compute
          - ceph-mon
          - ceph-osd

###
### Infrastructure
###

_infrastructure_hosts: &infrastructure_hosts
  infra1:
    ip: 172.29.236.11
  infra2:
    ip: 172.29.236.12
  infra3:
    ip: 172.29.236.13

# nova hypervisors
compute_hosts: &compute_hosts
  compute1:
    ip: 172.29.236.16
  compute2:
    ip: 172.29.236.17

ceph-osd_hosts:
  osd1:
    ip: 172.29.236.18
  osd2:
    ip: 172.29.236.19
  osd3:
    ip: 172.29.236.20

# galera, memcache, rabbitmq, utility
shared-infra_hosts: *infrastructure_hosts

# ceph-mon containers
ceph-mon_hosts: *infrastructure_hosts

# ceph-mds containers
ceph-mds_hosts: *infrastructure_hosts

# ganesha-nfs hosts
ceph-nfs_hosts: *infrastructure_hosts

# repository (apt cache, python packages, etc)
repo-infra_hosts: *infrastructure_hosts

# load balancer
# Ideally the load balancer should not use the Infrastructure hosts.
# Dedicated hardware is best for improved performance and security.
haproxy_hosts: *infrastructure_hosts

###
### OpenStack
###

# keystone
identity_hosts: *infrastructure_hosts

# cinder api services
storage-infra_hosts: *infrastructure_hosts

# cinder volume hosts (Ceph RBD-backed)
storage_hosts: *infrastructure_hosts

# glance
image_hosts: *infrastructure_hosts

# placement
placement-infra_hosts: *infrastructure_hosts

# nova api, conductor, etc services
compute-infra_hosts: *infrastructure_hosts

# heat
orchestration_hosts: *infrastructure_hosts

# horizon
dashboard_hosts: *infrastructure_hosts

# neutron server, agents (L3, etc)
network_hosts: *infrastructure_hosts

# ceilometer (telemetry data collection)
metering-infra_hosts: *infrastructure_hosts

# aodh (telemetry alarm service)
metering-alarm_hosts: *infrastructure_hosts

# gnocchi (telemetry metrics storage)
metrics_hosts: *infrastructure_hosts

# manila (share service)
manila-infra_hosts: *infrastructure_hosts
manila-data_hosts: *infrastructure_hosts

# ceilometer compute agent (telemetry data collection)
metering-compute_hosts: *compute_hosts
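The `br-storage` group binds above assume each target host (infra, compute, and OSD nodes) already has that bridge configured on its storage VLAN. A hypothetical netplan sketch for one OSD host; the `bond0` uplink, VLAN ID `20`, and the `.18` address are placeholders and should match your own hardware and the `used_ips` reservations above:

```yaml
# /etc/netplan/60-br-storage.yaml (sketch; bond0 is assumed to be
# defined elsewhere in the host's netplan configuration)
network:
  version: 2
  vlans:
    bond0.20:
      id: 20
      link: bond0
  bridges:
    br-storage:
      interfaces: [bond0.20]          # storage VLAN on the host uplink
      addresses: [172.29.244.18/22]   # inside the storage cidr_network
```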