---
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# User-defined container networks in CIDR notation. The inventory generator
# assigns IP addresses to network interfaces inside containers from these
# ranges.
cidr_networks:
  # Management (same range as br-mgmt on the target hosts)
  management: 172.29.236.0/22
  # Service (optional, same range as br-snet on the target hosts)
  snet: 172.29.248.0/22
  # Tunnel endpoints for VXLAN tenant networks
  # (same range as br-vxlan on the target hosts)
  tunnel: 172.29.240.0/22
  # Storage (same range as br-storage on the target hosts)
  storage: 172.29.244.0/22

# User-defined list of consumed IP addresses that may intersect
# with the provided CIDRs. To reserve a range, join the lower and
# upper IP addresses of the desired range with a comma,
# e.g. "10.0.0.1,10.0.0.100".
used_ips:
  - 10.240.0.1,10.240.0.50
  - 172.29.244.1,172.29.244.50

# Anything you may wish to override "globally" from within the
# openstack_deploy configuration can be defined here. Values specified
# here take precedence over anything set anywhere else.
global_overrides:
  # Internal management VIP address
  internal_lb_vip_address: 10.240.0.1
  # External DMZ VIP address
  external_lb_vip_address: 192.168.1.1
  # Bridged interface to use with tunnel-type networks
  tunnel_bridge: "br-vxlan"
  # Bridged interface to build containers with
  management_bridge: "br-mgmt"
  # Define your add-on container networks.
  #   group_binds: bind a provided network to a particular group
  #   container_bridge: instructs the inventory where a bridge is plugged
  #                     into on the host side of a veth pair
  #   container_interface: interface name within a container
  #   ip_from_q: name of a CIDR to pull an IP address from
  #   type: networks must have a type; valid types are:
  #         ["raw", "vxlan", "flat", "vlan"]
  #   range: optional value used in "vxlan" and "vlan" type networks
  #   net_name: optional value used to map network names used in neutron ml2
  # You must have a management network.
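  # On the management network below, ``is_ssh_address`` and
  # ``is_container_address`` mark that network's IP as the address the
  # inventory records for SSH access to, and management of, each container.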
  provider_networks:
    - network:
        group_binds:
          - all_containers
          - hosts
        type: "raw"
        container_bridge: "br-mgmt"
        container_interface: "eth1"
        container_type: "veth"
        ip_from_q: "management"
        is_container_address: true
        is_ssh_address: true
    - network:
        group_binds:
          - glance_api
          - cinder_api
          - cinder_volume
          - nova_compute
          # - swift_proxy  ## If you are using the storage network for
          #                ## swift_proxy, add it to the group_binds
        type: "raw"
        container_bridge: "br-storage"
        container_type: "veth"
        container_interface: "eth2"
        ip_from_q: "storage"
    - network:
        group_binds:
          - glance_api
          - nova_compute
          - neutron_linuxbridge_agent
        type: "raw"
        container_bridge: "br-snet"
        container_type: "veth"
        container_interface: "eth3"
        ip_from_q: "snet"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vxlan"
        container_type: "veth"
        container_interface: "eth10"
        ip_from_q: "tunnel"
        type: "vxlan"
        range: "1:1000"
        net_name: "vxlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth11"
        type: "vlan"
        range: "1:1"
        net_name: "vlan"
    - network:
        group_binds:
          - neutron_linuxbridge_agent
        container_bridge: "br-vlan"
        container_type: "veth"
        container_interface: "eth12"
        host_bind_override: "eth12"
        type: "flat"
        net_name: "flat"

# Shared infrastructure parts
shared-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# OpenStack compute infrastructure parts
os-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# OpenStack storage infrastructure parts
storage-infra_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# Keystone identity infrastructure parts
identity_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102

# User-defined compute hosts; this should be a required group
compute_hosts:
  compute1:
    ip: 10.240.0.103

# User-defined storage hosts; this should be a required group
storage_hosts:
  cinder1:
    ip: 10.240.0.104
    # "container_vars" can be set outside of all other options as
    # host-specific optional variables.
    container_vars:
      # If you would like to define a cinder availability zone, this can
      # be done with the namespaced variable.
      cinder_storage_availability_zone: cinderAZ_1
      # When creating more than ONE availability zone you should define a
      # sane default for the system to use when scheduling volume creation.
      cinder_default_availability_zone: cinderAZ_1
      # In this example we define which cinder volume backends exist on a
      # given host.
      cinder_backends:
        # If the "limit_container_types" argument is set within the top-level
        # key of the provided option, the inventory process performs a string
        # match between the container name and the value of
        # "limit_container_types". If the value is found anywhere within the
        # container name, the options are appended as host_vars inside of
        # the inventory.
        limit_container_types: cinder_volume
        lvm:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
          volume_backend_name: LVM_iSCSI
      # The ``cinder_nfs_client`` value is an optional component available
      # when configuring cinder.
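      # ``ip_nfs_server`` in the share definition below is assumed to be a
      # user-defined variable (set, for example, in
      # /etc/openstack_deploy/user_variables.yml) that resolves to the
      # address of your NFS server.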
      cinder_nfs_client:
        nfs_shares_config: /etc/cinder/nfs_shares
        shares:
          - { ip: "{{ ip_nfs_server }}", share: "/vol/cinder" }
  cinder2:
    ip: 10.240.0.105
    container_vars:
      cinder_storage_availability_zone: cinderAZ_2
      cinder_default_availability_zone: cinderAZ_1
      cinder_backends:
        limit_container_types: cinder_volume
        lvm_ssd:
          volume_group: cinder-volumes
          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
          volume_backend_name: LVM_iSCSI_SSD

# User-defined logging hosts; this should be a required group
log_hosts:
  logger1:
    ip: 10.240.0.107

# User-defined networking hosts; this should be a required group
network_hosts:
  network1:
    ip: 10.240.0.108

# User-defined repository hosts; this is an optional group
repo_hosts:
  infra1:
    ip: 10.240.0.100
  infra2:
    ip: 10.240.0.101
  infra3:
    ip: 10.240.0.102
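
# Optional load balancer hosts. If the internal and external VIPs defined
# under global_overrides are terminated with haproxy rather than external
# hardware, a group along the following lines can be defined. This is an
# illustrative, commented-out sketch: the group name ``haproxy_hosts`` and
# the reuse of the infra host addresses are assumptions to adapt to your
# environment.
# haproxy_hosts:
#   infra1:
#     ip: 10.240.0.100
#   infra2:
#     ip: 10.240.0.101
#   infra3:
#     ip: 10.240.0.102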